direct-io.hg
changeset 6002:f294acb25858
Merge.
--- a/xen/arch/ia64/Makefile	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/Makefile	Wed Aug 03 09:35:38 2005 +0000
@@ -34,8 +34,27 @@ default: $(OBJS) head.o ia64lib.o xen.ld
 		> $(BASEDIR)/System.map


-asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
+asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
+	$(CC) $(CFLAGS) -S -o $@ $<
+
+asm-xsi-offsets.s: asm-xsi-offsets.c
 	$(CC) $(CFLAGS) -S -o $@ $<
+
+$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
+	@(set -e; \
+	  echo "/*"; \
+	  echo " * DO NOT MODIFY."; \
+	  echo " *"; \
+	  echo " * This file was auto-generated from $<"; \
+	  echo " *"; \
+	  echo " */"; \
+	  echo ""; \
+	  echo "#ifndef __ASM_XSI_OFFSETS_H__"; \
+	  echo "#define __ASM_XSI_OFFSETS_H__"; \
+	  echo ""; \
+	  sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
+	  echo ""; \
+	  echo "#endif") <$< >$@

 $(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
 # Need such symbol link to make linux headers available
@@ -60,6 +79,7 @@ ia64lib.o:

 clean:
 	rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
+	rm -f asm-xsi-offsets.s $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
 	rm -f lib/*.o

 .PHONY: default clean
--- a/xen/arch/ia64/asm-offsets.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/asm-offsets.c	Wed Aug 03 09:35:38 2005 +0000
@@ -42,29 +42,34 @@ void foo(void)

 	BLANK();

-	DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
-	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
-	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
-	DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
-	DEFINE(XSI_IFA_OFS, offsetof(vcpu_info_t, arch.ifa));
-	DEFINE(XSI_ITIR_OFS, offsetof(vcpu_info_t, arch.itir));
-	DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
-	DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
-	DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
-	DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
-	DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
-	DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
-	DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
-	DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
-	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
-	DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
-	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
-	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
-	DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
-	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
-	DEFINE(XSI_TPR_OFS, offsetof(vcpu_info_t, arch.tpr));
-	DEFINE(XSI_PTA_OFS, offsetof (vcpu_info_t, arch.pta));
-	DEFINE(XSI_ITV_OFS, offsetof(vcpu_info_t, arch.itv));
+	DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+	DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+	DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+	DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+	DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+	DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+	DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+	DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+	DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+
+	DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+	DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+	DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+	DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+	DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+	DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+	DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+	DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+	DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+	DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+	DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+	DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+	DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
 	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/asm-xsi-offsets.c	Wed Aug 03 09:35:38 2005 +0000
@@ -0,0 +1,110 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * asm-xsi-offsets.c_
+ * Copyright (c) 2005, Intel Corporation.
+ *      Kun Tian (Kevin Tian) <kevin.tian@intel.com>
+ *      Eddie Dong <eddie.dong@intel.com>
+ *      Fred Yang <fred.yang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <public/xen.h>
+#ifdef CONFIG_VTI
+#include <asm/tlb.h>
+#include <asm/regs.h>
+#endif // CONFIG_VTI
+
+#define task_struct vcpu
+
+#define DEFINE(sym, val) \
+        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(_sym, _str, _mem) \
+    DEFINE(_sym, offsetof(_str, _mem));
+
+void foo(void)
+{
+
+    DEFINE(XSI_BASE, SHARED_ARCHINFO_ADDR);
+
+    DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+    DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+    DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+    DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+    DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+    DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+    DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+    DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+    DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+    DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
+    DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+    DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
+    DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+    DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+    DEFINE(XSI_IIPA_OFS, offsetof(mapped_regs_t, iipa));
+    DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
+    DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+    DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
+    DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+    DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
+    DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha));
+    DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
+    DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+    DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+    DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
+    DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
+    DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+    DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
+    DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+    DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+    DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+    DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
+    DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+    DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
+    DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
+    DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
+    DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+    DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
+
+    DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+    DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
+
+    DEFINE(XSI_BANK0_R16_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+    DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
+    DEFINE(XSI_BANK1_R16_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+    DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
+    DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+    DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
+    DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
+    DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
+    DEFINE(XSI_PKR0_OFS, offsetof(mapped_regs_t, pkrs[0]));
+    DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
+    DEFINE(XSI_TMP0_OFS, offsetof(mapped_regs_t, tmp[0]));
+    DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
+
+}
--- a/xen/arch/ia64/dom_fw.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/dom_fw.c	Wed Aug 03 09:35:38 2005 +0000
@@ -291,6 +291,10 @@ xen_pal_emulator(unsigned long index, un
 	long r11 = 0;
 	long status = -1;

+#define USE_PAL_EMULATOR
+#ifdef USE_PAL_EMULATOR
+	return pal_emulator_static(index);
+#endif
 	if (running_on_sim) return pal_emulator_static(index);
 	if (index >= PAL_COPY_PAL) {
 		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %d!!!!\n",
@@ -314,12 +318,10 @@ xen_pal_emulator(unsigned long index, un
 		break;
 	    case PAL_PTCE_INFO:
 		{
-			ia64_ptce_info_t ptce;
-			status = ia64_get_ptce(&ptce);
-			if (status != 0) break;
-			r9 = ptce.base;
-			r10 = (ptce.count[0]<<32)|(ptce.count[1]&0xffffffffL);
-			r11 = (ptce.stride[0]<<32)|(ptce.stride[1]&0xffffffffL);
+			// return hard-coded xen-specific values because ptc.e
+			// is emulated on xen to always flush everything
+			// these values result in only one ptc.e instruction
+			status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
 		}
 		break;
 	    case PAL_VERSION:
@@ -335,7 +337,10 @@ xen_pal_emulator(unsigned long index, un
 		status = ia64_pal_cache_summary(&r9,&r10);
 		break;
 	    case PAL_VM_SUMMARY:
-		status = ia64_pal_vm_summary(&r9,&r10);
+		// FIXME: what should xen return for these, figure out later
+		// For now, linux does the right thing if pal call fails
+		// In particular, rid_size must be set properly!
+		//status = ia64_pal_vm_summary(&r9,&r10);
 		break;
 	    case PAL_RSE_INFO:
 		status = ia64_pal_rse_info(&r9,&r10);
--- a/xen/arch/ia64/domain.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/domain.c	Wed Aug 03 09:35:38 2005 +0000
@@ -212,6 +212,10 @@ void arch_do_createdomain(struct vcpu *v
 	 */

 	memset(d->shared_info, 0, PAGE_SIZE);
+	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
 	v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
 	/* Mask all events, and specific port will be unmasked
 	 * when customer subscribes to it.
@@ -232,8 +236,8 @@ void arch_do_createdomain(struct vcpu *v
 	/* FIXME: This is identity mapped address for xenheap.
 	 * Do we need it at all?
 	 */
-	d->xen_vastart = 0xf000000000000000;
-	d->xen_vaend = 0xf300000000000000;
+	d->xen_vastart = XEN_START_ADDR;
+	d->xen_vaend = XEN_END_ADDR;
 	d->arch.breakimm = 0x1000;
 }
 #else // CONFIG_VTI
@@ -252,12 +256,16 @@ void arch_do_createdomain(struct vcpu *v
 		while (1);
 	}
 	memset(d->shared_info, 0, PAGE_SIZE);
+	d->shared_info->vcpu_data[0].arch.privregs =
+			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+	memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
 	v->vcpu_info = &(d->shared_info->vcpu_data[0]);

-	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
+	d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
 	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
 		BUG();
-	v->vcpu_info->arch.metaphysical_mode = 1;
+	VCPU(v, metaphysical_mode) = 1;
 	v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
 	v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
 #define DOMAIN_RID_BITS_DEFAULT 18
@@ -266,9 +274,9 @@ void arch_do_createdomain(struct vcpu *v
 	v->arch.starting_rid = d->arch.starting_rid;
 	v->arch.ending_rid = d->arch.ending_rid;
 	// the following will eventually need to be negotiated dynamically
-	d->xen_vastart = 0xf000000000000000;
-	d->xen_vaend = 0xf300000000000000;
-	d->shared_info_va = 0xf100000000000000;
+	d->xen_vastart = XEN_START_ADDR;
+	d->xen_vaend = XEN_END_ADDR;
+	d->shared_info_va = SHAREDINFO_ADDR;
 	d->arch.breakimm = 0x1000;
 	v->arch.breakimm = d->arch.breakimm;

@@ -292,7 +300,15 @@ void arch_getdomaininfo_ctxt(struct vcpu

 	printf("arch_getdomaininfo_ctxt\n");
 	c->regs = *regs;
-	c->vcpu = v->vcpu_info->arch;
+	c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
+#if 0
+	if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
+			v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
+		printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
+		return -EFAULT;
+	}
+#endif
+
 	c->shared = v->domain->shared_info->arch;
 }

@@ -307,13 +323,20 @@ int arch_set_info_guest(struct vcpu *v,
 	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
 	regs->ar_rsc |= (2 << 2); /* force PL2/3 */

-	v->vcpu_info->arch = c->vcpu;
+	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
+	if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+			c->vcpu.privregs, sizeof(mapped_regs_t))) {
+		printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
+		return -EFAULT;
+	}
+
 	init_all_rr(v);

 	// this should be in userspace
-	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L); //FIXME
-	v->vcpu_info->arch.banknum = 1;
-	v->vcpu_info->arch.metaphysical_mode = 1;
+	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L); //FIXME
+	v->arch.domain_itm_last = -1L;
+	VCPU(v, banknum) = 1;
+	VCPU(v, metaphysical_mode) = 1;

 	v->domain->shared_info->arch = c->shared;
 	return 0;
@@ -325,6 +348,7 @@ int arch_set_info_guest(
 	struct domain *d = v->domain;
 	int i, rc, ret;
 	unsigned long progress = 0;
+	shared_iopage_t *sp;

 	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
 		return 0;
@@ -350,8 +374,17 @@ int arch_set_info_guest(
 	/* FIXME: only support PMT table continuously by far */
 	d->arch.pmt = __va(c->pt_base);
 	d->arch.max_pfn = c->pt_max_pfn;
-	v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
-	memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
+	d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
+	sp = get_sp(d);
+	memset((char *)sp,0,PAGE_SIZE);
+	/* FIXME: temp due to old CP */
+	sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+	sp->vcpu_number = 1;
+#endif
+	/* TEMP */
+	d->arch.vmx_platform.pib_base = 0xfee00000UL;
+

 	if (c->flags & VGCF_VMX_GUEST) {
 	    if (!vmx_enabled)
@@ -370,7 +403,7 @@ int arch_set_info_guest(
 	if (v == d->vcpu[0]) {
 	    memset(&d->shared_info->evtchn_mask[0], 0xff,
 		sizeof(d->shared_info->evtchn_mask));
-	    clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
+	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
 	}
 	/* Setup domain context. Actually IA-64 is a bit different with
 	 * x86, with almost all system resources better managed by HV
@@ -380,8 +413,8 @@ int arch_set_info_guest(
 	new_thread(v, c->guest_iip, 0, 0);


-	d->xen_vastart = 0xf000000000000000;
-	d->xen_vaend = 0xf300000000000000;
+	d->xen_vastart = XEN_START_ADDR;
+	d->xen_vaend = XEN_END_ADDR;
 	d->arch.breakimm = 0x1000 + d->domain_id;
 	v->arch._thread.on_ustack = 0;

@@ -394,7 +427,13 @@ int arch_set_info_guest(

 void arch_do_boot_vcpu(struct vcpu *v)
 {
+	struct domain *d = v->domain;
 	printf("arch_do_boot_vcpu: not implemented\n");
+
+	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
+	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
 	return;
 }

@@ -449,8 +488,8 @@ void new_thread(struct vcpu *v,
 		VPD_CR(v, dcr) = 0;
 	} else {
 		regs->r28 = dom_fw_setup(d,saved_command_line,256L);
-		v->vcpu_info->arch.banknum = 1;
-		v->vcpu_info->arch.metaphysical_mode = 1;
+		VCPU(v, banknum) = 1;
+		VCPU(v, metaphysical_mode) = 1;
 		d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
 	}
 }
@@ -482,8 +521,8 @@ void new_thread(struct vcpu *v,
 	regs->ar_fpsr = FPSR_DEFAULT;
 	init_all_rr(v);
 	regs->r28 = dom_fw_setup(d,saved_command_line,256L); //FIXME
-	v->vcpu_info->arch.banknum = 1;
-	v->vcpu_info->arch.metaphysical_mode = 1;
+	VCPU(v, banknum) = 1;
+	VCPU(v, metaphysical_mode) = 1;
 	d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
 }
 #endif // CONFIG_VTI
@@ -894,7 +933,6 @@ void build_shared_info(struct domain *d)

 	/* Set up shared-info area. */
 	update_dom_time(d);
-	d->shared_info->domain_time = 0;

 	/* Mask all upcalls... */
 	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
@@ -1072,12 +1110,12 @@ if (d == dom0)
 #endif
 	serial_input_init();
 	if (d == dom0) {
-		v->vcpu_info->arch.delivery_mask[0] = -1L;
-		v->vcpu_info->arch.delivery_mask[1] = -1L;
-		v->vcpu_info->arch.delivery_mask[2] = -1L;
-		v->vcpu_info->arch.delivery_mask[3] = -1L;
+		VCPU(v, delivery_mask[0]) = -1L;
+		VCPU(v, delivery_mask[1]) = -1L;
+		VCPU(v, delivery_mask[2]) = -1L;
+		VCPU(v, delivery_mask[3]) = -1L;
 	}
-	else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+	else __set_bit(0x30,VCPU(v, delivery_mask));

 	return 0;
 }
@@ -1233,12 +1271,12 @@ if (d == dom0)
 #endif
 	serial_input_init();
 	if (d == dom0) {
-		v->vcpu_info->arch.delivery_mask[0] = -1L;
-		v->vcpu_info->arch.delivery_mask[1] = -1L;
-		v->vcpu_info->arch.delivery_mask[2] = -1L;
-		v->vcpu_info->arch.delivery_mask[3] = -1L;
+		VCPU(v, delivery_mask[0]) = -1L;
+		VCPU(v, delivery_mask[1]) = -1L;
+		VCPU(v, delivery_mask[2]) = -1L;
+		VCPU(v, delivery_mask[3]) = -1L;
 	}
-	else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+	else __set_bit(0x30, VCPU(v, delivery_mask));

 	return 0;
 }
@@ -1285,7 +1323,7 @@ int construct_domU(struct domain *d,
 #endif
 	new_thread(v, pkern_entry, 0, 0);
 	printk("new_thread returns\n");
-	__set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+	__set_bit(0x30, VCPU(v, delivery_mask));

 	return 0;
 }
--- a/xen/arch/ia64/hypercall.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/hypercall.c	Wed Aug 03 09:35:38 2005 +0000
@@ -41,13 +41,13 @@ ia64_hypercall (struct pt_regs *regs)
 		// to a yet-to-be-found bug where pending_interruption
 		// is zero when it shouldn't be. Since PAL is called
 		// in the idle loop, this should resolve it
-		v->vcpu_info->arch.pending_interruption = 1;
+		VCPU(v,pending_interruption) = 1;
 #endif
 		if (regs->r28 == PAL_HALT_LIGHT) {
 #define SPURIOUS_VECTOR 15
 			pi = vcpu_check_pending_interrupts(v);
 			if (pi != SPURIOUS_VECTOR) {
-				if (!v->vcpu_info->arch.pending_interruption)
+				if (!VCPU(v,pending_interruption))
 					idle_when_pending++;
 				vcpu_pend_unspecified_interrupt(v);
 //printf("idle w/int#%d pending!\n",pi);
--- a/xen/arch/ia64/hyperprivop.S	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/hyperprivop.S	Wed Aug 03 09:35:38 2005 +0000
@@ -18,12 +18,18 @@
 #define FAST_HYPERPRIVOPS
 #define FAST_HYPERPRIVOP_CNT
 #define FAST_REFLECT_CNT
-#define FAST_TICK
+//#define FAST_TICK
 #define FAST_BREAK
 #define FAST_ACCESS_REFLECT
+#define FAST_RFI
+#define FAST_SSM_I
+#define FAST_PTC_GA
 #undef RFI_TO_INTERRUPT // not working yet
 #endif

+// FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
+//#define HANDLE_AR_UNAT
+
 // FIXME: This is defined in include/asm-ia64/hw_irq.h but this
 // doesn't appear to be include'able from assembly?
 #define IA64_TIMER_VECTOR 0xef
@@ -183,6 +189,9 @@ 1: // when we get to here r20=~=interrup
 // r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
 // r31 == pr
 ENTRY(hyper_ssm_i)
+#ifndef FAST_SSM_I
+	br.spnt.few dispatch_break_fault ;;
+#endif
 	// give up for now if: ipsr.be==1, ipsr.pp==1
 	mov r30=cr.ipsr;;
 	mov r29=cr.iip;;
@@ -259,7 +268,8 @@ ENTRY(hyper_ssm_i)
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
 	bsw.1;;
-	// FIXME: need to handle ar.unat!
+	// FIXME?: ar.unat is not really handled correctly,
+	// but may not matter if the OS is NaT-clean
 	.mem.offset 0,0; st8.spill [r2]=r16,16;
 	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
 	.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -425,10 +435,12 @@ GLOBAL_ENTRY(fast_tick_reflect)
 	mov cr.iip=r24;;
 	// OK, now all set to go except for switch to virtual bank0
 	mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+	mov r28=ar.unat;
+#endif
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
 	bsw.1;;
-	// FIXME: need to handle ar.unat!
 	.mem.offset 0,0; st8.spill [r2]=r16,16;
 	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
 	.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -445,9 +457,18 @@ GLOBAL_ENTRY(fast_tick_reflect)
 	.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
 	.mem.offset 0,0; st8.spill [r2]=r30,16;
 	.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
-	movl r31=XSI_IPSR;;
+#ifdef HANDLE_AR_UNAT
+	// bank0 regs have no NaT bit, so ensure they are NaT clean
+	mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
+	mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
+	mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
+	mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
+#endif
 	bsw.0 ;;
 	mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
+	mov ar.unat=r28;
+#endif
 	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
 	st4 [r20]=r0 ;;
 fast_tick_reflect_done:
@@ -567,10 +588,12 @@ ENTRY(fast_reflect)
 	mov cr.iip=r20;;
 	// OK, now all set to go except for switch to virtual bank0
 	mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+	mov r28=ar.unat;
+#endif
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
 	bsw.1;;
-	// FIXME: need to handle ar.unat!
 	.mem.offset 0,0; st8.spill [r2]=r16,16;
 	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
 	.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -587,9 +610,19 @@ ENTRY(fast_reflect)
 	.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
 	.mem.offset 0,0; st8.spill [r2]=r30,16;
 	.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
+#ifdef HANDLE_AR_UNAT
+	// bank0 regs have no NaT bit, so ensure they are NaT clean
+	mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
+	mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
+	mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
+	mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
+#endif
 	movl r31=XSI_IPSR;;
 	bsw.0 ;;
 	mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
+	mov ar.unat=r28;
+#endif
 	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
 	st4 [r20]=r0 ;;
 	mov pr=r31,-1 ;;
@@ -637,6 +670,9 @@ GLOBAL_ENTRY(fast_access_reflect)

 // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
 ENTRY(hyper_rfi)
+#ifndef FAST_RFI
+	br.spnt.few dispatch_break_fault ;;
+#endif
 	// if no interrupts pending, proceed
 	mov r30=r0
 	cmp.eq p7,p0=r20,r0
@@ -736,7 +772,8 @@ just_do_rfi:
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
 	bsw.1;;
-	// FIXME: need to handle ar.unat!
+	// FIXME?: ar.unat is not really handled correctly,
+	// but may not matter if the OS is NaT-clean
 	.mem.offset 0,0; ld8.fill r16=[r2],16 ;
 	.mem.offset 8,0; ld8.fill r17=[r3],16 ;;
 	.mem.offset 0,0; ld8.fill r18=[r2],16 ;
@@ -1461,6 +1498,9 @@ ENTRY(hyper_ptc_ga)
 #ifdef CONFIG_SMP
 FIXME: ptc.ga instruction requires spinlock for SMP
 #endif
+#ifndef FAST_PTC_GA
+	br.spnt.few dispatch_break_fault ;;
+#endif
 	// FIXME: validate not flushing Xen addresses
 #ifdef FAST_HYPERPRIVOP_CNT
 	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_PTC_GA);;
--- a/xen/arch/ia64/mmio.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/mmio.c	Wed Aug 03 09:35:38 2005 +0000
@@ -66,7 +66,7 @@ static void pib_write(VCPU *vcpu, void *
     default:
         if ( PIB_LOW_HALF(pib_off) ) {   // lower half
             if ( s != 8 || ma != 0x4 /* UC */ ) {
-                panic("Undefined IPI-LHF write!\n");
+                panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
             }
             else {
                 write_ipi(vcpu, pib_off, *(uint64_t *)src);
@@ -135,13 +135,13 @@ static void low_mmio_access(VCPU *vcpu,
     ioreq_t *p;
     unsigned long addr;

-    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+    vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
         panic("bad shared page: %lx", (unsigned long)vio);
     }
     p = &vio->vp_ioreq;
     p->addr = pa;
-    p->size = 1<<s;
+    p->size = s;
     p->count = 1;
     p->dir = dir;
     if(dir==IOREQ_WRITE)     //write;
@@ -152,9 +152,9 @@ static void low_mmio_access(VCPU *vcpu,

     set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
     p->state = STATE_IOREQ_READY;
-    evtchn_send(IOPACKET_PORT);
+    evtchn_send(iopacket_port(v->domain));
     vmx_wait_io();
-    if(dir){ //read
+    if(dir==IOREQ_READ){ //read
         *val=p->u.data;
     }
     return;
@@ -168,13 +168,13 @@ static void legacy_io_access(VCPU *vcpu,
     ioreq_t *p;
     unsigned long addr;

-    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+    vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
         panic("bad shared page: %lx");
     }
     p = &vio->vp_ioreq;
     p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
-    p->size = 1<<s;
+    p->size = s;
     p->count = 1;
     p->dir = dir;
     if(dir==IOREQ_WRITE)     //write;
@@ -185,11 +185,20 @@ static void legacy_io_access(VCPU *vcpu,

     set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
     p->state = STATE_IOREQ_READY;
-    evtchn_send(IOPACKET_PORT);
+    evtchn_send(iopacket_port(v->domain));
+
     vmx_wait_io();
-    if(dir){ //read
+    if(dir==IOREQ_READ){ //read
         *val=p->u.data;
     }
+#ifdef DEBUG_PCI
+    if(dir==IOREQ_WRITE)
+        if(p->addr == 0xcf8UL)
+            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
+    else
+        if(p->addr == 0xcfcUL)
+            printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
+#endif //DEBUG_PCI
     return;
 }

@@ -204,12 +213,13 @@ static void mmio_access(VCPU *vcpu, u64
     switch (iot) {
     case GPFN_PIB:
         if(!dir)
-            pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
         else
             pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
         break;
     case GPFN_GFW:
         break;
+    case GPFN_IOSAPIC:
     case GPFN_FRAME_BUFFER:
     case GPFN_LOW_MMIO:
         low_mmio_access(vcpu, src_pa, dest, s, dir);
@@ -217,7 +227,6 @@ static void mmio_access(VCPU *vcpu, u64
         break;
     case GPFN_LEGACY_IO:
         legacy_io_access(vcpu, src_pa, dest, s, dir);
         break;
-    case GPFN_IOSAPIC:
     default:
         panic("Bad I/O access\n");
         break;
@@ -342,6 +351,8 @@ static inline VCPU *lid_2_vcpu (struct d
     LID lid;
     for (i=0; i<MAX_VIRT_CPUS; i++) {
         vcpu = d->vcpu[i];
+        if (!vcpu)
+            continue;
         lid.val = VPD_CR(vcpu, lid);
         if ( lid.id == id && lid.eid == eid ) {
             return vcpu;
@@ -379,15 +390,16 @@ static int write_ipi (VCPU *vcpu, uint64
    inst_type 0:integer 1:floating point
  */
 extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
-
+#define SL_INTEGER  0        // store/load interger
+#define SL_FLOATING    1       // store/load floating

 void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
 {
     REGS *regs;
     IA64_BUNDLE bundle;
-    int slot, dir, inst_type=0;
+    int slot, dir, inst_type;
     size_t size;
-    u64 data, value, slot1a, slot1b;
+    u64 data, value,post_update, slot1a, slot1b, temp;
     INST64 inst;
     regs=vcpu_regs(vcpu);
     bundle = __vmx_get_domain_bundle(regs->cr_iip);
@@ -400,28 +412,70 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
     }
     else if (slot == 2) inst.inst = bundle.slot2;

+
+    // Integer Load/Store
     if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
-        inst_type=0; //fp
+        inst_type = SL_INTEGER;  //
         size=(inst.M1.x6&0x3);
         if((inst.M1.x6>>2)>0xb){      // write
+            dir=IOREQ_WRITE;     //write
             vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
-            dir=IOREQ_WRITE;     //write
         }else if((inst.M1.x6>>2)<0xb){   //  read
-            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
             dir=IOREQ_READ;
-        }else{
-            printf("This memory access instruction can't be emulated one : %lx\n",inst.inst);
-            while(1);
+            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
         }
-    }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
-        inst_type=1; //fp
-        dir=IOREQ_READ;
-        size=3; //ldfd
-    }else{
+    }
+    // Integer Load + Reg update
+    else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
+        inst_type = SL_INTEGER;
+        dir = IOREQ_READ;     //write
+        size = (inst.M2.x6&0x3);
+        vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
+        vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
+        vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
+        temp += post_update;
+        vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
+    }
+    // Integer Load/Store + Imm update
+    else if(inst.M3.major==5){
+        inst_type = SL_INTEGER;  //
+        size=(inst.M3.x6&0x3);
+        if((inst.M5.x6>>2)>0xb){      // write
+            dir=IOREQ_WRITE;     //write
+            vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
+            vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
+            post_update = (inst.M5.i<<7)+inst.M5.imm7;
+            if(inst.M5.s)
+                temp -= post_update;
+            else
+                temp += post_update;
+            vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
+
+        }else if((inst.M3.x6>>2)<0xb){   //  read
+            dir=IOREQ_READ;
+            vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
+            vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
+            post_update = (inst.M3.i<<7)+inst.M3.imm7;
+            if(inst.M3.s)
+                temp -= post_update;
+            else
+                temp += post_update;
+            vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
+
+        }
+    }
+    // Floating-point Load/Store
+//    else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
+//        inst_type=SL_FLOATING; //fp
+//        dir=IOREQ_READ;
+//        size=3; //ldfd
+//    }
+    else{
         printf("This memory access instruction can't be emulated two: %lx\n ",inst.inst);
         while(1);
     }

+    size = 1 << size;
     if(dir==IOREQ_WRITE){
         mmio_access(vcpu, padr, &data, size, ma, dir);
     }else{
@@ -433,7 +487,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
         else if(size==2)
             data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);

-    if(inst_type==0){ //gp
+    if(inst_type==SL_INTEGER){ //gp
         vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
     }else{
         panic("Don't support ldfd now !");
--- a/xen/arch/ia64/pal_emul.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/pal_emul.c	Wed Aug 03 09:35:38 2005 +0000
@@ -1,7 +1,21 @@
-/* PAL/SAL call delegation
+/*
+ * PAL/SAL call delegation
  *
  * Copyright (c) 2004 Li Susie <susie.li@intel.com>
  * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
  */

 #include <asm/vmx_vcpu.h>
@@ -98,23 +112,6 @@ pal_halt (VCPU *vcpu) {

 static struct ia64_pal_retval
 pal_halt_light (VCPU *vcpu) {
-#if 0
-	// GVMM will go back to HVMM and ask HVMM to call yield().
-	vmmdata.p_ctlblk->status = VM_OK;
-	vmmdata.p_ctlblk->ctlcode = ExitVM_YIELD;
-
-	vmm_transition((UINT64)&vmmdata.p_gsa->guest,
-			(UINT64)&vmmdata.p_gsa->host,
-			(UINT64) vmmdata.p_tramp,0,0);
-
-
-	result.status = 0;
-	result.pal_result[0]=0;
-	result.pal_result[1]=0;
-	result.pal_result[2]=0;
-
-	return result;
-#endif
 	struct ia64_pal_retval result;

 	result.status= -1; //unimplemented
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Wed Aug 03 09:35:38 2005 +0000
@@ -20,11 +20,19 @@
 	__do_IRQ(local_vector_to_irq(vector), regs);

 	/*
-@@ -167,6 +173,95 @@
+@@ -167,6 +173,103 @@
 	irq_exit();
  }

 +#ifdef CONFIG_VTI
++#define vmx_irq_enter()		\
++	add_preempt_count(HARDIRQ_OFFSET);
++
++/* Now softirq will be checked when leaving hypervisor, or else
++ * scheduler irq will be executed too early.
++ */
++#define vmx_irq_exit(void)	\
++	sub_preempt_count(HARDIRQ_OFFSET);
 +/*
 + * That's where the IVT branches when we get an external
 + * interrupt. This branches to the correct hardware IRQ handler via
@@ -72,7 +80,7 @@
 + * 16 (without this, it would be ~240, which could easily lead
 + * to kernel stack overflows).
 + */
-+	irq_enter();
++	vmx_irq_enter();
 +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 +	ia64_srlz_d();
 +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
@@ -106,7 +114,7 @@
 + * handler needs to be able to wait for further keyboard interrupts, which can't
 + * come through until ia64_eoi() has been done.
 + */
-+	irq_exit();
++	vmx_irq_exit();
 +	if ( wake_dom0 && current != dom0 )
 +		domain_wake(dom0->vcpu[0]);
 +}
--- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Wed Aug 03 09:35:38 2005 +0000
@@ -1,6 +1,6 @@
 --- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h	2005-03-01 23:37:49.000000000 -0800
 +++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h	2005-05-18 12:40:50.000000000 -0700
-@@ -29,8 +29,20 @@
+@@ -29,8 +29,21 @@
   */
  #define IA64_TR_KERNEL	0	/* itr0, dtr0: maps kernel image (code & data) */
  #define IA64_TR_PALCODE	1	/* itr1: maps PALcode as required by EFI */
@@ -12,6 +12,7 @@
 +#ifdef XEN
 +#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
 +#define IA64_TR_VHPT	4	/* dtr4: vhpt */
++#define IA64_TR_ARCH_INFO	5
 +#ifdef CONFIG_VTI
 +#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
 +#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
--- a/xen/arch/ia64/process.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/process.c	Wed Aug 03 09:35:38 2005 +0000
@@ -50,7 +50,7 @@ extern unsigned long dom0_start, dom0_si
 	IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
 	IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

-#define PSCB(x,y)	x->vcpu_info->arch.y
+#define PSCB(x,y)	VCPU(x,y)
 #define PSCBX(x,y)	x->arch.y

 extern unsigned long vcpu_verbose;
@@ -226,7 +226,7 @@ panic_domain(regs,"psr.ic off, deliverin
 #ifdef CONFIG_SMP
 #error "sharedinfo doesn't handle smp yet"
 #endif
-	regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
+	regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);

 	PSCB(v,interrupt_delivery_enabled) = 0;
 	PSCB(v,interrupt_collection_enabled) = 0;
--- a/xen/arch/ia64/regionreg.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/regionreg.c	Wed Aug 03 09:35:38 2005 +0000
@@ -14,6 +14,8 @@
 #include <asm/page.h>
 #include <asm/regionreg.h>
 #include <asm/vhpt.h>
+#include <asm/vcpu.h>
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);


 #define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
@@ -273,7 +275,8 @@ int set_one_rr(unsigned long rr, unsigne
 		newrrv.rid = newrid;
 		newrrv.ve = VHPT_ENABLED_REGION_7;
 		newrrv.ps = IA64_GRANULE_SHIFT;
-		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+			     v->vcpu_info->arch.privregs);
 	}
 	else {
 		newrrv.rid = newrid;
@@ -290,7 +293,8 @@ int set_one_rr(unsigned long rr, unsigne
 	newrrv.ve = 1;  // VHPT now enabled for region 7!!
 	newrrv.ps = PAGE_SHIFT;
 	if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
-	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+				    v->vcpu_info->arch.privregs);
 	else set_rr(rr,newrrv.rrval);
 #endif
 	return 1;
@@ -332,14 +336,14 @@ void init_all_rr(struct vcpu *v)
 	rrv.ps = PAGE_SHIFT;
 	rrv.ve = 1;
 	if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
-	v->vcpu_info->arch.rrs[0] = -1;
-	v->vcpu_info->arch.rrs[1] = rrv.rrval;
-	v->vcpu_info->arch.rrs[2] = rrv.rrval;
-	v->vcpu_info->arch.rrs[3] = rrv.rrval;
-	v->vcpu_info->arch.rrs[4] = rrv.rrval;
-	v->vcpu_info->arch.rrs[5] = rrv.rrval;
+	VCPU(v,rrs[0]) = -1;
+	VCPU(v,rrs[1]) = rrv.rrval;
+	VCPU(v,rrs[2]) = rrv.rrval;
+	VCPU(v,rrs[3]) = rrv.rrval;
+	VCPU(v,rrs[4]) = rrv.rrval;
+	VCPU(v,rrs[5]) = rrv.rrval;
 	rrv.ve = 0;
-	v->vcpu_info->arch.rrs[6] = rrv.rrval;
+	VCPU(v,rrs[6]) = rrv.rrval;
 //	v->shared_info->arch.rrs[7] = rrv.rrval;
 }

@@ -378,7 +382,7 @@ unsigned long load_region_regs(struct vc
 	// TODO: These probably should be validated
 	unsigned long bad = 0;

-	if (v->vcpu_info->arch.metaphysical_mode) {
+	if (VCPU(v,metaphysical_mode)) {
 		ia64_rr rrv;

 		rrv.rrval = 0;
@@ -390,16 +394,16 @@ unsigned long load_region_regs(struct vc
 		ia64_srlz_d();
 	}
 	else {
-		rr0 = v->vcpu_info->arch.rrs[0];
+		rr0 = VCPU(v,rrs[0]);
 		if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
 	}
-	rr1 = v->vcpu_info->arch.rrs[1];
-	rr2 = v->vcpu_info->arch.rrs[2];
-	rr3 = v->vcpu_info->arch.rrs[3];
-	rr4 = v->vcpu_info->arch.rrs[4];
-	rr5 = v->vcpu_info->arch.rrs[5];
-	rr6 = v->vcpu_info->arch.rrs[6];
-	rr7 = v->vcpu_info->arch.rrs[7];
+	rr1 = VCPU(v,rrs[1]);
+	rr2 = VCPU(v,rrs[2]);
+	rr3 = VCPU(v,rrs[3]);
+	rr4 = VCPU(v,rrs[4]);
+	rr5 = VCPU(v,rrs[5]);
+	rr6 = VCPU(v,rrs[6]);
+	rr7 = VCPU(v,rrs[7]);
 	if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
 	if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
 	if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
@@ -410,4 +414,5 @@ unsigned long load_region_regs(struct vc
 	if (bad) {
 		panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
 	}
+	return 0;
 }
--- a/xen/arch/ia64/vcpu.c	Wed Aug 03 09:35:16 2005 +0000
+++ b/xen/arch/ia64/vcpu.c	Wed Aug 03 09:35:38 2005 +0000
@@ -28,7 +28,7 @@ typedef union {

 // this def for vcpu_regs won't work if kernel stack is present
 #define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
-#define PSCB(x,y)	x->vcpu_info->arch.y
+#define PSCB(x,y)	VCPU(x,y)
 #define PSCBX(x,y)	x->arch.y

 #define TRUE	1
@@ -155,7 +155,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu,
 	// interrupt collection flag
 	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
 	// just handle psr.up and psr.pp for now
-	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
+	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
 		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
 		| IA64_PSR_DFL | IA64_PSR_DFH))
 			return (IA64_ILLOP_FAULT);
@@ -164,6 +164,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu,
 	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
 	if (imm.up) { ipsr->up = 0; psr.up = 0; }
 	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
+	if (imm.be) ipsr->be = 0;
 	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
 	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
 	return IA64_NO_FAULT;
@@ -214,6 +215,7 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
 	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
 	// TODO: do this faster
 	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+	if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
 	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
 	if (imm.up) { ipsr->up = 1; psr.up = 1; }
 	if (imm.be) {
@@ -262,6 +264,7 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN
 	}
 	if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
 	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+	if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
 	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
 	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
 	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
@@ -389,6 +392,21 @@ IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT6
 	return (IA64_NO_FAULT);
 }

+unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
+{
+	ia64_rr rr;
+
+	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
+	return(rr.ps);
+}
+
+unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
+{
+	ia64_rr rr;
+
+	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
+	return(rr.rid);
+}

 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
 {
@@ -881,6 +899,15 @@ IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT
 	return (IA64_NO_FAULT);
 }

+// parameter is a time interval specified in cycles
+void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
+{
+    PSCBX(vcpu,xen_timer_interval) = cycles;
+    vcpu_set_next_timer(vcpu);
+    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
+             PSCBX(vcpu,xen_timer_interval));
+    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
+}

 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
 {
@@ -1009,16 +1036,6 @@ void vcpu_set_next_timer(VCPU *vcpu)
 	}
 }

-// parameter is a time interval specified in cycles
-void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
-{
-    PSCBX(vcpu,xen_timer_interval) = cycles;
-    vcpu_set_next_timer(vcpu);
-    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
-             PSCBX(vcpu,xen_timer_interval));
-    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
-}
-
 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
 {
 	UINT now = ia64_get_itc();
@@ -1182,12 +1199,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
 	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
 	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
 	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
-#define SI_OFS(x) ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
-if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
-printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
-printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
-while(1);
-}
 		// TODO: validate PSCB(vcpu,iip)
 		// TODO: PSCB(vcpu,ipsr) = psr;
 		PSCB(vcpu,ipsr) = psr.i64;
@@ -1222,7 +1233,6 @@ IA64FAULT vcpu_cover(VCPU *vcpu)

 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
 {
-	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
 	UINT64 pta = PSCB(vcpu,pta);
 	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
 	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
@@ -1263,7 +1273,6 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 v
 #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))

 unsigned long vhpt_translate_count = 0;
-int in_vcpu_tpa = 0;

 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
 {
@@ -1278,12 +1287,6 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
 		unsigned long vipsr = PSCB(vcpu,ipsr);
 		unsigned long iip = regs->cr_iip;
 		unsigned long ipsr = regs->cr_ipsr;
-#if 0
-		printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p\n", address, viip, vipsr, iip, ipsr);
-		if (in_vcpu_tpa) printk("vcpu_translate called from vcpu_tpa\n");
-		while(1);
-		panic_domain(0,"vcpu_translate: bad address %p\n", address);
-#endif
 		printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
 	}

@@ -1304,7 +1307,6 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
 	/* check 1-entry TLB */
 	if ((trp = match_dtlb(vcpu,address))) {
 		dtlb_translate_count++;
-if (!in_vcpu_tpa) printf("vcpu_translate: found in vdtlb\n");
 		*pteval = trp->page_flags;
 		*itir = trp->itir;
 		return IA64_NO_FAULT;
@@ -1356,9 +1358,7 @@ IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 va
 	UINT64 pteval, itir, mask;
 	IA64FAULT fault;

-in_vcpu_tpa=1;
 	fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
-in_vcpu_tpa=0;
 	if (fault == IA64_NO_FAULT)
 	{
 		mask = itir_mask(itir);
@@ -1534,28 +1534,8 @@ unsigned long vcpu_get_rr_ve(VCPU *vcpu,
 	return(rr.ve);
 }

-
-unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
-{
-	ia64_rr rr;
-
-	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
-	return(rr.ps);
-}
-
-
-unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
-{
-	ia64_rr rr;
-
-	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
-	return(rr.rid);
-}
-
-
 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
-	extern void set_one_rr(UINT64, UINT64);
 	PSCB(vcpu,rrs)[reg>>61] = val;
 	// warning: set_one_rr() does it "live"
 	set_one_rr(reg,val);
@@ -1785,49 +1765,26 @@ IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64
 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
 {
 	// TODO: Only allowed for current vcpu
-	UINT64 mpaddr, ps;
+	UINT64 mpaddr, paddr;
 	IA64FAULT fault;
-	TR_ENTRY *trp;
-	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
-	unsigned long pteval, dom_imva;
+	unsigned long translate_domain_mpaddr(unsigned long);
+	IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);

-	if ((trp = match_dtlb(vcpu,vadr))) {
-		pteval = trp->page_flags;
-		dom_imva = __va(pteval & _PFN_MASK);
-		ia64_fc(dom_imva);
-		return IA64_NO_FAULT;
-	}
 	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
 	if (fault == IA64_NO_FAULT) {
-		struct domain *dom0;
-		unsigned long dom0_start, dom0_size;
-		if (vcpu == dom0) {
-			if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
-			}
-		}
-		pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
-		if (pteval) {
-			dom_imva = __va(pteval & _PFN_MASK);
-			ia64_fc(dom_imva);
-		}
-		else {
-			REGS *regs = vcpu_regs(vcpu);
-			printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
-					vadr,regs->cr_iip);
-		}
+		paddr = translate_domain_mpaddr(mpaddr);
+		ia64_fc(__va(paddr));
 	}
 	return fault;
 }

+int ptce_count = 0;
 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
 {
-
 	// Note that this only needs to be called once, i.e. the
 	// architected loop to purge the entire TLB, should use
 	//  base = stride1 = stride2 = 0, count0 = count 1 = 1

-	// FIXME: When VHPT is in place, flush that too!
 #ifdef VHPT_GLOBAL
 	vhpt_flush();	// FIXME: This is overdoing it
 #endif
@@ -1850,6 +1807,7 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64
 	// FIXME: validate not flushing Xen addresses
 	// if (Xen address) return(IA64_ILLOP_FAULT);
 	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
+//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
 #ifdef VHPT_GLOBAL
 	vhpt_flush_address(vadr,addr_range);
 #endif
228.1 --- a/xen/arch/ia64/vlsapic.c Wed Aug 03 09:35:16 2005 +0000 228.2 +++ b/xen/arch/ia64/vlsapic.c Wed Aug 03 09:35:38 2005 +0000 228.3 @@ -38,6 +38,14 @@ 228.4 #include <asm/vmx_pal_vsa.h> 228.5 #include <asm/kregs.h> 228.6 228.7 +#define SHARED_VLAPIC_INF 228.8 +#ifdef V_IOSAPIC_READY 228.9 +static inline vl_apic_info* get_psapic(VCPU *vcpu) 228.10 +{ 228.11 + shared_iopage_t *sp = get_sp(vcpu->domain); 228.12 + return &(sp->vcpu_iodata[vcpu->vcpu_id].apic_intr); 228.13 +} 228.14 +#endif 228.15 //u64 fire_itc; 228.16 //u64 fire_itc2; 228.17 //u64 fire_itm; 228.18 @@ -216,7 +224,8 @@ void vtm_interruption_update(VCPU *vcpu, 228.19 */ 228.20 void vtm_domain_out(VCPU *vcpu) 228.21 { 228.22 - rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer); 228.23 + if(!is_idle_task(vcpu->domain)) 228.24 + rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer); 228.25 } 228.26 228.27 /* 228.28 @@ -226,9 +235,11 @@ void vtm_domain_out(VCPU *vcpu) 228.29 void vtm_domain_in(VCPU *vcpu) 228.30 { 228.31 vtime_t *vtm; 228.32 - 228.33 - vtm=&(vcpu->arch.arch_vmx.vtm); 228.34 - vtm_interruption_update(vcpu, vtm); 228.35 + 228.36 + if(!is_idle_task(vcpu->domain)) { 228.37 + vtm=&(vcpu->arch.arch_vmx.vtm); 228.38 + vtm_interruption_update(vcpu, vtm); 228.39 + } 228.40 } 228.41 228.42 /* 228.43 @@ -262,10 +273,50 @@ static void update_vhpi(VCPU *vcpu, int 228.44 } 228.45 } 228.46 228.47 +#ifdef V_IOSAPIC_READY 228.48 +void vlapic_update_shared_info(VCPU *vcpu) 228.49 +{ 228.50 + //int i; 228.51 + 228.52 + vl_apic_info *ps; 228.53 + 228.54 + if (vcpu->domain == dom0) 228.55 + return; 228.56 + 228.57 + ps = get_psapic(vcpu); 228.58 + ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16; 228.59 + printf("vl_lapic_id = %x\n", ps->vl_lapic_id); 228.60 + ps->vl_apr = 0; 228.61 + // skip ps->vl_logical_dest && ps->vl_dest_format 228.62 + // IPF support physical destination mode only 228.63 + ps->vl_arb_id = 0; 228.64 + /* 228.65 + for ( i=0; i<4; i++ ) { 228.66 + ps->tmr[i] = 0; // edge trigger 228.67 + } 228.68 + */ 228.69 +} 228.70 + 228.71 +void vlapic_update_ext_irq(VCPU *vcpu) 228.72 +{ 228.73 + int vec; 228.74 + 228.75 + vl_apic_info *ps = get_psapic(vcpu); 228.76 + while ( (vec = highest_bits(ps->irr)) != NULL_VECTOR ) { 228.77 + clear_bit (vec, ps->irr); 228.78 + vmx_vcpu_pend_interrupt(vcpu, vec); 228.79 + } 228.80 +} 228.81 +#endif 228.82 + 228.83 void vlsapic_reset(VCPU *vcpu) 228.84 { 228.85 int i; 228.86 - VPD_CR(vcpu, lid) = 0; 228.87 +#ifdef V_IOSAPIC_READY 228.88 + vl_apic_info *psapic; // shared lapic inf. 
228.89 +#endif 228.90 + 228.91 + VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID); 228.92 VPD_CR(vcpu, ivr) = 0; 228.93 VPD_CR(vcpu,tpr) = 0x10000; 228.94 VPD_CR(vcpu, eoi) = 0; 228.95 @@ -281,6 +332,10 @@ void vlsapic_reset(VCPU *vcpu) 228.96 for ( i=0; i<4; i++) { 228.97 VLSAPIC_INSVC(vcpu,i) = 0; 228.98 } 228.99 +#ifdef V_IOSAPIC_READY 228.100 + vlapic_update_shared_info(vcpu); 228.101 + //vlapic_update_shared_irr(vcpu); 228.102 +#endif 228.103 DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) ); 228.104 } 228.105 228.106 @@ -414,6 +469,7 @@ void vmx_vcpu_pend_interrupt(VCPU *vcpu, 228.107 } 228.108 local_irq_save(spsr); 228.109 VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63); 228.110 + //vlapic_update_shared_irr(vcpu); 228.111 local_irq_restore(spsr); 228.112 vcpu->arch.irq_new_pending = 1; 228.113 } 228.114 @@ -432,6 +488,7 @@ void vmx_vcpu_pend_batch_interrupt(VCPU 228.115 for (i=0 ; i<4; i++ ) { 228.116 VPD_CR(vcpu,irr[i]) |= pend_irr[i]; 228.117 } 228.118 + //vlapic_update_shared_irr(vcpu); 228.119 local_irq_restore(spsr); 228.120 vcpu->arch.irq_new_pending = 1; 228.121 } 228.122 @@ -518,6 +575,7 @@ uint64_t guest_read_vivr(VCPU *vcpu) 228.123 VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63)); 228.124 VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63)); 228.125 update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write 228.126 + //vlapic_update_shared_irr(vcpu); 228.127 local_irq_restore(spsr); 228.128 return (uint64_t)vec; 228.129 }
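Note on the vlsapic.c hunks above: vlapic_update_shared_info() and vlapic_update_ext_irq() both operate on a per-vcpu slot inside the shared I/O page, indexed by vcpu id the way get_psapic() does. A reduced model of that layout and lookup (the structure and field names here are illustrative stand-ins; the real layout lives in public/io/ioreq.h):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_VCPUS 4

    /* stand-in for the per-vcpu virtual-LAPIC state in the shared page */
    typedef struct {
        uint32_t vl_lapic_id;
        uint32_t vl_apr;
        uint32_t vl_arb_id;
        uint64_t irr[4];          /* interrupts pended by the device model */
    } vl_apic_info_model;

    typedef struct {
        vl_apic_info_model apic_intr;   /* one slot per vcpu */
    } vcpu_iodata_model;

    typedef struct {
        vcpu_iodata_model vcpu_iodata[MAX_VCPUS];
    } shared_iopage_model;

    /* get_psapic() analogue: index the shared page by vcpu id */
    static vl_apic_info_model *get_slot(shared_iopage_model *sp, int id)
    {
        return &sp->vcpu_iodata[id].apic_intr;
    }

    int main(void)
    {
        static shared_iopage_model page;    /* stands in for the mapped page */
        vl_apic_info_model *ps = get_slot(&page, 1);
        uint64_t lid = 0x12340000;          /* pretend CR.LID value */

        /* mirror of the id publication in vlapic_update_shared_info() */
        ps->vl_lapic_id = ((lid >> 16) & 0xffff) << 16;
        ps->vl_apr = 0;
        ps->vl_arb_id = 0;
        printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
        return 0;
    }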
229.1 --- a/xen/arch/ia64/vmmu.c Wed Aug 03 09:35:16 2005 +0000 229.2 +++ b/xen/arch/ia64/vmmu.c Wed Aug 03 09:35:38 2005 +0000 229.3 @@ -145,7 +145,7 @@ static thash_cb_t *init_domain_vhpt(stru 229.4 thash_cb_t *vhpt; 229.5 PTA pta_value; 229.6 229.7 - page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER); 229.8 + page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0); 229.9 if ( page == NULL ) { 229.10 panic("No enough contiguous memory for init_domain_mm\n"); 229.11 } 229.12 @@ -187,7 +187,7 @@ thash_cb_t *init_domain_tlb(struct vcpu 229.13 tlb_special_t *ts; 229.14 thash_cb_t *tlb; 229.15 229.16 - page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER); 229.17 + page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0); 229.18 if ( page == NULL ) { 229.19 panic("No enough contiguous memory for init_domain_mm\n"); 229.20 } 229.21 @@ -224,7 +224,7 @@ alloc_pmt(struct domain *d) 229.22 /* Only called once */ 229.23 ASSERT(d->arch.pmt); 229.24 229.25 - page = alloc_domheap_pages(NULL, get_order(d->max_pages)); 229.26 + page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0); 229.27 ASSERT(page); 229.28 229.29 d->arch.pmt = page_to_virt(page);
230.1 --- a/xen/arch/ia64/vmx_hypercall.c Wed Aug 03 09:35:16 2005 +0000 230.2 +++ b/xen/arch/ia64/vmx_hypercall.c Wed Aug 03 09:35:38 2005 +0000 230.3 @@ -29,6 +29,7 @@ 230.4 #include <asm/regionreg.h> 230.5 #include <asm/page.h> 230.6 #include <xen/mm.h> 230.7 +#include <xen/multicall.h> 230.8 230.9 230.10 void hyper_not_support(void) 230.11 @@ -51,6 +52,42 @@ void hyper_mmu_update(void) 230.12 vmx_vcpu_increment_iip(vcpu); 230.13 } 230.14 230.15 +unsigned long __hypercall_create_continuation( 230.16 + unsigned int op, unsigned int nr_args, ...) 230.17 +{ 230.18 + struct mc_state *mcs = &mc_state[smp_processor_id()]; 230.19 + VCPU *vcpu = current; 230.20 + struct cpu_user_regs *regs = vcpu_regs(vcpu); 230.21 + unsigned int i; 230.22 + va_list args; 230.23 + 230.24 + va_start(args, nr_args); 230.25 + if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) { 230.26 + panic("PREEMPT happen in multicall\n"); // Not support yet 230.27 + } else { 230.28 + vmx_vcpu_set_gr(vcpu, 15, op, 0); 230.29 + for ( i = 0; i < nr_args; i++) { 230.30 + switch (i) { 230.31 + case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0); 230.32 + break; 230.33 + case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0); 230.34 + break; 230.35 + case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0); 230.36 + break; 230.37 + case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0); 230.38 + break; 230.39 + case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0); 230.40 + break; 230.41 + default: panic("Too many args for hypercall continuation\n"); 230.42 + break; 230.43 + } 230.44 + } 230.45 + } 230.46 + vcpu->arch.hypercall_continuation = 1; 230.47 + va_end(args); 230.48 + return op; 230.49 +} 230.50 + 230.51 void hyper_dom_mem_op(void) 230.52 { 230.53 VCPU *vcpu=current; 230.54 @@ -65,7 +102,13 @@ void hyper_dom_mem_op(void) 230.55 printf("do_dom_mem return value: %lx\n", ret); 230.56 vmx_vcpu_set_gr(vcpu, 8, ret, 0); 230.57 230.58 - vmx_vcpu_increment_iip(vcpu); 230.59 + /* Hard to define a special return value to indicate hypercall restart. 230.60 + * So just add a new mark, which is SMP safe 230.61 + */ 230.62 + if (vcpu->arch.hypercall_continuation == 1) 230.63 + vcpu->arch.hypercall_continuation = 0; 230.64 + else 230.65 + vmx_vcpu_increment_iip(vcpu); 230.66 } 230.67 230.68
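Note on __hypercall_create_continuation() above: a preempted hypercall is restarted by re-writing the guest's argument registers (r15 carries the op, r16..r20 up to five arguments) and raising a per-vcpu flag so hyper_dom_mem_op() skips the iip increment, re-executing the same break instruction. A small self-contained sketch of the varargs re-packing, with a plain array standing in for the vmx_vcpu_set_gr() register file:

    #include <stdarg.h>
    #include <stdio.h>

    #define GR_OP    15   /* r15: hypercall number       */
    #define GR_ARG0  16   /* r16..r20: up to 5 arguments */

    static unsigned long guest_gr[32];  /* stand-in for the guest GRs */
    static int hypercall_continuation;

    static unsigned long create_continuation(unsigned int op,
                                             unsigned int nr_args, ...)
    {
        va_list args;
        unsigned int i;

        va_start(args, nr_args);
        guest_gr[GR_OP] = op;
        for (i = 0; i < nr_args && i < 5; i++)
            guest_gr[GR_ARG0 + i] = va_arg(args, unsigned long);
        va_end(args);

        hypercall_continuation = 1;     /* tells caller: do not bump iip */
        return op;
    }

    int main(void)
    {
        create_continuation(12, 3, 0x1000UL, 0x2000UL, 42UL);
        printf("op=%lu a0=%#lx a1=%#lx a2=%#lx cont=%d\n",
               guest_gr[GR_OP], guest_gr[GR_ARG0], guest_gr[GR_ARG0 + 1],
               guest_gr[GR_ARG0 + 2], hypercall_continuation);
        return 0;
    }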
231.1 --- a/xen/arch/ia64/vmx_ivt.S Wed Aug 03 09:35:16 2005 +0000 231.2 +++ b/xen/arch/ia64/vmx_ivt.S Wed Aug 03 09:35:38 2005 +0000 231.3 @@ -560,6 +560,21 @@ END(vmx_virtual_exirq) 231.4 VMX_DBG_FAULT(19) 231.5 VMX_FAULT(19) 231.6 231.7 + .org vmx_ia64_ivt+0x5000 231.8 +///////////////////////////////////////////////////////////////////////////////////////// 231.9 +// 0x5000 Entry 20 (size 16 bundles) Page Not Present 231.10 +ENTRY(vmx_page_not_present) 231.11 + VMX_REFLECT(20) 231.12 +END(vmx_page_not_present) 231.13 + 231.14 + .org vmx_ia64_ivt+0x5100 231.15 +///////////////////////////////////////////////////////////////////////////////////////// 231.16 +// 0x5100 Entry 21 (size 16 bundles) Key Permission vector 231.17 +ENTRY(vmx_key_permission) 231.18 + VMX_REFLECT(21) 231.19 +END(vmx_key_permission) 231.20 + 231.21 + .org vmx_ia64_ivt+0x5200 231.22 ///////////////////////////////////////////////////////////////////////////////////////// 231.23 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 231.24 ENTRY(vmx_iaccess_rights)
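Note on the vmx_ivt.S hunk above: the two new entries follow the architected IVT layout — vectors 0..19 are 64 bundles (0x400 bytes) apart and vectors 20 and up are 16 bundles (0x100 bytes) apart, which is why Page Not Present lands at +0x5000 and Key Permission at +0x5100. A quick check of that arithmetic:

    #include <stdio.h>

    /* ia64 IVT: first 20 vectors take 0x400 bytes each, the rest 0x100 */
    static unsigned long ivt_offset(unsigned int vector)
    {
        return vector < 20 ? vector * 0x400UL
                           : 0x5000UL + (vector - 20) * 0x100UL;
    }

    int main(void)
    {
        printf("vector 20 (page not present) -> +%#lx\n", ivt_offset(20));
        printf("vector 21 (key permission)   -> +%#lx\n", ivt_offset(21));
        printf("vector 22 (iaccess rights)   -> +%#lx\n", ivt_offset(22));
        return 0;
    }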
232.1 --- a/xen/arch/ia64/vmx_support.c Wed Aug 03 09:35:16 2005 +0000 232.2 +++ b/xen/arch/ia64/vmx_support.c Wed Aug 03 09:35:38 2005 +0000 232.3 @@ -37,18 +37,19 @@ void vmx_wait_io(void) 232.4 struct vcpu *v = current; 232.5 struct domain *d = v->domain; 232.6 extern void do_block(); 232.7 + int port = iopacket_port(d); 232.8 232.9 do { 232.10 - if (!test_bit(IOPACKET_PORT, 232.11 + if (!test_bit(port, 232.12 &d->shared_info->evtchn_pending[0])) 232.13 do_block(); 232.14 232.15 /* Unblocked when some event is coming. Clear pending indication 232.16 * immediately if deciding to go for io assist 232.17 */ 232.18 - if (test_and_clear_bit(IOPACKET_PORT, 232.19 + if (test_and_clear_bit(port, 232.20 &d->shared_info->evtchn_pending[0])) { 232.21 - clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel); 232.22 + clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel); 232.23 clear_bit(0, &v->vcpu_info->evtchn_upcall_pending); 232.24 vmx_io_assist(v); 232.25 } 232.26 @@ -66,7 +67,7 @@ void vmx_wait_io(void) 232.27 * nothing losed. Next loop will check I/O channel to fix this 232.28 * window. 232.29 */ 232.30 - clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel); 232.31 + clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel); 232.32 } 232.33 else 232.34 break; 232.35 @@ -88,7 +89,7 @@ void vmx_io_assist(struct vcpu *v) 232.36 * This shared page contains I/O request between emulation code 232.37 * and device model. 232.38 */ 232.39 - vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va; 232.40 + vio = get_vio(v->domain, v->vcpu_id); 232.41 if (!vio) 232.42 panic("Corruption: bad shared page: %lx\n", (unsigned long)vio); 232.43 232.44 @@ -127,6 +128,7 @@ void vmx_intr_assist(struct vcpu *v) 232.45 struct domain *d = v->domain; 232.46 extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, 232.47 unsigned long *pend_irr); 232.48 + int port = iopacket_port(d); 232.49 232.50 /* I/O emulation is atomic, so it's impossible to see execution flow 232.51 * out of vmx_wait_io, when guest is still waiting for response. 232.52 @@ -135,10 +137,10 @@ void vmx_intr_assist(struct vcpu *v) 232.53 panic("!!!Bad resume to guest before I/O emulation is done.\n"); 232.54 232.55 /* Clear indicator specific to interrupt delivered from DM */ 232.56 - if (test_and_clear_bit(IOPACKET_PORT, 232.57 + if (test_and_clear_bit(port, 232.58 &d->shared_info->evtchn_pending[0])) { 232.59 - if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5]) 232.60 - clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel); 232.61 + if (!d->shared_info->evtchn_pending[port >> 5]) 232.62 + clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel); 232.63 232.64 if (!v->vcpu_info->evtchn_pending_sel) 232.65 clear_bit(0, &v->vcpu_info->evtchn_upcall_pending); 232.66 @@ -149,11 +151,14 @@ void vmx_intr_assist(struct vcpu *v) 232.67 * shares same event channel as I/O emulation, with corresponding 232.68 * indicator possibly cleared when vmx_wait_io(). 232.69 */ 232.70 - vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va; 232.71 + vio = get_vio(v->domain, v->vcpu_id); 232.72 if (!vio) 232.73 panic("Corruption: bad shared page: %lx\n", (unsigned long)vio); 232.74 232.75 - vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]); 232.76 - memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr)); 232.77 +#ifdef V_IOSAPIC_READY 232.78 + vlapic_update_ext_irq(v); 232.79 +#else 232.80 + panic("IOSAPIC model is missed in qemu\n"); 232.81 +#endif 232.82 return; 232.83 }
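Note on the vmx_support.c hunks above: the fixed IOPACKET_PORT becomes a per-domain value fetched via iopacket_port(d), but the bit arithmetic is unchanged — the port is a bit index into shared_info->evtchn_pending[], and port>>5 selects the corresponding word in the per-vcpu pending selector. A self-contained model of the test/clear sequence (32-bit words are assumed here purely for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* word/bit helper matching the port>>5 arithmetic used above */
    static int test_and_clear_bit32(int nr, uint32_t *addr)
    {
        uint32_t mask = 1u << (nr & 31);
        uint32_t *word = addr + (nr >> 5);
        int old = (*word & mask) != 0;
        *word &= ~mask;
        return old;
    }

    int main(void)
    {
        uint32_t evtchn_pending[32] = { 0 };  /* shared_info analogue       */
        uint32_t pending_sel = 0;             /* per-vcpu selector analogue */
        int port = 35;                        /* via iopacket_port() in Xen */

        /* device model signals the port */
        evtchn_pending[port >> 5] |= 1u << (port & 31);
        pending_sel |= 1u << (port >> 5);

        /* consumer side: clear the pending bit, then drop the selector
         * bit once no other event in that word remains pending */
        if (test_and_clear_bit32(port, evtchn_pending)) {
            if (!evtchn_pending[port >> 5])
                pending_sel &= ~(1u << (port >> 5));
            printf("port %d consumed, sel=%#x\n", port, pending_sel);
        }
        return 0;
    }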
233.1 --- a/xen/arch/ia64/vmx_vcpu.c Wed Aug 03 09:35:16 2005 +0000 233.2 +++ b/xen/arch/ia64/vmx_vcpu.c Wed Aug 03 09:35:38 2005 +0000 233.3 @@ -23,7 +23,7 @@ 233.4 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 233.5 */ 233.6 233.7 -#include <linux/sched.h> 233.8 +#include <xen/sched.h> 233.9 #include <public/arch-ia64.h> 233.10 #include <asm/ia64_int.h> 233.11 #include <asm/vmx_vcpu.h> 233.12 @@ -201,7 +201,7 @@ vmx_vcpu_get_vtlb(VCPU *vcpu) 233.13 struct virutal_platform_def * 233.14 vmx_vcpu_get_plat(VCPU *vcpu) 233.15 { 233.16 - return &(vcpu->arch.arch_vmx.vmx_platform); 233.17 + return &(vcpu->domain->arch.vmx_platform); 233.18 } 233.19 233.20 233.21 @@ -213,7 +213,6 @@ ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 va 233.22 233.23 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val) 233.24 { 233.25 - extern void set_one_rr(UINT64, UINT64); 233.26 ia64_rr oldrr,newrr; 233.27 thash_cb_t *hcb; 233.28 oldrr=vmx_vcpu_rr(vcpu,reg); 233.29 @@ -375,7 +374,7 @@ IA64FAULT 233.30 vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val) 233.31 { 233.32 REGS *regs=vcpu_regs(vcpu); 233.33 - u64 nat; 233.34 + int nat; 233.35 //TODO, Eddie 233.36 if (!regs) return 0; 233.37 if (reg >= 16 && reg < 32) {
234.1 --- a/xen/arch/ia64/vmx_virt.c Wed Aug 03 09:35:16 2005 +0000 234.2 +++ b/xen/arch/ia64/vmx_virt.c Wed Aug 03 09:35:38 2005 +0000 234.3 @@ -1193,7 +1193,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, 234.4 case 23:return vmx_vcpu_set_ifs(vcpu,r2); 234.5 case 24:return vmx_vcpu_set_iim(vcpu,r2); 234.6 case 25:return vmx_vcpu_set_iha(vcpu,r2); 234.7 - case 64:return vmx_vcpu_set_lid(vcpu,r2); 234.8 + case 64:printk("SET LID to 0x%lx\n", r2); 234.9 + return vmx_vcpu_set_lid(vcpu,r2); 234.10 case 65:return IA64_NO_FAULT; 234.11 case 66:return vmx_vcpu_set_tpr(vcpu,r2); 234.12 case 67:return vmx_vcpu_set_eoi(vcpu,r2); 234.13 @@ -1253,9 +1254,9 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp 234.14 case 23:return cr_get(ifs); 234.15 case 24:return cr_get(iim); 234.16 case 25:return cr_get(iha); 234.17 - case 64:val = ia64_getreg(_IA64_REG_CR_LID); 234.18 - return vmx_vcpu_set_gr(vcpu,tgt,val,0); 234.19 -// case 64:return cr_get(lid); 234.20 +// case 64:val = ia64_getreg(_IA64_REG_CR_LID); 234.21 +// return vmx_vcpu_set_gr(vcpu,tgt,val,0); 234.22 + case 64:return cr_get(lid); 234.23 case 65: 234.24 vmx_vcpu_get_ivr(vcpu,&val); 234.25 return vmx_vcpu_set_gr(vcpu,tgt,val,0);
235.1 --- a/xen/arch/ia64/vtlb.c Wed Aug 03 09:35:16 2005 +0000 235.2 +++ b/xen/arch/ia64/vtlb.c Wed Aug 03 09:35:38 2005 +0000 235.3 @@ -23,6 +23,7 @@ 235.4 235.5 #include <linux/sched.h> 235.6 #include <asm/tlb.h> 235.7 +#include <asm/mm.h> 235.8 #include <asm/vmx_mm_def.h> 235.9 #include <asm/gcc_intrin.h> 235.10 #include <xen/interrupt.h> 235.11 @@ -359,7 +360,10 @@ thash_data_t *__alloc_chain(thash_cb_t * 235.12 void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 235.13 { 235.14 thash_data_t *hash_table, *cch; 235.15 + int flag; 235.16 rr_t vrr; 235.17 + u64 gppn; 235.18 + u64 ppns, ppne; 235.19 235.20 hash_table = (hcb->hash_func)(hcb->pta, 235.21 va, entry->rid, entry->ps); 235.22 @@ -375,7 +379,18 @@ void vtlb_insert(thash_cb_t *hcb, thash_ 235.23 *hash_table = *entry; 235.24 hash_table->next = cch; 235.25 } 235.26 - thash_insert (hcb->ts->vhpt, entry, va); 235.27 + if(hcb->vcpu->domain->domain_id==0){ 235.28 + thash_insert(hcb->ts->vhpt, entry, va); 235.29 + return; 235.30 + } 235.31 + flag = 1; 235.32 + gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT; 235.33 + ppns = PAGEALIGN((entry->ppn<<12),entry->ps); 235.34 + ppne = ppns + PSIZE(entry->ps); 235.35 + if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000))) 235.36 + flag = 0; 235.37 + if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag)) 235.38 + thash_insert(hcb->ts->vhpt, entry, va); 235.39 return ; 235.40 } 235.41 235.42 @@ -427,18 +442,22 @@ static void rem_thash(thash_cb_t *hcb, t 235.43 thash_data_t *hash_table, *p, *q; 235.44 thash_internal_t *priv = &hcb->priv; 235.45 int idx; 235.46 - 235.47 + 235.48 hash_table = priv->hash_base; 235.49 if ( hash_table == entry ) { 235.50 - __rem_hash_head (hcb, entry); 235.51 +// if ( PURGABLE_ENTRY(hcb, entry) ) { 235.52 + __rem_hash_head (hcb, entry); 235.53 +// } 235.54 return ; 235.55 } 235.56 // remove from collision chain 235.57 p = hash_table; 235.58 for ( q=p->next; q; q = p->next ) { 235.59 - if ( q == entry ) { 235.60 - p->next = q->next; 235.61 - __rem_chain(hcb, entry); 235.62 + if ( q == entry ){ 235.63 +// if ( PURGABLE_ENTRY(hcb,q ) ) { 235.64 + p->next = q->next; 235.65 + __rem_chain(hcb, entry); 235.66 +// } 235.67 return ; 235.68 } 235.69 p = q; 235.70 @@ -939,7 +958,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb) 235.71 if ( sanity_check == 0 ) return; 235.72 sanity_check --; 235.73 s_sect.v = 0; 235.74 -// page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER); 235.75 +// page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0); 235.76 // if ( page == NULL ) { 235.77 // panic("No enough contiguous memory for init_domain_mm\n"); 235.78 // };
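Note on the vtlb_insert() hunk above: entries are mirrored into the machine VHPT only for ordinary RAM — a mapping whose physical span crosses the legacy VGA/expansion-ROM boundaries at 0xA0000 or 0xC0000, or whose gppn is not backed by memory, stays out. The boundary test is a straddle check; a compact restatement with a hypothetical helper name:

    #include <stdio.h>
    #include <stdint.h>

    /* 1 if [start, end) straddles the boundary address b */
    static int straddles(uint64_t start, uint64_t end, uint64_t b)
    {
        return start <= b && end > b;
    }

    /* mirror of the flag computation in vtlb_insert(): skip the VHPT
     * insertion when the physical range crosses 0xA0000 or 0xC0000 */
    static int vhpt_insertable(uint64_t ppns, uint64_t ppne)
    {
        return !(straddles(ppns, ppne, 0xa0000) ||
                 straddles(ppns, ppne, 0xc0000));
    }

    int main(void)
    {
        printf("%d\n", vhpt_insertable(0x9f000, 0xa1000));   /* 0: crosses VGA   */
        printf("%d\n", vhpt_insertable(0xa1000, 0xb0000));   /* 1: inside hole   */
        printf("%d\n", vhpt_insertable(0x100000, 0x104000)); /* 1: plain RAM     */
        return 0;
    }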
236.1 --- a/xen/arch/ia64/xenasm.S Wed Aug 03 09:35:16 2005 +0000 236.2 +++ b/xen/arch/ia64/xenasm.S Wed Aug 03 09:35:38 2005 +0000 236.3 @@ -48,10 +48,11 @@ END(platform_is_hp_ski) 236.4 // FIXME? Note that this turns off the DB bit (debug) 236.5 #define PSR_BITS_TO_SET IA64_PSR_BN 236.6 236.7 +//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info); 236.8 GLOBAL_ENTRY(ia64_new_rr7) 236.9 // not sure this unwind statement is correct... 236.10 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1) 236.11 - alloc loc1 = ar.pfs, 2, 7, 0, 0 236.12 + alloc loc1 = ar.pfs, 3, 8, 0, 0 236.13 1: { 236.14 mov r28 = in0 // copy procedure index 236.15 mov r8 = ip // save ip to compute branch 236.16 @@ -72,6 +73,10 @@ 1: { 236.17 ;; 236.18 tpa loc5=loc5 // grab this BEFORE changing rr7 236.19 ;; 236.20 + mov loc7=in2 // arch_vcpu_info_t 236.21 + ;; 236.22 + tpa loc7=loc7 // grab this BEFORE changing rr7 236.23 + ;; 236.24 mov loc3 = psr // save psr 236.25 adds r8 = 1f-1b,r8 // calculate return address for call 236.26 ;; 236.27 @@ -206,6 +211,25 @@ 1: 236.28 ;; 236.29 itr.d dtr[r25]=r23 // wire in new mapping... 236.30 ;; 236.31 + // Map for arch_vcpu_info_t 236.32 + movl r22=SHARED_ARCHINFO_ADDR 236.33 + ;; 236.34 + movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW) 236.35 + ;; 236.36 + mov r21=loc7 // saved sharedinfo physical address 236.37 + ;; 236.38 + or r23=r25,r21 // construct PA | page properties 236.39 + mov r24=PAGE_SHIFT<<2 236.40 + ;; 236.41 + ptr.d r22,r24 236.42 + ;; 236.43 + mov cr.itir=r24 236.44 + mov cr.ifa=r22 236.45 + ;; 236.46 + mov r25=IA64_TR_ARCH_INFO 236.47 + ;; 236.48 + itr.d dtr[r25]=r23 // wire in new mapping... 236.49 + ;; 236.50 236.51 // done, switch back to virtual and return 236.52 mov r16=loc3 // r16= original psr 236.53 @@ -278,12 +302,9 @@ GLOBAL_ENTRY(__get_domain_bundle) 236.54 END(__get_domain_bundle) 236.55 236.56 GLOBAL_ENTRY(dorfirfi) 236.57 -#define SI_CR_IIP_OFFSET 0x10 236.58 -#define SI_CR_IPSR_OFFSET 0x08 236.59 -#define SI_CR_IFS_OFFSET 0x18 236.60 - movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET 236.61 - movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET 236.62 - movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET 236.63 + movl r16 = XSI_IIP 236.64 + movl r17 = XSI_IPSR 236.65 + movl r18 = XSI_IFS 236.66 ;; 236.67 ld8 r16 = [r16] 236.68 ld8 r17 = [r17]
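Note on the xenasm.S hunk above: ia64_new_rr7() gains a third argument (the arch_vcpu_info page), takes its physical address with tpa before rr7 changes, and pins it at SHARED_ARCHINFO_ADDR with a translation register — the inserted value is simply the page properties OR'd onto the physical address, with cr.itir carrying the page size. A sketch of that value construction (PTE bit encodings as in Linux's asm-ia64/pgtable.h, reproduced from memory, and 16KB pages assumed):

    #include <stdio.h>
    #include <stdint.h>

    /* abridged ia64 PTE field encodings (assumed, not copied from source) */
    #define _PAGE_P        (1UL << 0)         /* present            */
    #define _PAGE_MA_WB    (0UL << 2)         /* write-back memory  */
    #define _PAGE_A        (1UL << 5)         /* accessed           */
    #define _PAGE_D        (1UL << 6)         /* dirty              */
    #define _PAGE_PL_2     (2UL << 7)         /* privilege level 2  */
    #define _PAGE_AR_RW    (2UL << 9)         /* read/write         */
    #define _PAGE_ED       (1UL << 52)        /* exception deferral */
    #define __DIRTY_BITS   (_PAGE_ED | _PAGE_P | _PAGE_MA_WB | _PAGE_A | _PAGE_D)

    #define PAGE_SHIFT     14                 /* assumed 16KB pages */

    int main(void)
    {
        /* page-aligned physical address of the arch info page (made up) */
        uint64_t pa = 0x04012000UL & ~((1UL << PAGE_SHIFT) - 1);

        /* value wired into dtr[IA64_TR_ARCH_INFO]: properties | phys addr */
        uint64_t pte  = (__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW) | pa;
        /* cr.itir wants the page size in bits 7..2, hence PAGE_SHIFT<<2 */
        uint64_t itir = (uint64_t)PAGE_SHIFT << 2;

        printf("pte=%#llx itir=%#llx\n",
               (unsigned long long)pte, (unsigned long long)itir);
        return 0;
    }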
237.1 --- a/xen/arch/ia64/xenmem.c Wed Aug 03 09:35:16 2005 +0000 237.2 +++ b/xen/arch/ia64/xenmem.c Wed Aug 03 09:35:38 2005 +0000 237.3 @@ -65,7 +65,7 @@ paging_init (void) 237.4 #else // CONFIG_VTI 237.5 237.6 /* Allocate and map the machine-to-phys table */ 237.7 - if ((pg = alloc_domheap_pages(NULL, 10)) == NULL) 237.8 + if ((pg = alloc_domheap_pages(NULL, 10, 0)) == NULL) 237.9 panic("Not enough memory to bootstrap Xen.\n"); 237.10 memset(page_to_virt(pg), 0x55, 16UL << 20); 237.11 #endif // CONFIG_VTI
238.1 --- a/xen/arch/ia64/xenmisc.c Wed Aug 03 09:35:16 2005 +0000 238.2 +++ b/xen/arch/ia64/xenmisc.c Wed Aug 03 09:35:38 2005 +0000 238.3 @@ -103,11 +103,13 @@ while(1); 238.4 } 238.5 #endif 238.6 238.7 +#ifndef CONFIG_VTI 238.8 unsigned long __hypercall_create_continuation( 238.9 unsigned int op, unsigned int nr_args, ...) 238.10 { 238.11 printf("__hypercall_create_continuation: not implemented!!!\n"); 238.12 } 238.13 +#endif 238.14 238.15 /////////////////////////////// 238.16 238.17 @@ -115,14 +117,17 @@ unsigned long __hypercall_create_continu 238.18 // from arch/x86/apic.c 238.19 /////////////////////////////// 238.20 238.21 +extern unsigned long domain0_ready; 238.22 + 238.23 int reprogram_ac_timer(s_time_t timeout) 238.24 { 238.25 struct vcpu *v = current; 238.26 238.27 #ifdef CONFIG_VTI 238.28 - if(VMX_DOMAIN(v)) 238.29 +// if(VMX_DOMAIN(v)) 238.30 return 1; 238.31 #endif // CONFIG_VTI 238.32 + if (!domain0_ready) return 1; 238.33 local_cpu_data->itm_next = timeout; 238.34 if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout); 238.35 else vcpu_set_next_timer(current); 238.36 @@ -177,6 +182,22 @@ void show_registers(struct pt_regs *regs 238.37 printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n"); 238.38 } 238.39 238.40 +int is_kernel_text(unsigned long addr) 238.41 +{ 238.42 + extern char _stext[], _etext[]; 238.43 + if (addr >= (unsigned long) _stext && 238.44 + addr <= (unsigned long) _etext) 238.45 + return 1; 238.46 + 238.47 + return 0; 238.48 +} 238.49 + 238.50 +unsigned long kernel_text_end(void) 238.51 +{ 238.52 + extern char _etext[]; 238.53 + return (unsigned long) _etext; 238.54 +} 238.55 + 238.56 /////////////////////////////// 238.57 // from common/keyhandler.c 238.58 /////////////////////////////// 238.59 @@ -291,8 +312,8 @@ void context_switch(struct vcpu *prev, s 238.60 static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50}; 238.61 static int i = 100; 238.62 int id = ((struct vcpu *)current)->domain->domain_id & 0xf; 238.63 -if (!cnt[id]--) { printk("%x",id); cnt[id] = 500; } 238.64 -if (!i--) { printk("+",id); cnt[id] = 1000; } 238.65 +if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; } 238.66 +if (!i--) { printk("+",id); i = 1000000; } 238.67 } 238.68 clear_bit(_VCPUF_running, &prev->vcpu_flags); 238.69 //if (!is_idle_task(next->domain) )
239.1 --- a/xen/arch/ia64/xensetup.c Wed Aug 03 09:35:16 2005 +0000 239.2 +++ b/xen/arch/ia64/xensetup.c Wed Aug 03 09:35:38 2005 +0000 239.3 @@ -136,6 +136,12 @@ struct ns16550_defaults ns16550_com1 = { 239.4 .stop_bits = 1 239.5 }; 239.6 239.7 +struct ns16550_defaults ns16550_com2 = { 239.8 + .data_bits = 8, 239.9 + .parity = 'n', 239.10 + .stop_bits = 1 239.11 +}; 239.12 + 239.13 void start_kernel(void) 239.14 { 239.15 unsigned char *cmdline; 239.16 @@ -158,7 +164,13 @@ void start_kernel(void) 239.17 239.18 /* We initialise the serial devices very early so we can get debugging. */ 239.19 if (running_on_sim) hpsim_serial_init(); 239.20 - else ns16550_init(0, &ns16550_com1); 239.21 + else { 239.22 + ns16550_init(0, &ns16550_com1); 239.23 + /* Also init com2 for Tiger4. */ 239.24 + ns16550_com2.io_base = 0x2f8; 239.25 + ns16550_com2.irq = 3; 239.26 + ns16550_init(1, &ns16550_com2); 239.27 + } 239.28 serial_init_preirq(); 239.29 239.30 init_console();
240.1 --- a/xen/arch/ia64/xentime.c Wed Aug 03 09:35:16 2005 +0000 240.2 +++ b/xen/arch/ia64/xentime.c Wed Aug 03 09:35:38 2005 +0000 240.3 @@ -27,6 +27,7 @@ 240.4 #include <asm/sections.h> 240.5 #include <asm/system.h> 240.6 #ifdef XEN 240.7 +#include <asm/vcpu.h> 240.8 #include <linux/jiffies.h> // not included by xen/sched.h 240.9 #endif 240.10 #include <xen/softirq.h> 240.11 @@ -143,8 +144,8 @@ xen_timer_interrupt (int irq, void *dev_ 240.12 if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) { 240.13 printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n", 240.14 regs->cr_iip, 240.15 - current->vcpu_info->arch.interrupt_delivery_enabled, 240.16 - current->vcpu_info->arch.pending_interruption); 240.17 + VCPU(current,interrupt_delivery_enabled), 240.18 + VCPU(current,pending_interruption)); 240.19 count = 0; 240.20 } 240.21 #endif 240.22 @@ -159,7 +160,7 @@ xen_timer_interrupt (int irq, void *dev_ 240.23 // We have to ensure that domain0 is launched before we 240.24 // call vcpu_timer_expired on it 240.25 //domain0_ready = 1; // moved to xensetup.c 240.26 - current->vcpu_info->arch.pending_interruption = 1; 240.27 + VCPU(current,pending_interruption) = 1; 240.28 } 240.29 if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) { 240.30 vcpu_pend_timer(dom0->vcpu[0]);
275.1 --- a/xen/common/xmalloc.c Wed Aug 03 09:35:16 2005 +0000 275.2 +++ b/xen/common/xmalloc.c Wed Aug 03 09:35:38 2005 +0000 275.3 @@ -111,7 +111,9 @@ void *_xmalloc(size_t size, size_t align 275.4 unsigned long flags; 275.5 275.6 /* We currently always return cacheline aligned. */ 275.7 +#ifndef __ia64__ 275.8 BUG_ON(align > SMP_CACHE_BYTES); 275.9 +#endif 275.10 275.11 /* Add room for header, pad to align next header. */ 275.12 size += sizeof(struct xmalloc_hdr);
279.1 --- a/xen/include/asm-ia64/config.h Wed Aug 03 09:35:16 2005 +0000 279.2 +++ b/xen/include/asm-ia64/config.h Wed Aug 03 09:35:38 2005 +0000 279.3 @@ -230,6 +230,7 @@ struct screen_info { }; 279.4 279.5 #define FORCE_CRASH() asm("break 0;;"); 279.6 279.7 +void dummy_called(char *function); 279.8 #define dummy() dummy_called(__FUNCTION__) 279.9 279.10 // these declarations got moved at some point, find a better place for them
280.1 --- a/xen/include/asm-ia64/domain.h Wed Aug 03 09:35:16 2005 +0000 280.2 +++ b/xen/include/asm-ia64/domain.h Wed Aug 03 09:35:38 2005 +0000 280.3 @@ -8,6 +8,7 @@ 280.4 #include <asm/vmmu.h> 280.5 #include <asm/regionreg.h> 280.6 #include <public/arch-ia64.h> 280.7 +#include <asm/vmx_platform.h> 280.8 #endif // CONFIG_VTI 280.9 #include <xen/list.h> 280.10 280.11 @@ -42,6 +43,7 @@ struct arch_domain { 280.12 * max_pages in domain struct, which indicates maximum memory size 280.13 */ 280.14 unsigned long max_pfn; 280.15 + struct virutal_platform_def vmx_platform; 280.16 #endif //CONFIG_VTI 280.17 u64 xen_vastart; 280.18 u64 xen_vaend; 280.19 @@ -88,6 +90,7 @@ struct arch_vcpu { 280.20 thash_cb_t *vtlb; 280.21 char irq_new_pending; 280.22 char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI 280.23 + char hypercall_continuation; 280.24 //for phycial emulation 280.25 unsigned long old_rsc; 280.26 int mode_flags;
281.1 --- a/xen/include/asm-ia64/event.h Wed Aug 03 09:35:16 2005 +0000 281.2 +++ b/xen/include/asm-ia64/event.h Wed Aug 03 09:35:38 2005 +0000 281.3 @@ -9,6 +9,9 @@ 281.4 #ifndef __ASM_EVENT_H__ 281.5 #define __ASM_EVENT_H__ 281.6 281.7 +#include <public/arch-ia64.h> 281.8 +#include <asm/vcpu.h> 281.9 + 281.10 static inline void evtchn_notify(struct vcpu *v) 281.11 { 281.12 vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
282.1 --- a/xen/include/asm-ia64/ia64_int.h Wed Aug 03 09:35:16 2005 +0000 282.2 +++ b/xen/include/asm-ia64/ia64_int.h Wed Aug 03 09:35:38 2005 +0000 282.3 @@ -37,7 +37,9 @@ 282.4 #define IA64_RFI_IN_PROGRESS 0x0002 282.5 #define IA64_RETRY 0x0003 282.6 #ifdef CONFIG_VTI 282.7 -#define IA64_FAULT 0x0002 282.8 +#undef IA64_NO_FAULT 282.9 +#define IA64_NO_FAULT 0x0000 282.10 +#define IA64_FAULT 0x0001 282.11 #endif //CONFIG_VTI 282.12 #define IA64_FORCED_IFA 0x0004 282.13 #define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
283.1 --- a/xen/include/asm-ia64/privop.h Wed Aug 03 09:35:16 2005 +0000 283.2 +++ b/xen/include/asm-ia64/privop.h Wed Aug 03 09:35:38 2005 +0000 283.3 @@ -138,14 +138,32 @@ typedef union U_INST64_M47 { 283.4 IA64_INST inst; 283.5 struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; }; 283.6 } INST64_M47; 283.7 + 283.8 typedef union U_INST64_M1{ 283.9 IA64_INST inst; 283.10 struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; }; 283.11 } INST64_M1; 283.12 + 283.13 +typedef union U_INST64_M2{ 283.14 + IA64_INST inst; 283.15 + struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; }; 283.16 +} INST64_M2; 283.17 + 283.18 +typedef union U_INST64_M3{ 283.19 + IA64_INST inst; 283.20 + struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; }; 283.21 +} INST64_M3; 283.22 + 283.23 typedef union U_INST64_M4 { 283.24 IA64_INST inst; 283.25 struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; }; 283.26 } INST64_M4; 283.27 + 283.28 +typedef union U_INST64_M5 { 283.29 + IA64_INST inst; 283.30 + struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; }; 283.31 +} INST64_M5; 283.32 + 283.33 typedef union U_INST64_M6 { 283.34 IA64_INST inst; 283.35 struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; }; 283.36 @@ -166,7 +184,10 @@ typedef union U_INST64 { 283.37 INST64_I28 I28; // mov from ar (I unit) 283.38 #ifdef CONFIG_VTI 283.39 INST64_M1 M1; // ld integer 283.40 + INST64_M2 M2; 283.41 + INST64_M3 M3; 283.42 INST64_M4 M4; // st integer 283.43 + INST64_M5 M5; 283.44 INST64_M6 M6; // ldfd floating pointer 283.45 #endif // CONFIG_VTI 283.46 INST64_M28 M28; // purge translation cache entry
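Note on the privop.h hunk above: the new INST64_M2/M3/M5 unions follow the established pattern — a 41-bit instruction slot overlaid with per-format bitfields so the emulator can pull operands straight out of a bundle. A small demonstration of the overlay technique (GCC-style LSB-first bitfield layout assumed, as the Xen code itself assumes):

    #include <stdio.h>

    /* same shape as INST64_M2 above, rebuilt here as a model */
    typedef union {
        unsigned long inst;
        struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
                               x6:6, m:1, major:4; };
    } inst64_m2_model;

    int main(void)
    {
        inst64_m2_model i = { .inst = 0 };

        /* compose field by field, then read the raw slot back */
        i.qp = 0;  i.r1 = 8;  i.r2 = 9;  i.r3 = 10;  i.x6 = 0x28;  i.major = 4;
        printf("raw=%#lx r1=r%d r2=r%d r3=r%d x6=%#x\n",
               i.inst, (int)i.r1, (int)i.r2, (int)i.r3, (unsigned)i.x6);
        return 0;
    }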
284.1 --- a/xen/include/asm-ia64/regionreg.h Wed Aug 03 09:35:16 2005 +0000 284.2 +++ b/xen/include/asm-ia64/regionreg.h Wed Aug 03 09:35:38 2005 +0000 284.3 @@ -39,4 +39,7 @@ typedef union ia64_rr { 284.4 #define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8) 284.5 #define RR_RID_MASK 0x00000000ffffff00L 284.6 284.7 + 284.8 +int set_one_rr(unsigned long rr, unsigned long val); 284.9 + 284.10 #endif /* !_REGIONREG_H_ */
285.1 --- a/xen/include/asm-ia64/vcpu.h Wed Aug 03 09:35:16 2005 +0000 285.2 +++ b/xen/include/asm-ia64/vcpu.h Wed Aug 03 09:35:38 2005 +0000 285.3 @@ -13,13 +13,9 @@ typedef int BOOLEAN; 285.4 struct vcpu; 285.5 typedef struct vcpu VCPU; 285.6 285.7 -// NOTE: The actual VCPU structure (struct virtualcpu) is defined in 285.8 -// thread.h. Moving it to here caused a lot of files to change, so 285.9 -// for now, we'll leave well enough alone. 285.10 typedef struct pt_regs REGS; 285.11 -//#define PSCB(vcpu) (((struct spk_thread_t *)vcpu)->pscb) 285.12 -//#define vcpu_regs(vcpu) &((struct spk_thread_t *)vcpu)->thread_regs 285.13 -//#define vcpu_thread(vcpu) ((struct spk_thread_t *)vcpu) 285.14 + 285.15 +#define VCPU(_v,_x) _v->vcpu_info->arch.privregs->_x 285.16 285.17 #define PRIVOP_ADDR_COUNT 285.18 #ifdef PRIVOP_ADDR_COUNT
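Note on the VCPU() macro introduced above: with privregs now a pointer in arch_vcpu_info_t, the macro gives every caller one spelling for "field _x of this vcpu's mapped register page" — VCPU(current, pending_interruption) expands to current->vcpu_info->arch.privregs->pending_interruption, which is exactly how the xentime.c hunk uses it. A reduced model showing the expansion end to end (all structure names are stand-ins):

    #include <stdio.h>

    typedef struct { int pending_interruption; } mapped_regs_model;
    typedef struct { mapped_regs_model *privregs; } arch_info_model;
    typedef struct { arch_info_model arch; } vcpu_info_model;
    typedef struct { vcpu_info_model *vcpu_info; } vcpu_model;

    /* same shape as the real macro, with defensive parens added */
    #define VCPU(_v, _x) ((_v)->vcpu_info->arch.privregs->_x)

    int main(void)
    {
        static mapped_regs_model regs;
        static vcpu_info_model info = { { &regs } };
        vcpu_model v = { &info };

        VCPU(&v, pending_interruption) = 1;   /* as in xen_timer_interrupt */
        printf("pending=%d\n", VCPU(&v, pending_interruption));
        return 0;
    }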
286.1 --- a/xen/include/asm-ia64/vmx.h Wed Aug 03 09:35:16 2005 +0000 286.2 +++ b/xen/include/asm-ia64/vmx.h Wed Aug 03 09:35:38 2005 +0000 286.3 @@ -23,6 +23,7 @@ 286.4 #define _ASM_IA64_VT_H 286.5 286.6 #define RR7_SWITCH_SHIFT 12 /* 4k enough */ 286.7 +#include <public/io/ioreq.h> 286.8 286.9 extern void identify_vmx_feature(void); 286.10 extern unsigned int vmx_enabled; 286.11 @@ -35,6 +36,22 @@ extern vmx_insert_double_mapping(u64,u64 286.12 extern void vmx_purge_double_mapping(u64, u64, u64); 286.13 extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7); 286.14 286.15 + 286.16 extern void vmx_wait_io(void); 286.17 extern void vmx_io_assist(struct vcpu *v); 286.18 + 286.19 +static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu) 286.20 +{ 286.21 + return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu]; 286.22 +} 286.23 + 286.24 +static inline int iopacket_port(struct domain *d) 286.25 +{ 286.26 + return ((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->sp_global.eport; 286.27 +} 286.28 + 286.29 +static inline shared_iopage_t *get_sp(struct domain *d) 286.30 +{ 286.31 + return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va; 286.32 +} 286.33 #endif /* _ASM_IA64_VT_H */
287.1 --- a/xen/include/asm-ia64/vmx_uaccess.h Wed Aug 03 09:35:16 2005 +0000 287.2 +++ b/xen/include/asm-ia64/vmx_uaccess.h Wed Aug 03 09:35:38 2005 +0000 287.3 @@ -40,6 +40,8 @@ 287.4 */ 287.5 asm (".section \"__ex_table\", \"a\"\n\t.previous"); 287.6 287.7 +/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */ 287.8 +#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1) 287.9 /* For back compatibility */ 287.10 #define __access_ok(addr, size, segment) 1 287.11 #define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
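Note on IS_VMM_ADDRESS() above: it encodes the VT-i convention from the comment — guest-visible addresses have bit 60 equal to bit 59, and the VMM claims the half of each region where they differ — so the XOR of the two bits is a one-instruction classifier. A quick demonstration (note that 0xf000000000000000, Xen's own XEN_START_ADDR per xensystem.h, classifies as VMM):

    #include <stdio.h>
    #include <stdint.h>

    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
        uint64_t a = 0x6000000000001000UL;  /* bits 60,59 = 0,0 -> guest */
        uint64_t b = 0xf000000000001000UL;  /* bits 60,59 = 1,0 -> VMM   */

        printf("%#llx -> %s\n", (unsigned long long)a,
               IS_VMM_ADDRESS(a) ? "VMM" : "guest");
        printf("%#llx -> %s\n", (unsigned long long)b,
               IS_VMM_ADDRESS(b) ? "VMM" : "guest");
        return 0;
    }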
288.1 --- a/xen/include/asm-ia64/vmx_vcpu.h Wed Aug 03 09:35:16 2005 +0000 288.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Aug 03 09:35:38 2005 +0000 288.3 @@ -105,6 +105,10 @@ extern void vtm_set_itv(VCPU *vcpu); 288.4 extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm); 288.5 extern void vtm_domain_out(VCPU *vcpu); 288.6 extern void vtm_domain_in(VCPU *vcpu); 288.7 +#ifdef V_IOSAPIC_READY 288.8 +extern void vlapic_update_ext_irq(VCPU *vcpu); 288.9 +extern void vlapic_update_shared_info(VCPU *vcpu); 288.10 +#endif 288.11 extern void vlsapic_reset(VCPU *vcpu); 288.12 extern int vmx_check_pending_irq(VCPU *vcpu); 288.13 extern void guest_write_eoi(VCPU *vcpu); 288.14 @@ -399,6 +403,9 @@ IA64FAULT 288.15 vmx_vcpu_set_lid(VCPU *vcpu, u64 val) 288.16 { 288.17 VPD_CR(vcpu,lid)=val; 288.18 +#ifdef V_IOSAPIC_READY 288.19 + vlapic_update_shared_info(vcpu); 288.20 +#endif 288.21 return IA64_NO_FAULT; 288.22 } 288.23 extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
289.1 --- a/xen/include/asm-ia64/vmx_vpd.h Wed Aug 03 09:35:16 2005 +0000 289.2 +++ b/xen/include/asm-ia64/vmx_vpd.h Wed Aug 03 09:35:38 2005 +0000 289.3 @@ -25,37 +25,10 @@ 289.4 #ifndef __ASSEMBLY__ 289.5 289.6 #include <asm/vtm.h> 289.7 -#include <asm/vmx_platform.h> 289.8 #include <public/arch-ia64.h> 289.9 289.10 #define VPD_SHIFT 17 /* 128K requirement */ 289.11 #define VPD_SIZE (1 << VPD_SHIFT) 289.12 -typedef union { 289.13 - unsigned long value; 289.14 - struct { 289.15 - int a_int:1; 289.16 - int a_from_int_cr:1; 289.17 - int a_to_int_cr:1; 289.18 - int a_from_psr:1; 289.19 - int a_from_cpuid:1; 289.20 - int a_cover:1; 289.21 - int a_bsw:1; 289.22 - long reserved:57; 289.23 - }; 289.24 -} vac_t; 289.25 - 289.26 -typedef union { 289.27 - unsigned long value; 289.28 - struct { 289.29 - int d_vmsw:1; 289.30 - int d_extint:1; 289.31 - int d_ibr_dbr:1; 289.32 - int d_pmc:1; 289.33 - int d_to_pmd:1; 289.34 - int d_itm:1; 289.35 - long reserved:58; 289.36 - }; 289.37 -} vdc_t; 289.38 289.39 typedef struct { 289.40 unsigned long dcr; // CR0 289.41 @@ -89,29 +62,6 @@ typedef struct { 289.42 unsigned long rsv6[46]; 289.43 } cr_t; 289.44 289.45 -typedef struct vpd { 289.46 - vac_t vac; 289.47 - vdc_t vdc; 289.48 - unsigned long virt_env_vaddr; 289.49 - unsigned long reserved1[29]; 289.50 - unsigned long vhpi; 289.51 - unsigned long reserved2[95]; 289.52 - unsigned long vgr[16]; 289.53 - unsigned long vbgr[16]; 289.54 - unsigned long vnat; 289.55 - unsigned long vbnat; 289.56 - unsigned long vcpuid[5]; 289.57 - unsigned long reserved3[11]; 289.58 - unsigned long vpsr; 289.59 - unsigned long vpr; 289.60 - unsigned long reserved4[76]; 289.61 - unsigned long vcr[128]; 289.62 - unsigned long reserved5[128]; 289.63 - unsigned long reserved6[3456]; 289.64 - unsigned long vmm_avail[128]; 289.65 - unsigned long reserved7[4096]; 289.66 -} vpd_t; 289.67 - 289.68 void vmx_enter_scheduler(void); 289.69 289.70 //FIXME: Map for LID to vcpu, Eddie 289.71 @@ -133,7 +83,6 @@ struct arch_vmx_struct { 289.72 unsigned long rfi_ipsr; 289.73 unsigned long rfi_ifs; 289.74 unsigned long in_service[4]; // vLsapic inservice IRQ bits 289.75 - struct virutal_platform_def vmx_platform; 289.76 unsigned long flags; 289.77 }; 289.78 289.79 @@ -175,7 +124,6 @@ extern unsigned int opt_vmx_debug_level; 289.80 289.81 #endif //__ASSEMBLY__ 289.82 289.83 - 289.84 // VPD field offset 289.85 #define VPD_VAC_START_OFFSET 0 289.86 #define VPD_VDC_START_OFFSET 8
290.1 --- a/xen/include/asm-ia64/xensystem.h Wed Aug 03 09:35:16 2005 +0000 290.2 +++ b/xen/include/asm-ia64/xensystem.h Wed Aug 03 09:35:38 2005 +0000 290.3 @@ -21,10 +21,13 @@ 290.4 #define XEN_RR7_SWITCH_STUB 0xb700000000000000 290.5 #endif // CONFIG_VTI 290.6 290.7 +#define XEN_START_ADDR 0xf000000000000000 290.8 #define KERNEL_START 0xf000000004000000 290.9 #define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE 290.10 #define SHAREDINFO_ADDR 0xf100000000000000 290.11 #define VHPT_ADDR 0xf200000000000000 290.12 +#define SHARED_ARCHINFO_ADDR 0xf300000000000000 290.13 +#define XEN_END_ADDR 0xf400000000000000 290.14 290.15 #ifndef __ASSEMBLY__ 290.16 290.17 @@ -58,8 +61,9 @@ extern struct task_struct *vmx_ia64_swit 290.18 ia64_save_extra(prev); \ 290.19 if (IA64_HAS_EXTRA_STATE(next)) \ 290.20 ia64_load_extra(next); \ 290.21 - ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ 290.22 + /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \ 290.23 (last) = ia64_switch_to((next)); \ 290.24 + vcpu_set_next_timer(current); \ 290.25 } while (0) 290.26 #endif // CONFIG_VTI 290.27
310.1 --- a/xen/include/public/arch-ia64.h Wed Aug 03 09:35:16 2005 +0000 310.2 +++ b/xen/include/public/arch-ia64.h Wed Aug 03 09:35:38 2005 +0000 310.3 @@ -140,38 +140,121 @@ struct pt_regs { 310.4 struct pt_fpreg f11; /* scratch */ 310.5 }; 310.6 310.7 +typedef union { 310.8 + unsigned long value; 310.9 + struct { 310.10 + int a_int:1; 310.11 + int a_from_int_cr:1; 310.12 + int a_to_int_cr:1; 310.13 + int a_from_psr:1; 310.14 + int a_from_cpuid:1; 310.15 + int a_cover:1; 310.16 + int a_bsw:1; 310.17 + long reserved:57; 310.18 + }; 310.19 +} vac_t; 310.20 + 310.21 +typedef union { 310.22 + unsigned long value; 310.23 + struct { 310.24 + int d_vmsw:1; 310.25 + int d_extint:1; 310.26 + int d_ibr_dbr:1; 310.27 + int d_pmc:1; 310.28 + int d_to_pmd:1; 310.29 + int d_itm:1; 310.30 + long reserved:58; 310.31 + }; 310.32 +} vdc_t; 310.33 + 310.34 typedef struct { 310.35 - unsigned long ipsr; 310.36 - unsigned long iip; 310.37 - unsigned long ifs; 310.38 - unsigned long precover_ifs; 310.39 - unsigned long isr; 310.40 - unsigned long ifa; 310.41 - unsigned long iipa; 310.42 - unsigned long iim; 310.43 - unsigned long unat; // not sure if this is needed until NaT arch is done 310.44 - unsigned long tpr; 310.45 - unsigned long iha; 310.46 - unsigned long itir; 310.47 - unsigned long itv; 310.48 - unsigned long pmv; 310.49 - unsigned long cmcv; 310.50 - unsigned long pta; 310.51 - int interrupt_collection_enabled; // virtual psr.ic 310.52 - int interrupt_delivery_enabled; // virtual psr.i 310.53 - int pending_interruption; 310.54 - int incomplete_regframe; // see SDM vol2 6.8 310.55 - unsigned long delivery_mask[4]; 310.56 - int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual 310.57 - int banknum; // 0 or 1, which virtual register bank is active 310.58 - unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active 310.59 - unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active 310.60 - unsigned long rrs[8]; // region registers 310.61 - unsigned long krs[8]; // kernel registers 310.62 - unsigned long pkrs[8]; // protection key registers 310.63 - unsigned long tmp[8]; // temp registers (e.g. 
for hyperprivops) 310.64 + vac_t vac; 310.65 + vdc_t vdc; 310.66 + unsigned long virt_env_vaddr; 310.67 + unsigned long reserved1[29]; 310.68 + unsigned long vhpi; 310.69 + unsigned long reserved2[95]; 310.70 + union { 310.71 + unsigned long vgr[16]; 310.72 + unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active 310.73 + }; 310.74 + union { 310.75 + unsigned long vbgr[16]; 310.76 + unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active 310.77 + }; 310.78 + unsigned long vnat; 310.79 + unsigned long vbnat; 310.80 + unsigned long vcpuid[5]; 310.81 + unsigned long reserved3[11]; 310.82 + unsigned long vpsr; 310.83 + unsigned long vpr; 310.84 + unsigned long reserved4[76]; 310.85 + union { 310.86 + unsigned long vcr[128]; 310.87 + struct { 310.88 + unsigned long dcr; // CR0 310.89 + unsigned long itm; 310.90 + unsigned long iva; 310.91 + unsigned long rsv1[5]; 310.92 + unsigned long pta; // CR8 310.93 + unsigned long rsv2[7]; 310.94 + unsigned long ipsr; // CR16 310.95 + unsigned long isr; 310.96 + unsigned long rsv3; 310.97 + unsigned long iip; 310.98 + unsigned long ifa; 310.99 + unsigned long itir; 310.100 + unsigned long iipa; 310.101 + unsigned long ifs; 310.102 + unsigned long iim; // CR24 310.103 + unsigned long iha; 310.104 + unsigned long rsv4[38]; 310.105 + unsigned long lid; // CR64 310.106 + unsigned long ivr; 310.107 + unsigned long tpr; 310.108 + unsigned long eoi; 310.109 + unsigned long irr[4]; 310.110 + unsigned long itv; // CR72 310.111 + unsigned long pmv; 310.112 + unsigned long cmcv; 310.113 + unsigned long rsv5[5]; 310.114 + unsigned long lrr0; // CR80 310.115 + unsigned long lrr1; 310.116 + unsigned long rsv6[46]; 310.117 + }; 310.118 + }; 310.119 + union { 310.120 + unsigned long reserved5[128]; 310.121 + struct { 310.122 + unsigned long precover_ifs; 310.123 + unsigned long unat; // not sure if this is needed until NaT arch is done 310.124 + int interrupt_collection_enabled; // virtual psr.ic 310.125 + int interrupt_delivery_enabled; // virtual psr.i 310.126 + int pending_interruption; 310.127 + int incomplete_regframe; // see SDM vol2 6.8 310.128 + unsigned long delivery_mask[4]; 310.129 + int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual 310.130 + int banknum; // 0 or 1, which virtual register bank is active 310.131 + unsigned long rrs[8]; // region registers 310.132 + unsigned long krs[8]; // kernel registers 310.133 + unsigned long pkrs[8]; // protection key registers 310.134 + unsigned long tmp[8]; // temp registers (e.g. for hyperprivops) 310.135 + }; 310.136 + }; 310.137 +#ifdef CONFIG_VTI 310.138 + unsigned long reserved6[3456]; 310.139 + unsigned long vmm_avail[128]; 310.140 + unsigned long reserved7[4096]; 310.141 +#endif 310.142 +} mapped_regs_t; 310.143 + 310.144 +typedef struct { 310.145 + mapped_regs_t *privregs; 310.146 int evtchn_vector; 310.147 } arch_vcpu_info_t; 310.148 + 310.149 +typedef mapped_regs_t vpd_t; 310.150 + 310.151 #define __ARCH_HAS_VCPU_INFO 310.152 310.153 typedef struct {
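Note on the rewritten mapped_regs_t above: it overlays the architected VPD layout (vac/vdc, vcr[128], bank registers) with the named fields the paravirtual path uses, so a single page serves both VT-i and non-VT-i guests. For the aliasing to work, each named control register must land on vcr[CR#] — lid at CR64, itv at CR72, lrr0 at CR80, and so on. A reduced model of just the vcr block with compile-time checks of those indices (C11, anonymous union/struct as in the header itself):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct {
        union {
            unsigned long vcr[128];
            struct {
                unsigned long dcr;            // CR0
                unsigned long itm;
                unsigned long iva;
                unsigned long rsv1[5];
                unsigned long pta;            // CR8
                unsigned long rsv2[7];
                unsigned long ipsr;           // CR16
                unsigned long isr;
                unsigned long rsv3;
                unsigned long iip;
                unsigned long ifa;
                unsigned long itir;
                unsigned long iipa;
                unsigned long ifs;
                unsigned long iim;            // CR24
                unsigned long iha;
                unsigned long rsv4[38];
                unsigned long lid;            // CR64
                unsigned long ivr;
                unsigned long tpr;
                unsigned long eoi;
                unsigned long irr[4];
                unsigned long itv;            // CR72
                unsigned long pmv;
                unsigned long cmcv;
                unsigned long rsv5[5];
                unsigned long lrr0;           // CR80
                unsigned long lrr1;
                unsigned long rsv6[46];
            };
        };
    } vcr_block_model;

    #define CR_OFF(f) offsetof(vcr_block_model, f)

    _Static_assert(CR_OFF(pta)  == 8  * sizeof(unsigned long), "pta is CR8");
    _Static_assert(CR_OFF(ipsr) == 16 * sizeof(unsigned long), "ipsr is CR16");
    _Static_assert(CR_OFF(iim)  == 24 * sizeof(unsigned long), "iim is CR24");
    _Static_assert(CR_OFF(lid)  == 64 * sizeof(unsigned long), "lid is CR64");
    _Static_assert(CR_OFF(itv)  == 72 * sizeof(unsigned long), "itv is CR72");
    _Static_assert(CR_OFF(lrr0) == 80 * sizeof(unsigned long), "lrr0 is CR80");
    _Static_assert(sizeof(vcr_block_model) == 128 * sizeof(unsigned long),
                   "block covers vcr[128] exactly");

    int main(void)
    {
        puts("mapped_regs_t vcr aliasing checks out");
        return 0;
    }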