ia64/xen-unstable
changeset 7333:760f5e85c706
Some outstanding bug fixes found in the VT merge:
- Consistency of the region ID mangling algorithm:
    - The metaphysical RID is not mangled, which may conflict with another
      domain's virtual RID.
    - Sometimes rr0 is mangled, but sometimes not.
    - Sometimes only the rid field is saved to saved_rr0_metaphysical, but
      sometimes the whole register value.
- NaT bit consumption happens but is handled as priv_emulate to make forward
  progress. This is definitely wrong. We traced the NaT consumption to
  fast_rfi, which doesn't save unat again after spilling guest state, and
  then uses the stale guest unat to fill guest state on return.
- In some corner cases the timer interrupt handler doesn't update itm and
  returns directly. When that happens, the machine timer interrupt
  disappears until a guest timer interrupt actively sets v_itm. But a VTi
  domain depends on ac_timer, which stops once the above condition occurs.
  If the current context is then a VTi domain, context switches stop and
  the machine halts.
Also, many compatibility issues in supporting non-VTi and VTi domains are
solved, e.g.:
- Change the lazy PAL mapping switch to an eager switch on every domain
  switch, since a VTi domain always depends on PAL calls.
- evtchn_notify should also vcpu_wake the target domain, since a VTi domain
  may block for I/O emulation. XenoLinux is free of this issue, since it is
  always runnable.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
--- a/xen/Rules.mk	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/Rules.mk	Thu Oct 13 14:24:45 2005 -0600
@@ -47,6 +47,7 @@ test-gcc-flag = $(shell $(CC) -v --help
 
 include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk
 
+CFLAGS += -D__HYPERVISOR__
 ifneq ($(debug),y)
 CFLAGS += -DNDEBUG
 ifeq ($(verbose),y)
--- a/xen/arch/ia64/Rules.mk	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/Rules.mk	Thu Oct 13 14:24:45 2005 -0600
@@ -24,7 +24,7 @@ CFLAGS += -I$(BASEDIR)/include/asm-ia64
 	-I$(BASEDIR)/include/asm-ia64/linux-null \
 	-I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
 CFLAGS += -Wno-pointer-arith -Wredundant-decls
-CFLAGS += -DIA64 -DXEN -DLINUX_2_6
+CFLAGS += -DIA64 -DXEN -DLINUX_2_6 -DV_IOSAPIC_READY
 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
 CFLAGS += -w -g
 ifeq ($(VALIDATE_VT),y)
--- a/xen/arch/ia64/asm-offsets.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/asm-offsets.c	Thu Oct 13 14:24:45 2005 -0600
@@ -59,6 +59,8 @@ void foo(void)
 	DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
 	DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
 	DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+	DEFINE(XSI_B0NATS_OFS, offsetof(mapped_regs_t, vbnat));
+	DEFINE(XSI_B1NATS_OFS, offsetof(mapped_regs_t, vnat));
 	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
 	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
 	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
@@ -79,6 +81,7 @@ void foo(void)
 	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
 	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
 	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
+	DEFINE(IA64_PGD, offsetof(struct domain, arch.mm));
 	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
 	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
--- a/xen/arch/ia64/linux-xen/entry.S	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/linux-xen/entry.S	Thu Oct 13 14:24:45 2005 -0600
@@ -900,10 +900,17 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	adds r7 = PT(EML_UNAT)+16,r12
 	;;
 	ld8 r7 = [r7]
+	;;
+#if 0
+leave_kernel_self:
+	cmp.ne p8,p0 = r0, r7
+(p8)	br.sptk.few leave_kernel_self
+	;;
+#endif
 (p6)	br.call.sptk.many b0=deliver_pending_interrupt
 	;;
 	mov ar.pfs=loc0
-	mov ar.unat=r7  /* load eml_unat  */
+	mov ar.unat=r7	/* load eml_unat */
 	mov r31=r0
 
--- a/xen/arch/ia64/vmx/vmx_entry.S	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Oct 13 14:24:45 2005 -0600
@@ -720,11 +720,11 @@ 1:
 
    // re-pin mappings for guest_vhpt
 
-   mov r24=IA64_TR_VHPT
+   mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5	// construct PA | page properties
-   mov r23 = VCPU_TLB_SHIFT<<2
+   mov r23 = IA64_GRANULE_SHIFT <<2
    ;;
   ptr.d in3,r23
   ;;
--- a/xen/arch/ia64/vmx/vmx_init.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Oct 13 14:24:45 2005 -0600
@@ -47,6 +47,7 @@
 #include <asm/processor.h>
 #include <asm/vmx.h>
 #include <xen/mm.h>
+#include <public/arch-ia64.h>
 
 /* Global flag to identify whether Intel vmx feature is on */
 u32 vmx_enabled = 0;
@@ -136,39 +137,6 @@ vmx_init_env(void)
 #endif
 }
 
-void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
-{
-	struct domain *d = v->domain;
-	shared_iopage_t *sp;
-
-	ASSERT(d != dom0); /* only for non-privileged vti domain */
-	d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
-	sp = get_sp(d);
-	memset((char *)sp,0,PAGE_SIZE);
-	/* FIXME: temp due to old CP */
-	sp->sp_global.eport = 2;
-#ifdef V_IOSAPIC_READY
-	sp->vcpu_number = 1;
-#endif
-	/* TEMP */
-	d->arch.vmx_platform.pib_base = 0xfee00000UL;
-
-	/* One more step to enable interrupt assist */
-	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
-	/* Only open one port for I/O and interrupt emulation */
-	if (v == d->vcpu[0]) {
-	    memset(&d->shared_info->evtchn_mask[0], 0xff,
-		sizeof(d->shared_info->evtchn_mask));
-	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
-	}
-
-	/* FIXME: only support PMT table continuously by far */
-//	d->arch.pmt = __va(c->pt_base);
-
-
-	vmx_final_setup_domain(d);
-}
-
 typedef union {
 	u64 value;
 	struct {
@@ -376,40 +344,6 @@ vmx_final_setup_domain(struct domain *d)
 	/* Other vmx specific initialization work */
 }
 
-/*
- * Following stuff should really move to domain builder. However currently
- * XEN/IA64 doesn't export physical -> machine page table to domain builder,
- * instead only the copy. Also there's no hypercall to notify hypervisor
- * IO ranges by far. Let's enhance it later.
- */
-
-#define MEM_G   (1UL << 30)
-#define MEM_M   (1UL << 20)
-
-#define MMIO_START       (3 * MEM_G)
-#define MMIO_SIZE        (512 * MEM_M)
-
-#define VGA_IO_START     0xA0000UL
-#define VGA_IO_SIZE      0x20000
-
-#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE   (64*MEM_M)
-
-#define IO_PAGE_START    (LEGACY_IO_START + LEGACY_IO_SIZE)
-#define IO_PAGE_SIZE     PAGE_SIZE
-
-#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
-#define STORE_PAGE_SIZE  PAGE_SIZE
-
-#define IO_SAPIC_START   0xfec00000UL
-#define IO_SAPIC_SIZE    0x100000
-
-#define PIB_START        0xfee00000UL
-#define PIB_SIZE         0x100000
-
-#define GFW_START        (4*MEM_G -16*MEM_M)
-#define GFW_SIZE         (16*MEM_M)
-
 typedef struct io_range {
 	unsigned long start;
 	unsigned long size;
@@ -424,18 +358,26 @@ io_range_t io_ranges[] = {
 	{PIB_START, PIB_SIZE, GPFN_PIB},
 };
 
-#define VMX_SYS_PAGES	(2 + GFW_SIZE >> PAGE_SHIFT)
+#define VMX_SYS_PAGES	(2 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
 int vmx_alloc_contig_pages(struct domain *d)
 {
-	unsigned int order, i, j;
-	unsigned long start, end, pgnr, conf_nr;
+	unsigned int order;
+	unsigned long i, j, start, end, pgnr, conf_nr;
 	struct pfn_info *page;
 	struct vcpu *v = d->vcpu[0];
 
 	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
 
+	/* Mark I/O ranges */
+	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
+	    for (j = io_ranges[i].start;
+		 j < io_ranges[i].start + io_ranges[i].size;
+		 j += PAGE_SIZE)
+		map_domain_page(d, j, io_ranges[i].type);
+	}
+
 	conf_nr = VMX_CONFIG_PAGES(d);
 	order = get_order_from_pages(conf_nr);
 	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
@@ -462,32 +404,64 @@ int vmx_alloc_contig_pages(struct domain
 
 	d->arch.max_pfn = end >> PAGE_SHIFT;
 
-	order = get_order_from_pages(VMX_SYS_PAGES);
+	order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
 	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
 	    printk("Could not allocate order=%d pages for vmx contig alloc\n",
		order);
 	    return -1;
 	}
 
+	/* Map guest firmware */
+	pgnr = page_to_pfn(page);
+	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
+	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
+
+	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
+	    printk("Could not allocate order=1 pages for vmx contig alloc\n");
+	    return -1;
+	}
+
 	/* Map for shared I/O page and xenstore */
 	pgnr = page_to_pfn(page);
 	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
 	pgnr++;
 	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
-	pgnr++;
-
-	/* Map guest firmware */
-	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
-	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
-
-	/* Mark I/O ranges */
-	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
-	    for (j = io_ranges[i].start;
-		 j < io_ranges[i].start + io_ranges[i].size;
-		 j += PAGE_SIZE)
-		map_domain_page(d, j, io_ranges[i].type);
-	}
 
 	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
 	return 0;
 }
+
+void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
+{
+	struct domain *d = v->domain;
+	shared_iopage_t *sp;
+
+	ASSERT(d != dom0); /* only for non-privileged vti domain */
+	d->arch.vmx_platform.shared_page_va =
+		__va(__gpa_to_mpa(d, IO_PAGE_START));
+	sp = get_sp(d);
+	//memset((char *)sp,0,PAGE_SIZE);
+	//sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+	sp->vcpu_number = 1;
+#endif
+	/* TEMP */
+	d->arch.vmx_platform.pib_base = 0xfee00000UL;
+
+	/* One more step to enable interrupt assist */
+	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
+	/* Only open one port for I/O and interrupt emulation */
+	if (v == d->vcpu[0]) {
+	    memset(&d->shared_info->evtchn_mask[0], 0xff,
+		sizeof(d->shared_info->evtchn_mask));
+	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
+	}
+
+	/* FIXME: only support PMT table continuously by far */
+//	d->arch.pmt = __va(c->pt_base);
+
+
+	vmx_final_setup_domain(d);
+}
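The VMX_SYS_PAGES change above is a pure operator-precedence fix: in C, "+"
binds tighter than ">>", so the old definition expanded to
(2 + GFW_SIZE) >> PAGE_SHIFT and silently dropped the two extra system pages.
A minimal self-contained sketch of the difference (PAGE_SHIFT of 14 is an
assumption here, matching the usual 16KB ia64 page size):

    #include <stdio.h>

    #define MEM_M      (1UL << 20)
    #define GFW_SIZE   (16*MEM_M)   /* guest firmware size, as in arch-ia64.h */
    #define PAGE_SHIFT 14           /* assumed 16KB pages */

    int main(void)
    {
        /* old: parses as (2 + GFW_SIZE) >> PAGE_SHIFT */
        unsigned long buggy = (2 + GFW_SIZE >> PAGE_SHIFT);
        /* new: two system pages plus the firmware page count */
        unsigned long fixed = (2 + (GFW_SIZE >> PAGE_SHIFT));
        printf("buggy=%lu fixed=%lu\n", buggy, fixed);  /* 1024 vs 1026 */
        return 0;
    }

The undercounted value also made the old get_order_from_pages(VMX_SYS_PAGES)
call request too little memory, which the reordered allocation above avoids.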
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Oct 13 14:24:45 2005 -0600
@@ -101,7 +101,10 @@ vmx_ia64_handle_irq (ia64_vector vector,
 
 	    if (vector != IA64_TIMER_VECTOR) {
 		/* FIXME: Leave IRQ re-route later */
-		vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+		if (!VMX_DOMAIN(dom0->vcpu[0]))
+		    vcpu_pend_interrupt(dom0->vcpu[0],vector);
+		else
+		    vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
 		wake_dom0 = 1;
 	    }
 	    else {	// FIXME: Handle Timer only now
--- a/xen/arch/ia64/vmx/vmx_process.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c	Thu Oct 13 14:24:45 2005 -0600
@@ -271,7 +271,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
 {
     IA64_PSR vpsr;
     CACHE_LINE_TYPE type;
-    u64 vhpt_adr;
+    u64 vhpt_adr, gppa;
     ISR misr;
     ia64_rr vrr;
     REGS *regs;
@@ -314,9 +314,9 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
 //    prepare_if_physical_mode(v);
 
     if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
-	if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,data->ppn>>(PAGE_SHIFT-12))){
-	    vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-	    emulate_io_inst(v, vadr, data->ma);
+	gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+	if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+	    emulate_io_inst(v, gppa, data->ma);
 	    return IA64_FAULT;
 	}
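The point of the vmx_process.c hunk is that the I/O-space check must be made
on the guest physical address reconstructed from the vTLB entry, not on a ppn
shifted by the wrong amount. A self-contained sketch of that reconstruction,
with field names borrowed from the hunk and made-up example numbers:

    #include <stdio.h>

    /* Rebuild a guest physical address from a vTLB entry: keep the
     * in-page offset from the virtual address, take the frame from the
     * entry's ppn (ppn is in 4KB units, hence the ps-12 shift). */
    static unsigned long vtlb_gppa(unsigned long vadr,
                                   unsigned long ppn, unsigned int ps)
    {
        return (vadr & ((1UL << ps) - 1)) + ((ppn >> (ps - 12)) << ps);
    }

    int main(void)
    {
        /* 16KB page (ps=14), ppn 0x1004 in 4KB units, offset 0x123 */
        printf("gppa=0x%lx\n", vtlb_gppa(0x123, 0x1004, 14)); /* 0x1004123 */
        return 0;
    }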
--- a/xen/arch/ia64/vmx/vmx_support.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vmx_support.c	Thu Oct 13 14:24:45 2005 -0600
@@ -158,7 +158,7 @@ void vmx_intr_assist(struct vcpu *v)
 #ifdef V_IOSAPIC_READY
 	vlapic_update_ext_irq(v);
 #else
-	panic("IOSAPIC model is missed in qemu\n");
+	//panic("IOSAPIC model is missed in qemu\n");
 #endif
 	return;
     }
--- a/xen/arch/ia64/vmx/vtlb.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu Oct 13 14:24:45 2005 -0600
@@ -387,6 +387,15 @@ void vtlb_insert(thash_cb_t *hcb, thash_
         thash_insert(hcb->ts->vhpt, entry, va);
         return;
     }
+
+#if 1
+    vrr=vmx_vcpu_rr(current, va);
+    if (vrr.ps != entry->ps) {
+        printk("not preferred ps with va: 0x%lx\n", va);
+        return;
+    }
+#endif
+
     flag = 1;
     gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
     ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
--- a/xen/arch/ia64/xen/dom0_ops.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c	Thu Oct 13 14:24:45 2005 -0600
@@ -177,13 +177,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
 
         for ( i = start_page; i < (start_page + nr_pages); i++ )
         {
-            page = map_new_domain_page(d, i << PAGE_SHIFT);
-            if ( page == NULL )
-            {
-                ret = -ENOMEM;
-                break;
-            }
-            pfn = page_to_pfn(page);
+            pfn = __gpfn_to_mfn_foreign(d, i);
+
             if ( put_user(pfn, buffer) )
             {
                 ret = -EFAULT;
--- a/xen/arch/ia64/xen/dom_fw.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/dom_fw.c	Thu Oct 13 14:24:45 2005 -0600
@@ -301,7 +301,7 @@ xen_pal_emulator(unsigned long index, un
 	// pal code must be mapped by a TR when pal is called, however
 	// calls are rare enough that we will map it lazily rather than
 	// at every context switch
-	efi_map_pal_code();
+	//efi_map_pal_code();
 	switch (index) {
 	    case PAL_MEM_ATTRIB:
 		    status = ia64_pal_mem_attrib(&r9);
--- a/xen/arch/ia64/xen/domain.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/domain.c	Thu Oct 13 14:24:45 2005 -0600
@@ -59,6 +59,7 @@ unsigned long *domU_staging_area;
 
 // initialized by arch/ia64/setup.c:find_initrd()
 unsigned long initrd_start = 0, initrd_end = 0;
+extern unsigned long running_on_sim;
 
 #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
 
@@ -190,6 +191,9 @@ void arch_do_createdomain(struct vcpu *v
 		while (1);
 	}
 	memset(d->shared_info, 0, PAGE_SIZE);
+	if (v == d->vcpu[0])
+	    memset(&d->shared_info->evtchn_mask[0], 0xff,
+		sizeof(d->shared_info->evtchn_mask));
 #if 0
 	d->vcpu[0].arch.privregs =
 	    alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
@@ -271,6 +275,14 @@ int arch_set_info_guest(struct vcpu *v,
 	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
 	    return 0;
 
+	/* Sync d/i cache conservatively */
+	if (!running_on_sim) {
+	    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
+	    if (ret != PAL_STATUS_SUCCESS)
+		panic("PAL CACHE FLUSH failed for domain.\n");
+	    printk("Sync i/d cache for dom0 image SUCC\n");
+	}
+
 	if (c->flags & VGCF_VMX_GUEST) {
 	    if (!vmx_enabled) {
 		printk("No VMX hardware feature for vmx domain.\n");
@@ -547,7 +559,8 @@ tryagain:
 	    if (pte_present(*pte)) {
 		//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
 		return *(unsigned long *)pte;
-	    }
+	    } else if (VMX_DOMAIN(d->vcpu[0]))
+		return GPFN_INV_MASK;
 	}
     }
 }
@@ -799,7 +812,6 @@ void physdev_init_dom0(struct domain *d)
 	set_bit(_DOMF_physdev_access, &d->domain_flags);
 }
 
-extern unsigned long running_on_sim;
 unsigned int vmx_dom0 = 0;
 int construct_dom0(struct domain *d,
 	           unsigned long image_start, unsigned long image_len,
--- a/xen/arch/ia64/xen/hyperprivop.S	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S	Thu Oct 13 14:24:45 2005 -0600
@@ -807,8 +807,11 @@ just_do_rfi:
 	// OK, now all set to go except for switch to virtual bank1
 	mov r22=1;; st4 [r20]=r22;
 	mov r30=r2; mov r29=r3;;
+	adds r16=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+	ld8 r16=[r16];;
+	mov ar.unat=r16;;
 	bsw.1;;
 	// FIXME?: ar.unat is not really handled correctly,
 	// but may not matter if the OS is NaT-clean
--- a/xen/arch/ia64/xen/ivt.S	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/ivt.S	Thu Oct 13 14:24:45 2005 -0600
@@ -1460,7 +1460,28 @@ END(general_exception)
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
 ENTRY(disabled_fp_reg)
 #ifdef XEN
+#if 0
+	mov r20=pr
+	movl r16=0x2000000000000000
+	movl r17=0x2000000000176b60
+	mov r18=cr.iip
+	mov r19=rr[r16]
+	movl r22=0xe95d0439
+	;;
+	mov pr=r0,-1
+	;;
+	cmp.eq p6,p7=r22,r19
+	;;
+(p6)	cmp.eq p8,p9=r17,r18
+(p8)	br.sptk.few floating_panic
+	;;
+	mov pr=r20,-1
+	;;
+#endif
 	REFLECT(25)
+//floating_panic:
+//	br.sptk.many floating_panic
+	;;
 #endif
 	DBG_FAULT(25)
 	rsm psr.dfh		// ensure we can access fph
--- a/xen/arch/ia64/xen/process.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/process.c	Thu Oct 13 14:24:45 2005 -0600
@@ -745,6 +745,8 @@ ia64_handle_reflection (unsigned long if
 	    case 26:
 		printf("*** NaT fault... attempting to handle as privop\n");
 		printf("isr=%p, ifa=%p,iip=%p,ipsr=%p\n",isr,ifa,regs->cr_iip,psr);
+		regs->eml_unat = 0;
+		return;
 		vector = priv_emulate(v,regs,isr);
 		if (vector == IA64_NO_FAULT) {
 		    printf("*** Handled privop masquerading as NaT fault\n");
--- a/xen/arch/ia64/xen/regionreg.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/regionreg.c	Thu Oct 13 14:24:45 2005 -0600
@@ -15,7 +15,8 @@
 #include <asm/regionreg.h>
 #include <asm/vhpt.h>
 #include <asm/vcpu.h>
-extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
+extern void *pal_vaddr;
 
 
 #define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
@@ -66,9 +67,12 @@ unsigned long allocate_metaphysical_rr(v
 {
 	ia64_rr rrv;
 
+	rrv.rrval = 0;	// Or else may see reserved bit fault
 	rrv.rid = allocate_reserved_rid();
 	rrv.ps = PAGE_SHIFT;
 	rrv.ve = 0;
+	/* Mangle metaphysical rid */
+	rrv.rrval = vmMangleRID(rrv.rrval);
 	return rrv.rrval;
 }
 
@@ -213,6 +217,7 @@ int set_one_rr(unsigned long rr, unsigne
 	unsigned long rreg = REGION_NUMBER(rr);
 	ia64_rr rrv, newrrv, memrrv;
 	unsigned long newrid;
+	extern unsigned long vhpt_paddr;
 
 	if (val == -1) return 1;
 
@@ -250,9 +255,10 @@ int set_one_rr(unsigned long rr, unsigne
 	newrrv.rid = newrid;
 	newrrv.ve = 1;  // VHPT now enabled for region 7!!
 	newrrv.ps = PAGE_SHIFT;
-	if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
+	if (rreg == 0) v->arch.metaphysical_saved_rr0 =
+		vmMangleRID(newrrv.rrval);
 	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
-		v->arch.privregs);
+		v->arch.privregs, vhpt_paddr, pal_vaddr);
 	else set_rr(rr,newrrv.rrval);
 #endif
 	return 1;
@@ -265,7 +271,8 @@ int set_metaphysical_rr0(void)
 	ia64_rr rrv;
 
	//	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
-	set_rr(0,v->arch.metaphysical_rr0);
+	ia64_set_rr(0,v->arch.metaphysical_rr0);
+	ia64_srlz_d();
 }
 
 // validates/changes region registers 0-6 in the currently executing domain
@@ -290,7 +297,7 @@ void init_all_rr(struct vcpu *v)
 	ia64_rr rrv;
 
 	rrv.rrval = 0;
-	rrv.rrval = v->domain->arch.metaphysical_rr0;
+	//rrv.rrval = v->domain->arch.metaphysical_rr0;
 	rrv.ps = PAGE_SHIFT;
 	rrv.ve = 1;
 	if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
@@ -343,12 +350,16 @@ unsigned long load_region_regs(struct vc
 	if (VCPU(v,metaphysical_mode)) {
 		ia64_rr rrv;
 
+#if 0
 		rrv.rrval = 0;
 		rrv.rid = v->domain->arch.metaphysical_rr0;
 		rrv.ps = PAGE_SHIFT;
 		rrv.ve = 1;
 		rr0 = rrv.rrval;
 		set_rr_no_srlz(0x0000000000000000L, rr0);
+#endif
 		rr0 = v->domain->arch.metaphysical_rr0;
+		ia64_set_rr(0x0000000000000000L, rr0);
 		ia64_srlz_d();
 	}
 	else {
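The regionreg.c changes all enforce one invariant from the commit message:
anything that can be loaded into a region register, including metaphysical
rr0, is mangled as a whole rrval (with reserved bits cleared first), never as
a bare rid. A self-contained sketch of that invariant; the union layout only
approximates ia64_rr, and mangle_rid() is an illustrative stand-in for Xen's
vmMangleRID():

    #include <stdio.h>

    typedef union {                  /* approximate ia64_rr layout */
        unsigned long rrval;
        struct {
            unsigned long ve  : 1;   /* VHPT walker enable */
            unsigned long rsv : 1;   /* reserved, must stay zero */
            unsigned long ps  : 6;   /* preferred page size */
            unsigned long rid : 24;  /* region id */
        };
    } ia64_rr;

    static unsigned long mangle_rid(unsigned long rrval)  /* stand-in */
    {
        ia64_rr rr = { .rrval = rrval };
        rr.rid ^= 0x5a5a5a;          /* any consistent rid permutation */
        return rr.rrval;
    }

    int main(void)
    {
        ia64_rr rr;
        rr.rrval = 0;                /* clear reserved bits or fault later */
        rr.rid = 42;                 /* e.g. the reserved metaphysical rid */
        rr.ps  = 14;
        rr.ve  = 0;
        /* mangle the whole register value, same path as virtual rids */
        printf("metaphysical rr0 = 0x%lx\n", mangle_rid(rr.rrval));
        return 0;
    }

Mangling the metaphysical value on the same path as virtual RIDs is what
removes the cross-domain collision described in the commit message.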
--- a/xen/arch/ia64/xen/vcpu.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/vcpu.c	Thu Oct 13 14:24:45 2005 -0600
@@ -775,6 +775,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
 	}
 #ifdef HEARTBEAT_FREQ
 	if (domid >= N_DOMS) domid = N_DOMS-1;
+#if 0
 	if (vector == (PSCB(vcpu,itv) & 0xff)) {
 	    if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
 		printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
@@ -783,6 +784,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
 		//dump_runq();
 	    }
 	}
+#endif
 	else nonclockcount[domid]++;
 #endif
 	// now have an unmasked, pending, deliverable vector!
--- a/xen/arch/ia64/xen/xenasm.S	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/xenasm.S	Thu Oct 13 14:24:45 2005 -0600
@@ -48,11 +48,11 @@ END(platform_is_hp_ski)
 // FIXME? Note that this turns off the DB bit (debug)
 #define PSR_BITS_TO_SET	IA64_PSR_BN
 
-//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
+//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
 GLOBAL_ENTRY(ia64_new_rr7)
 	// not sure this unwind statement is correct...
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-	alloc loc1 = ar.pfs, 3, 8, 0, 0
+	alloc loc1 = ar.pfs, 5, 9, 0, 0
 1:	{
 	  mov r28  = in0		// copy procedure index
 	  mov r8   = ip			// save ip to compute branch
@@ -63,10 +63,12 @@ 1:	{
 	;;
 	tpa loc2=loc2			// grab this BEFORE changing rr7
 	;;
+	dep loc8=0,in4,60,4
+	;;
 #if VHPT_ENABLED
-	movl loc6=VHPT_ADDR
+	mov loc6=in3
 	;;
-	tpa loc6=loc6			// grab this BEFORE changing rr7
+	//tpa loc6=loc6			// grab this BEFORE changing rr7
 	;;
 #endif
 	mov loc5=in1
@@ -231,6 +233,21 @@ 1:
 	itr.d dtr[r25]=r23		// wire in new mapping...
 	;;
 
+	//Purge/insert PAL TR
+	mov r24=IA64_TR_PALCODE
+	movl r25=PAGE_KERNEL
+	;;
+	or loc8=r25,loc8
+	mov r23=IA64_GRANULE_SHIFT<<2
+	;;
+	ptr.i in4,r23
+	;;
+	mov cr.itir=r23
+	mov cr.ifa=in4
+	;;
+	itr.i itr[r24]=loc8
+	;;
+
 	// done, switch back to virtual and return
 	mov r16=loc3			// r16= original psr
 	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
--- a/xen/arch/ia64/xen/xenmisc.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c	Thu Oct 13 14:24:45 2005 -0600
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <xen/softirq.h>
 #include <public/sched.h>
+#include <asm/vhpt.h>
 
 efi_memory_desc_t ia64_efi_io_md;
 EXPORT_SYMBOL(ia64_efi_io_md);
@@ -310,9 +311,13 @@ if (!cnt[id]--) { printk("%x",id); cnt[i
 if (!i--) { printk("+",id); i = 1000000; }
 }
 
-	if (VMX_DOMAIN(current)){
+	if (VMX_DOMAIN(current)){
 		vmx_load_all_rr(current);
 	}else{
+		extern char ia64_ivt;
+		ia64_set_iva(&ia64_ivt);
+		ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
+			VHPT_ENABLED);
 		if (!is_idle_task(current->domain)) {
 			load_region_regs(current);
 			if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
--- a/xen/arch/ia64/xen/xentime.c	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/arch/ia64/xen/xentime.c	Thu Oct 13 14:24:45 2005 -0600
@@ -99,6 +99,7 @@ xen_timer_interrupt (int irq, void *dev_
 {
 	unsigned long new_itm, old_itc;
 
+#if 0
 #define HEARTBEAT_FREQ 16	// period in seconds
 #ifdef HEARTBEAT_FREQ
 	static long count = 0;
@@ -110,6 +111,7 @@ xen_timer_interrupt (int irq, void *dev_
 		count = 0;
 	}
 #endif
+#endif
 	if (current->domain == dom0) {
 		// FIXME: there's gotta be a better way of doing this...
 		// We have to ensure that domain0 is launched before we
@@ -117,12 +119,14 @@ xen_timer_interrupt (int irq, void *dev_
 		//domain0_ready = 1; // moved to xensetup.c
 		VCPU(current,pending_interruption) = 1;
 	}
-	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
-		vcpu_pend_timer(dom0->vcpu[0]);
-		//vcpu_set_next_timer(dom0->vcpu[0]);
-		vcpu_wake(dom0->vcpu[0]);
+	if (domain0_ready && current->domain != dom0) {
+		if(vcpu_timer_expired(dom0->vcpu[0])) {
+			vcpu_pend_timer(dom0->vcpu[0]);
+			//vcpu_set_next_timer(dom0->vcpu[0]);
+			vcpu_wake(dom0->vcpu[0]);
+		}
 	}
-	if (!is_idle_task(current->domain) && current->domain != dom0) {
+	if (!is_idle_task(current->domain)) {
 		if (vcpu_timer_expired(current)) {
 			vcpu_pend_timer(current);
 			// ensure another timer interrupt happens even if domain doesn't
@@ -132,9 +136,12 @@ xen_timer_interrupt (int irq, void *dev_
 	}
 	new_itm = local_cpu_data->itm_next;
 
-	if (!time_after(ia64_get_itc(), new_itm))
+	if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
 		return;
 
+	if (VMX_DOMAIN(current))
+		vcpu_wake(current);
+
 	while (1) {
 		new_itm += local_cpu_data->itm_delta;
 
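The xentime.c hunk addresses the halt described in the commit message: cr.itm
only raises an interrupt when the ITC reaches it, so a handler path that
returns without moving itm forward starves the machine of further ticks. A
toy model of that failure mode (illustrative only, no real hypervisor state):

    #include <stdio.h>

    int main(void)
    {
        unsigned long itm = 10, delta = 10;
        int ticks_buggy = 0, ticks_fixed = 0;

        for (unsigned long itc = 0; itc < 100; itc++) {
            if (itc == itm)         /* interrupt fires on itc/itm match */
                ticks_buggy++;      /* buggy: itm never reprogrammed    */
        }
        itm = 10;
        for (unsigned long itc = 0; itc < 100; itc++) {
            if (itc == itm) {
                ticks_fixed++;
                itm += delta;       /* fixed: always advance itm        */
            }
        }
        /* one lone tick versus a steady stream of them */
        printf("buggy=%d fixed=%d\n", ticks_buggy, ticks_fixed);
        return 0;
    }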
--- a/xen/include/asm-ia64/config.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/asm-ia64/config.h	Thu Oct 13 14:24:45 2005 -0600
@@ -102,7 +102,7 @@ extern char _end[]; /* standard ELF symb
 #endif
 
 // xen/include/asm/config.h
-#define HZ 100
+//#define HZ 1000
 // FIXME SMP: leave SMP for a later time
 #define barrier() __asm__ __volatile__("": : :"memory")
 
--- a/xen/include/asm-ia64/event.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/asm-ia64/event.h	Thu Oct 13 14:24:45 2005 -0600
@@ -14,6 +14,21 @@
 
 static inline void evtchn_notify(struct vcpu *v)
 {
+    /*
+     * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+     * pending flag. These values may fluctuate (after all, we hold no
+     * locks) but the key insight is that each change will cause
+     * evtchn_upcall_pending to be polled.
+     *
+     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * IPI for domains that we IPI'd to unblock.
+     */
+    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    vcpu_unblock(v);
+    if ( running )
+        smp_send_event_check_cpu(v->processor);
+
+    if(!VMX_DOMAIN(v))
 	vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
 }
--- a/xen/include/asm-ia64/mm.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/asm-ia64/mm.h	Thu Oct 13 14:24:45 2005 -0600
@@ -405,6 +405,7 @@ extern unsigned long totalram_pages;
 extern int nr_swap_pages;
 
 extern unsigned long *mpt_table;
+extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
 #undef machine_to_phys_mapping
 #define machine_to_phys_mapping	mpt_table
 
@@ -433,10 +434,10 @@ extern unsigned long *mpt_table;
 
 #define __gpfn_is_mem(_d, gpfn)				\
 (__gpfn_valid(_d, gpfn) ?				\
-	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+	((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
 
 
-//#define __gpa_to_mpa(_d, gpa)   \
-//    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
+#define __gpa_to_mpa(_d, gpa)   \
+    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
 
 #endif /* __ASM_IA64_MM_H__ */
--- a/xen/include/asm-ia64/vmx.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/asm-ia64/vmx.h	Thu Oct 13 14:24:45 2005 -0600
@@ -25,6 +25,7 @@
 #define RR7_SWITCH_SHIFT	12	/* 4k enough */
 #include <public/io/ioreq.h>
 
+
 extern void identify_vmx_feature(void);
 extern unsigned int vmx_enabled;
 extern void vmx_init_env(void);
--- a/xen/include/asm-ia64/xenkregs.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/asm-ia64/xenkregs.h	Thu Oct 13 14:24:45 2005 -0600
@@ -6,7 +6,8 @@
  */
 #define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
 #define	IA64_TR_VHPT		4	/* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO	5
+#define IA64_TR_ARCH_INFO	5
+#define IA64_TR_PERVP_VHPT	6
 
 /* Processor status register bits: */
 #define IA64_PSR_VM_BIT	46
--- a/xen/include/public/arch-ia64.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/public/arch-ia64.h	Thu Oct 13 14:24:45 2005 -0600
@@ -38,6 +38,33 @@ typedef struct {
 
 #define INVALID_MFN       (~0UL)
 
+#define MEM_G   (1UL << 30)
+#define MEM_M   (1UL << 20)
+
+#define MMIO_START       (3 * MEM_G)
+#define MMIO_SIZE        (512 * MEM_M)
+
+#define VGA_IO_START     0xA0000UL
+#define VGA_IO_SIZE      0x20000
+
+#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE   (64*MEM_M)
+
+#define IO_PAGE_START    (LEGACY_IO_START + LEGACY_IO_SIZE)
+#define IO_PAGE_SIZE     PAGE_SIZE
+
+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
+#define STORE_PAGE_SIZE  PAGE_SIZE
+
+#define IO_SAPIC_START   0xfec00000UL
+#define IO_SAPIC_SIZE    0x100000
+
+#define PIB_START        0xfee00000UL
+#define PIB_SIZE         0x100000
+
+#define GFW_START        (4*MEM_G -16*MEM_M)
+#define GFW_SIZE         (16*MEM_M)
+
 /*
  * NB. This may become a 64-bit count with no shift. If this happens then the
  * structure size will still be 8 bytes, so no other alignments will change.
--- a/xen/include/public/io/ioreq.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/public/io/ioreq.h	Thu Oct 13 14:24:45 2005 -0600
@@ -35,6 +35,13 @@
 #define IOREQ_TYPE_OR		3
 #define IOREQ_TYPE_XOR		4
 
+#ifdef __HYPERVISOR__
+#include <public/io/vmx_vlapic.h>
+#else
+#include <xen/io/vmx_vlapic.h>
+#endif
+
+
 /*
  * VMExit dispatcher should cooperate with instruction decoder to
  * prepare this structure and notify service OS and DM by sending
@@ -55,10 +62,6 @@ typedef struct {
     u8 type;    /* I/O type */
 } ioreq_t;
 
-#define MAX_VECTOR    256
-#define BITS_PER_BYTE 8
-#define INTR_LEN (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u64)))
-
 typedef struct {
     u64   pic_intr[INTR_LEN];
     u64   pic_mask[INTR_LEN];
@@ -67,10 +70,11 @@ typedef struct {
 
 typedef struct {
     ioreq_t        vp_ioreq;
-    unsigned long  vp_intr[INTR_LEN];
+    vl_apic_info   apic_intr;
 } vcpu_iodata_t;
 
 typedef struct {
+    int vcpu_number;
     global_iodata_t  sp_global;
     vcpu_iodata_t    vcpu_iodata[1];
 } shared_iopage_t;
--- a/xen/include/public/io/vmx_vlapic.h	Wed Oct 12 17:12:59 2005 -0600
+++ b/xen/include/public/io/vmx_vlapic.h	Thu Oct 13 14:24:45 2005 -0600
@@ -1,57 +1,29 @@
 #ifndef _VMX_VLAPIC_H
 #define _VMX_VLAPIC_H
 
-/*
-   We extended one bit for PIC type
- */
 #define VLAPIC_DELIV_MODE_FIXED		0x0
 #define VLAPIC_DELIV_MODE_LPRI		0x1
 #define VLAPIC_DELIV_MODE_SMI		0x2
+#define VLAPIC_DELIV_MODE_PMI		0x2
 #define VLAPIC_DELIV_MODE_NMI		0x4
 #define VLAPIC_DELIV_MODE_INIT		0x5
 #define VLAPIC_DELIV_MODE_STARTUP	0x6
 #define VLAPIC_DELIV_MODE_EXT		0x7
 #define VLAPIC_DELIV_MODE_MASK		0x8
 
-#define VLAPIC_MSG_LEVEL		4
-
-#define INTR_EXT	0
-#define INTR_APIC	1
-#define INTR_LAPIC	2
-
-#define VL_STATE_EOI		1
-#define VL_STATE_EXT_LOCK	2
-#define VL_STATE_MSG_LOCK	3
-#define VL_STATE_EOI_LOCK	3
-
-#define VLOCAL_APIC_MAX_INTS	256
-#define VLAPIC_INT_COUNT	(VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(u64)))
-#define VLAPIC_INT_COUNT_32	(VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(u32)))
-
-struct vapic_bus_message{
-    u8   deliv_mode:4;   /* deliver mode, including fixed, LPRI, etc */
-    u8   level:1;        /* level or edge */
-    u8   trig_mod:1;     /* assert or disassert */
-    u8   reserved:2;
-    u8   vector;
-};
+#define MAX_VECTOR    256
+#define BITS_PER_BYTE 8
+#define INTR_LEN      (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u64)))
+#define INTR_LEN_32   (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u32)))
 
 typedef struct {
-    /* interrupt for PIC and ext type IOAPIC interrupt */
-    u64   vl_ext_intr[VLAPIC_INT_COUNT];
-    u64   vl_ext_intr_mask[VLAPIC_INT_COUNT];
-    u64   vl_apic_intr[VLAPIC_INT_COUNT];
-    u64   vl_apic_tmr[VLAPIC_INT_COUNT];
-    u64   vl_eoi[VLAPIC_INT_COUNT];
     u32   vl_lapic_id;
-    u32   direct_intr;
     u32   vl_apr;
     u32   vl_logical_dest;
    u32   vl_dest_format;
     u32   vl_arb_id;
-    u32   vl_state;
-    u32   apic_msg_count;
-    struct vapic_bus_message vl_apic_msg[24];
-} vlapic_info;
+    u64   irr[INTR_LEN];
+    u64   tmr[INTR_LEN];
+}vl_apic_info;
 
 #endif /* _VMX_VLAPIC_H_ */