ia64/xen-unstable
changeset 5093:c91f74efda05
bitkeeper revision 1.1514 (4291f6f7i2aAlgdzvcq6xJ3W4hjYzg)
Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
--- a/.rootkeys	Mon May 23 15:22:15 2005 +0000
+++ b/.rootkeys	Mon May 23 15:29:59 2005 +0000
@@ -1068,12 +1068,16 @@ 421098b3ys5GAr4z6_H1jD33oem82g xen/arch/
 4272a8e4lavI6DrTvqaIhXeR5RuKBw xen/arch/ia64/ivt.S
 421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
 421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
+428b9f38Gp0KcPokG9Nq5v1rGk2FkA xen/arch/ia64/mmio.c
 425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
 425ae516cGqvMzGtihTEsQXAXsuOhQ xen/arch/ia64/patch/linux-2.6.11/efi.c
 425ae516Y1A4q4_Kfre3qnDj7lbHJg xen/arch/ia64/patch/linux-2.6.11/entry.S
+428bb037eJ4qs48I-tUdhht5_95obA xen/arch/ia64/patch/linux-2.6.11/entry.h
+428bb037jPbybWNkNymaqkFr83vT6Q xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h
 425ae516txAP-owjzpTJ7ThfzWR8nw xen/arch/ia64/patch/linux-2.6.11/hardirq.h
 425ae516PDO1ESDHXHVeDNvlqUfmdQ xen/arch/ia64/patch/linux-2.6.11/head.S
 425ae516JR7HWvt1zxJ-wLvEWmJGgg xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
+428bb037UxfxIhZaslk-qHazO4w0yg xen/arch/ia64/patch/linux-2.6.11/ia64regs.h
 425ae516AHRNmaVuZjJY-9YjmKRDqg xen/arch/ia64/patch/linux-2.6.11/interrupt.h
 425ae516U2wFUzrUJQUpy3z38jZHsQ xen/arch/ia64/patch/linux-2.6.11/io.h
 425ae516GGRmXijPBLC5ii6yWOn0rg xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
@@ -1083,11 +1087,12 @@ 425ae5160-9wHxh0tOnIjavEjt6W0A xen/arch/
 425ae516N7SaORdbodDr90tmtCzYXw xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
 425ae516WDLrfEA4zr40d00z0VIWPg xen/arch/ia64/patch/linux-2.6.11/page.h
 425ae516pVQ75NhdItT593SiWI0lbQ xen/arch/ia64/patch/linux-2.6.11/pal.S
+428bb037THuiyhERFP8RhRgapNkWXg xen/arch/ia64/patch/linux-2.6.11/pal.h
 425ae516QfmjiF_a-mabAXqV8Imzkg xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
 425ae516EWaNOBEnc1xnphTbRmNZsw xen/arch/ia64/patch/linux-2.6.11/processor.h
+428bb037KSxe7_UyqseK5bWhGe3KwA xen/arch/ia64/patch/linux-2.6.11/ptrace.h
 425ae516LecDyXlwh3NLBtHZKXmMcA xen/arch/ia64/patch/linux-2.6.11/series
 425ae516RFiPn2CGkpJ21LM-1lJcQg xen/arch/ia64/patch/linux-2.6.11/setup.c
-425ae516FX_10YaKGMU8Ysf7kkdm_A xen/arch/ia64/patch/linux-2.6.11/swiotlb.c
 425ae516p4ICTkjqNYEfYFxqULj4dw xen/arch/ia64/patch/linux-2.6.11/system.h
 425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
 425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
@@ -1142,6 +1147,7 @@ 41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/
 41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
 421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
 421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
+428b9f38JJDW35iDn5DlfXTu700rkQ xen/arch/ia64/tools/README.RunVT
 421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
 42376c6dfyY0eq8MS2dK3BW2rFuEGg xen/arch/ia64/tools/README.xenia64linux
 421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
@@ -1152,6 +1158,20 @@ 4252ace7H2dIMPFeFwczAVoP4yAHxA xen/arch/
 4252ace74lKUPFnO8PmF0Dtpk7Xkng xen/arch/ia64/tools/privify/privify_elf64.c
 41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
 421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
+428b9f38PglyXM-mJJfo19ycuQrEhw xen/arch/ia64/vlsapic.c
+428b9f38EmpBsMHL3WbOZoieteBGdQ xen/arch/ia64/vmmu.c
+428b9f38hU-X5aX0MIY3EU0Yw4PjcA xen/arch/ia64/vmx_entry.S
+428b9f38S76bWI96g7uPLmE-uAcmdg xen/arch/ia64/vmx_init.c
+428b9f385AMSyCRYBsckQClQY4ZgHA xen/arch/ia64/vmx_interrupt.c
+428b9f380IOjPmj0N6eelH-WJjl1xg xen/arch/ia64/vmx_ivt.S
+428b9f38Y7tp9uyNRdru3lPDXLjOCA xen/arch/ia64/vmx_minstate.h
+428b9f38H9Pz0ZhRUT0-11A6jceE1w xen/arch/ia64/vmx_phy_mode.c
+428b9f38pXU56r2OjoFW2Z8H1XY17w xen/arch/ia64/vmx_process.c
+428b9f38GmZxD-GMDnQB3m7tOoukTA xen/arch/ia64/vmx_utility.c
+428b9f38Pflg6Z4CtXeVGv7dyEOM4g xen/arch/ia64/vmx_vcpu.c
+428b9f38Y7p7hXHWx9QF_oYUjdD__g xen/arch/ia64/vmx_virt.c
+428b9f38EL7qKbbKkhBNr0KzMLS4Gg xen/arch/ia64/vmx_vsa.S
+428b9f3805WejQ1E-OqAPANPAu8vPw xen/arch/ia64/vtlb.c
 41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
 4272adaeit9raZ9KnjO_wR4Ii9LJNQ xen/arch/ia64/xenirq.c
 427162263zDUiPmTj-lP4eGyXs5eIg xen/arch/ia64/xenmem.c
@@ -1291,7 +1311,21 @@ 421098b7GkWOnlzSmPvNAhByOSZ1Dw xen/inclu
 421098b7FK3xgShpnH0I0Ou3O4fJ2Q xen/include/asm-ia64/tlb.h
 421098b78IGdFOGUlPmpS7h_QBmoFg xen/include/asm-ia64/vcpu.h
 421098b7PiAencgmBFGAqALU-V5rqQ xen/include/asm-ia64/vhpt.h
+428b9f38_b0DgWwkJcBEsTdEmO9WNQ xen/include/asm-ia64/virt_event.h
+428b9f38B0KbUj3o2FBQJ5tmIIMDHg xen/include/asm-ia64/vmmu.h
+428b9f38ewjoJ-RL-2lsXFT04H2aag xen/include/asm-ia64/vmx.h
+428b9f38coGlYeXx-7hpvfCTAPOd7w xen/include/asm-ia64/vmx_mm_def.h
+428b9f387tov0OtOEeF8fVWSR2v5Pg xen/include/asm-ia64/vmx_pal.h
+428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
+428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
+428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
+428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
+428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
+428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
+428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
+428e120a-H-bqn10zOlnhlzlVEuW8A xen/include/asm-ia64/xenprocessor.h
 421098b7LfwIHQ2lRYWhO4ruEXqIuQ xen/include/asm-ia64/xenserial.h
+428e120esS-Tp1mX5VoUrsGJDNY_ow xen/include/asm-ia64/xensystem.h
 40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
 3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
 3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
--- a/xen/arch/ia64/Makefile	Mon May 23 15:22:15 2005 +0000
+++ b/xen/arch/ia64/Makefile	Mon May 23 15:29:59 2005 +0000
@@ -10,6 +10,12 @@ OBJS = xensetup.o setup.o time.o irq.o i
 	extable.o linuxextable.o xenirq.o xentime.o \
 	regionreg.o entry.o unaligned.o privop.o vcpu.o \
 	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
+
+ifeq ($(CONFIG_VTI),y)
+OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
+	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
+	vtlb.o mmio.o vlsapic.o
+endif
 # perfmon.o
 # unwind.o needed for kernel unwinding (rare)
 
@@ -24,9 +30,22 @@ default: $(OBJS) head.o ia64lib.o xen.ld
 		-Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
 	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
 
-asm-offsets.s: asm-offsets.c
+asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
 	$(CC) $(CFLAGS) -S -o $@ $<
 
+$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
+# Need such symbol link to make linux headers available
+	[ -e $(BASEDIR)/include/linux ] \
+	 || ln -s $(BASEDIR)/include/xen $(BASEDIR)/include/linux
+	[ -e $(BASEDIR)/include/asm-ia64/xen ] \
+	 || ln -s $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
+# Solve circular reference on asm-offsets.h
+	[ -f $(BASEDIR)/include/asm-ia64/asm-offsets.h ] \
+	 || echo "#define IA64_TASK_SIZE 0" > $(BASEDIR)/include/asm-ia64/asm-offsets.h
+#Bad hack. Force asm-offsets.h out-of-date
+	sleep 1
+	touch $@
+
 # I'm sure a Makefile wizard would know a better way to do this
 xen.lds.s: xen.lds.S
 	$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
@@ -36,7 +55,7 @@ ia64lib.o:
 	$(MAKE) -C lib && cp lib/ia64lib.o .
 
 clean:
-	rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
+	rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
 	rm -f lib/*.o
 
 # setup.o contains bits of compile.h so it must be blown away
--- a/xen/arch/ia64/Rules.mk	Mon May 23 15:22:15 2005 +0000
+++ b/xen/arch/ia64/Rules.mk	Mon May 23 15:29:59 2005 +0000
@@ -1,6 +1,7 @@
 ########################################
 # ia64-specific definitions
 
+CONFIG_VTI	?= n
 ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
 endif
@@ -17,4 +18,7 @@ CFLAGS  += -Wno-pointer-arith -Wredundan
 CFLAGS	+= -DIA64 -DXEN -DLINUX_2_6
 CFLAGS	+= -ffixed-r13 -mfixed-range=f12-f15,f32-f127
 CFLAGS	+= -w -g
+ifeq ($(CONFIG_VTI),y)
+CFLAGS	+= -DCONFIG_VTI
+endif
 LDFLAGS := -g
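(Editor's note: with these two hunks, VT-i support stays off by default (CONFIG_VTI ?= n); a build presumably enables it by overriding the variable on the command line, i.e. make CONFIG_VTI=y, which both links the vmx_* objects from the Makefile hunk above and defines CONFIG_VTI for the #ifdef CONFIG_VTI blocks in the C and assembly diffs below.)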
--- a/xen/arch/ia64/acpi.c	Mon May 23 15:22:15 2005 +0000
+++ b/xen/arch/ia64/acpi.c	Mon May 23 15:29:59 2005 +0000
@@ -1,9 +1,16 @@
 /*
  * acpi.c - Architecture-Specific Low-Level ACPI Support
  *
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
- * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Intel Corp.
+ * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -19,667 +26,651 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <xen/config.h>
-#include <xen/kernel.h>
-#include <xen/init.h>
-#include <xen/types.h>
-/*#include <xen/stddef.h>*/
-#include <xen/slab.h>
-//#include <xen/pci.h>
-/*#include <xen/bootmem.h>*/
-#include <xen/irq.h>
-#include <xen/acpi.h>
-//#include <asm/mpspec.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/mmzone.h>
 #include <asm/io.h>
-//#include <asm/apic.h>
-//#include <asm/apicdef.h>
+//#include <asm/iosapic.h>
+#include <asm/machvec.h>
 #include <asm/page.h>
-/*#include <asm/pgtable.h>*/
-#include <asm/pgalloc.h>
-//#include <asm/io_apic.h>
-#include <asm/acpi.h>
-/*#include <asm/save_state.h>*/
-//#include <asm/smpboot.h>
+#include <asm/system.h>
+#include <asm/numa.h>
+#include <asm/sal.h>
+//#include <asm/cyclone.h>
 
+#define BAD_MADT_ENTRY(entry, end) (					\
+		(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
+		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
 
 #define PREFIX			"ACPI: "
 
-int acpi_lapic = 0;
-int acpi_ioapic = 0;
+void (*pm_idle) (void);
+EXPORT_SYMBOL(pm_idle);
+void (*pm_power_off) (void);
+
+unsigned char acpi_kbd_controller_present = 1;
+unsigned char acpi_legacy_devices;
+
+const char *
+acpi_get_sysname (void)
+{
+#ifdef CONFIG_IA64_GENERIC
+	unsigned long rsdp_phys;
+	struct acpi20_table_rsdp *rsdp;
+	struct acpi_table_xsdt *xsdt;
+	struct acpi_table_header *hdr;
+
+	rsdp_phys = acpi_find_rsdp();
+	if (!rsdp_phys) {
+		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
+		return "dig";
+	}
+
+	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
+	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
+		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
+		return "dig";
+	}
 
-/* --------------------------------------------------------------------------
-                              Boot-time Configuration
-   -------------------------------------------------------------------------- */
+	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
+	hdr = &xsdt->header;
+	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
+		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
+		return "dig";
+	}
+
+	if (!strcmp(hdr->oem_id, "HP")) {
+		return "hpzx1";
+	}
+	else if (!strcmp(hdr->oem_id, "SGI")) {
+		return "sn2";
+	}
+
+	return "dig";
+#else
+# if defined (CONFIG_IA64_HP_SIM)
+	return "hpsim";
+# elif defined (CONFIG_IA64_HP_ZX1)
+	return "hpzx1";
+# elif defined (CONFIG_IA64_SGI_SN2)
+	return "sn2";
+# elif defined (CONFIG_IA64_DIG)
+	return "dig";
+# else
+#	error Unknown platform.  Fix acpi.c.
+# endif
+#endif
+}
 
 #ifdef CONFIG_ACPI_BOOT
-//int acpi_noirq __initdata = 0;	/* skip ACPI IRQ initialization */
-int acpi_ht __initdata = 1;	/* enable HT */
+
+#define ACPI_MAX_PLATFORM_INTERRUPTS	256
 
-enum acpi_irq_model_id		acpi_irq_model;
+#if 0
+/* Array to record platform interrupt vectors for generic interrupt routing. */
+int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
+	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
+};
 
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
 
 /*
- * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
- * to map the target physical address. The problem is that set_fixmap()
- * provides a single page, and it is possible that the page is not
- * sufficient.
- * By using this area, we can map up to MAX_IO_APICS pages temporarily,
- * i.e. until the next __va_range() call.
- *
- * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
- * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
- * count idx down while incrementing the phys address.
+ * Interrupt routing API for device drivers.  Provides interrupt vector for
+ * a generic platform event.  Currently only CPEI is implemented.
  */
-char *__acpi_map_table(unsigned long phys, unsigned long size)
+int
+acpi_request_vector (u32 int_type)
 {
-	unsigned long base, offset, mapped_size;
-	int idx;
-
-	if (phys + size < 8*1024*1024)
-		return __va(phys);
+	int vector = -1;
 
-	offset = phys & (PAGE_SIZE - 1);
-	mapped_size = PAGE_SIZE - offset;
-#ifndef XEN
-// where is FIX_ACPI_*? hack for now, FIXME later
-	set_fixmap(FIX_ACPI_END, phys);
-	base = fix_to_virt(FIX_ACPI_END);
-
-	/*
-	 * Most cases can be covered by the below.
-	 */
-	idx = FIX_ACPI_END;
-	while (mapped_size < size) {
-		if (--idx < FIX_ACPI_BEGIN)
-			return 0;	/* cannot handle this */
-		phys += PAGE_SIZE;
-		set_fixmap(idx, phys);
-		mapped_size += PAGE_SIZE;
-	}
+	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
+		/* corrected platform error interrupt */
+		vector = platform_intr_list[int_type];
+	} else
+		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
+	return vector;
+}
 #endif
-
-	return ((unsigned char *) base + offset);
+char *
+__acpi_map_table (unsigned long phys_addr, unsigned long size)
+{
+	return __va(phys_addr);
 }
 
-
-#ifdef CONFIG_X86_LOCAL_APIC
+/* --------------------------------------------------------------------------
+                            Boot-time Table Parsing
+   -------------------------------------------------------------------------- */
 
-static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+static int			total_cpus __initdata;
+static int			available_cpus __initdata;
+struct acpi_table_madt *	acpi_madt __initdata;
+static u8			has_8259;
 
-
+#if 0
 static int __init
-acpi_parse_madt (
-	unsigned long		phys_addr,
-	unsigned long		size)
+acpi_parse_lapic_addr_ovr (
+	acpi_table_entry_header *header, const unsigned long end)
 {
-	struct acpi_table_madt	*madt = NULL;
+	struct acpi_table_lapic_addr_ovr *lapic;
 
-	if (!phys_addr || !size)
+	lapic = (struct acpi_table_lapic_addr_ovr *) header;
+
+	if (BAD_MADT_ENTRY(lapic, end))
 		return -EINVAL;
 
-	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
-	if (!madt) {
-		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-		return -ENODEV;
-	}
+	acpi_table_print_madt_entry(header);
 
-	if (madt->lapic_address)
-		acpi_lapic_addr = (u64) madt->lapic_address;
-
-	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
-		madt->lapic_address);
-
-	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
-
+	if (lapic->address) {
+		iounmap((void *) ipi_base_addr);
+		ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
+	}
 	return 0;
 }
 
 
 static int __init
-acpi_parse_lapic (
-	acpi_table_entry_header *header)
+acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
 {
-	struct acpi_table_lapic	*processor = NULL;
+	struct acpi_table_lsapic *lsapic;
 
-	processor = (struct acpi_table_lapic*) header;
-	if (!processor)
+	lsapic = (struct acpi_table_lsapic *) header;
+
+	if (BAD_MADT_ENTRY(lsapic, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	mp_register_lapic (
-		processor->id,					   /* APIC ID */
-		processor->flags.enabled);			  /* Enabled? */
+	printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
 
+	if (!lsapic->flags.enabled)
+		printk(" disabled");
+	else {
+		printk(" enabled");
+#ifdef CONFIG_SMP
+		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+		if (hard_smp_processor_id()
+		    == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
+			printk(" (BSP)");
+#endif
+		++available_cpus;
+	}
+
+	printk("\n");
+
+	total_cpus++;
 	return 0;
 }
 
 
 static int __init
-acpi_parse_lapic_addr_ovr (
-	acpi_table_entry_header *header)
+acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
 {
-	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
-
-	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
-	if (!lapic_addr_ovr)
-		return -EINVAL;
-
-	acpi_lapic_addr = lapic_addr_ovr->address;
+	struct acpi_table_lapic_nmi *lacpi_nmi;
 
-	return 0;
-}
+	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
 
-static int __init
-acpi_parse_lapic_nmi (
-	acpi_table_entry_header *header)
-{
-	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
-
-	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
-	if (!lapic_nmi)
+	if (BAD_MADT_ENTRY(lacpi_nmi, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	if (lapic_nmi->lint != 1)
-		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
-
-	return 0;
-}
-
-#endif /*CONFIG_X86_LOCAL_APIC*/
-
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
-
-static int __init
-acpi_parse_ioapic (
-	acpi_table_entry_header *header)
-{
-	struct acpi_table_ioapic *ioapic = NULL;
-
-	ioapic = (struct acpi_table_ioapic*) header;
-	if (!ioapic)
-		return -EINVAL;
-
-	acpi_table_print_madt_entry(header);
-
-	mp_register_ioapic (
-		ioapic->id,
-		ioapic->address,
-		ioapic->global_irq_base);
-
+	/* TBD: Support lapic_nmi entries */
 	return 0;
 }
 
 
 static int __init
-acpi_parse_int_src_ovr (
-	acpi_table_entry_header *header)
+acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
 {
-	struct acpi_table_int_src_ovr *intsrc = NULL;
+	struct acpi_table_iosapic *iosapic;
 
-	intsrc = (struct acpi_table_int_src_ovr*) header;
-	if (!intsrc)
+	iosapic = (struct acpi_table_iosapic *) header;
+
+	if (BAD_MADT_ENTRY(iosapic, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	mp_override_legacy_irq (
-		intsrc->bus_irq,
-		intsrc->flags.polarity,
-		intsrc->flags.trigger,
-		intsrc->global_irq);
+	iosapic_init(iosapic->address, iosapic->global_irq_base);
 
 	return 0;
 }
 
 
 static int __init
-acpi_parse_nmi_src (
-	acpi_table_entry_header *header)
+acpi_parse_plat_int_src (
+	acpi_table_entry_header *header, const unsigned long end)
 {
-	struct acpi_table_nmi_src *nmi_src = NULL;
+	struct acpi_table_plat_int_src *plintsrc;
+	int vector;
+
+	plintsrc = (struct acpi_table_plat_int_src *) header;
+
+	if (BAD_MADT_ENTRY(plintsrc, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	/*
+	 * Get vector assignment for this interrupt, set attributes,
+	 * and program the IOSAPIC routing table.
+	 */
+	vector = iosapic_register_platform_intr(plintsrc->type,
+						plintsrc->global_irq,
+						plintsrc->iosapic_vector,
+						plintsrc->eid,
+						plintsrc->id,
+						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+
+	platform_intr_list[plintsrc->type] = vector;
+	return 0;
+}
+
+
+static int __init
+acpi_parse_int_src_ovr (
+	acpi_table_entry_header *header, const unsigned long end)
+{
+	struct acpi_table_int_src_ovr *p;
+
+	p = (struct acpi_table_int_src_ovr *) header;
+
+	if (BAD_MADT_ENTRY(p, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
+				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+	return 0;
+}
+
+
+static int __init
+acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
+{
+	struct acpi_table_nmi_src *nmi_src;
 
 	nmi_src = (struct acpi_table_nmi_src*) header;
-	if (!nmi_src)
+
+	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	/* TBD: Support nimsrc entries? */
+	/* TBD: Support nimsrc entries */
+	return 0;
+}
+/* Hook from generic ACPI tables.c */
+void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	if (!strncmp(oem_id, "IBM", 3) &&
+	    (!strncmp(oem_table_id, "SERMOW", 6))){
+
+		/* Unfortunatly ITC_DRIFT is not yet part of the
+		 * official SAL spec, so the ITC_DRIFT bit is not
+		 * set by the BIOS on this hardware.
+		 */
+		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
+
+		/*Start cyclone clock*/
+		cyclone_setup(0);
+	}
+}
+
+static int __init
+acpi_parse_madt (unsigned long phys_addr, unsigned long size)
+{
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
+
+	/* remember the value for reference after free_initmem() */
+#ifdef CONFIG_ITANIUM
+	has_8259 = 1; /* Firmware on old Itanium systems is broken */
+#else
+	has_8259 = acpi_madt->flags.pcat_compat;
+#endif
+	iosapic_system_init(has_8259);
+
+	/* Get base address of IPI Message Block */
+
+	if (acpi_madt->lapic_address)
+		ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
+
+	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
+
+	acpi_madt_oem_check(acpi_madt->header.oem_id,
+		acpi_madt->header.oem_table_id);
 
 	return 0;
 }
+#endif
 
-#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
+#ifdef CONFIG_ACPI_NUMA
+
+#undef SLIT_DEBUG
+
+#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
 
+static int __initdata srat_num_cpus;			/* number of cpus */
+static u32 __initdata pxm_flag[PXM_FLAG_LEN];
+#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
+#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
+/* maps to convert between proximity domain and logical node ID */
+int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+int __initdata nid_to_pxm_map[MAX_NUMNODES];
+static struct acpi_table_slit __initdata *slit_table;
 
-static unsigned long __init
-acpi_scan_rsdp (
-	unsigned long		start,
-	unsigned long		length)
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ */
+void __init
+acpi_numa_slit_init (struct acpi_table_slit *slit)
+{
+	u32 len;
+
+	len = sizeof(struct acpi_table_header) + 8
+		+ slit->localities * slit->localities;
+	if (slit->header.length != len) {
+		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
+		       len, slit->header.length);
+		memset(numa_slit, 10, sizeof(numa_slit));
+		return;
+	}
+	slit_table = slit;
+}
+
+void __init
+acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
 {
-	unsigned long		offset = 0;
-	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
+	/* record this node in proximity bitmap */
+	pxm_bit_set(pa->proximity_domain);
+
+	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
+	/* nid should be overridden as logical node id later */
+	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
+	srat_num_cpus++;
+}
+
+void __init
+acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
+{
+	unsigned long paddr, size;
+	u8 pxm;
+	struct node_memblk_s *p, *q, *pend;
+
+	pxm = ma->proximity_domain;
+
+	/* fill node memory chunk structure */
+	paddr = ma->base_addr_hi;
+	paddr = (paddr << 32) | ma->base_addr_lo;
+	size = ma->length_hi;
+	size = (size << 32) | ma->length_lo;
+
+	/* Ignore disabled entries */
+	if (!ma->flags.enabled)
+		return;
 
-	/*
-	 * Scan all 16-byte boundaries of the physical memory region for the
-	 * RSDP signature.
-	 */
-	for (offset = 0; offset < length; offset += 16) {
-		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
-			continue;
-		return (start + offset);
+	/* record this node in proximity bitmap */
+	pxm_bit_set(pxm);
+
+	/* Insertion sort based on base address */
+	pend = &node_memblk[num_node_memblks];
+	for (p = &node_memblk[0]; p < pend; p++) {
+		if (paddr < p->start_paddr)
+			break;
+	}
+	if (p < pend) {
+		for (q = pend - 1; q >= p; q--)
+			*(q + 1) = *q;
+	}
+	p->start_paddr = paddr;
+	p->size = size;
+	p->nid = pxm;
+	num_node_memblks++;
+}
+
+void __init
+acpi_numa_arch_fixup (void)
+{
+	int i, j, node_from, node_to;
+
+	/* If there's no SRAT, fix the phys_id */
+	if (srat_num_cpus == 0) {
+		node_cpuid[0].phys_id = hard_smp_processor_id();
+		return;
 	}
 
+	/* calculate total number of nodes in system from PXM bitmap */
+	numnodes = 0;		/* init total nodes in system */
+
+	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
+	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
+	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
+		if (pxm_bit_test(i)) {
+			pxm_to_nid_map[i] = numnodes;
+			node_set_online(numnodes);
+			nid_to_pxm_map[numnodes++] = i;
+		}
+	}
+
+	/* set logical node id in memory chunk structure */
+	for (i = 0; i < num_node_memblks; i++)
+		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+
+	/* assign memory bank numbers for each chunk on each node */
+	for (i = 0; i < numnodes; i++) {
+		int bank;
+
+		bank = 0;
+		for (j = 0; j < num_node_memblks; j++)
+			if (node_memblk[j].nid == i)
+				node_memblk[j].bank = bank++;
+	}
+
+	/* set logical node id in cpu structure */
+	for (i = 0; i < srat_num_cpus; i++)
+		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+
+	printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
+	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
+
+	if (!slit_table) return;
+	memset(numa_slit, -1, sizeof(numa_slit));
+	for (i=0; i<slit_table->localities; i++) {
+		if (!pxm_bit_test(i))
+			continue;
+		node_from = pxm_to_nid_map[i];
+		for (j=0; j<slit_table->localities; j++) {
+			if (!pxm_bit_test(j))
+				continue;
+			node_to = pxm_to_nid_map[j];
+			node_distance(node_from, node_to) =
+				slit_table->entry[i*slit_table->localities + j];
+		}
+	}
+
+#ifdef SLIT_DEBUG
+	printk("ACPI 2.0 SLIT locality table:\n");
+	for (i = 0; i < numnodes; i++) {
+		for (j = 0; j < numnodes; j++)
+			printk("%03d ", node_distance(i,j));
+		printk("\n");
+	}
+#endif
+}
+#endif /* CONFIG_ACPI_NUMA */
+
+#if 0
+unsigned int
+acpi_register_gsi (u32 gsi, int polarity, int trigger)
+{
+	return acpi_register_irq(gsi, polarity, trigger);
+}
+EXPORT_SYMBOL(acpi_register_gsi);
+static int __init
+acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+{
+	struct acpi_table_header *fadt_header;
+	struct fadt_descriptor_rev2 *fadt;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+	if (fadt_header->revision != 3)
+		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */
+
+	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
+
+	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
+		acpi_kbd_controller_present = 0;
+
+	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
+		acpi_legacy_devices = 1;
+
+	acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
 	return 0;
 }
-
+#endif
 
 unsigned long __init
 acpi_find_rsdp (void)
 {
-	unsigned long		rsdp_phys = 0;
+	unsigned long rsdp_phys = 0;
 
-	/*
-	 * Scan memory looking for the RSDP signature. First search EBDA (low
-	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
-	 */
-	rsdp_phys = acpi_scan_rsdp (0, 0x400);
-	if (!rsdp_phys)
-		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
-
+	if (efi.acpi20)
+		rsdp_phys = __pa(efi.acpi20);
+	else if (efi.acpi)
+		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
	return rsdp_phys;
 }
 
-
-/*
- * acpi_boot_init()
- *  called from setup_arch(), always.
- *	1. maps ACPI tables for later use
- *	2. enumerates lapics
- *	3. enumerates io-apics
- *
- * side effects:
- *	acpi_lapic = 1 if LAPIC found
- *	acpi_ioapic = 1 if IOAPIC found
- *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
- *	if acpi_blacklisted() acpi_disabled = 1;
- *	acpi_irq_model=...
- *	...
- *
- * return value: (currently ignored)
- *	0: success
- *	!0: failure
- */
+#if 0
 int __init
 acpi_boot_init (void)
 {
-	int			result = 0;
-
-	if (acpi_disabled && !acpi_ht)
-		return(1);
 
 	/*
-	 * The default interrupt routing model is PIC (8259).  This gets
-	 * overriden if IOAPICs are enumerated (below).
-	 */
-	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
-
-	/*
-	 * Initialize the ACPI boot-time table parser.
-	 */
-	result = acpi_table_init();
-	if (result) {
-#ifndef XEN
-// hack for now, FIXME later
-		acpi_disabled = 1;
-#endif
-		return result;
-	}
-
-	result = acpi_blacklisted();
-	if (result) {
-		printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
-#ifndef XEN
-// hack for now, FIXME later
-		acpi_disabled = 1;
-#endif
-		return result;
-	}
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-	/*
	 * MADT
	 * ----
	 * Parse the Multiple APIC Description Table (MADT), if exists.
-	 * Note that this table provides platform SMP configuration 
+	 * Note that this table provides platform SMP configuration
	 * information -- the successor to MPS tables.
	 */
 
-	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-	if (!result) {
-		return 0;
-	}
-	else if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing MADT\n");
-		return result;
-	}
-	else if (result > 1)
-		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
-
-	/*
-	 * Local APIC
-	 * ----------
-	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
-	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
-	 */
-
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
-	if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
-		return result;
+	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		goto skip_madt;
 	}
 
-	mp_register_lapic_address(acpi_lapic_addr);
+	/* Local APIC */
+
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
-	if (!result) {
-		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
-		/* TBD: Cleanup to allow fallback to MPS */
-		return -ENODEV;
-	}
-	else if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
-		/* TBD: Cleanup to allow fallback to MPS */
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
+
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+
+	/* I/O APIC */
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
-	if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-		/* TBD: Cleanup to allow fallback to MPS */
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
 
-	acpi_lapic = 1;
+	/* System-Level Interrupt Routing */
 
-#endif /*CONFIG_X86_LOCAL_APIC*/
-
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
+	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
+		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
 
-	/*
-	 * I/O APIC
-	 * --------
-	 */
+	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+
+	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
  skip_madt:
 
	/*
-	 * ACPI interpreter is required to complete interrupt setup,
-	 * so if it is off, don't enumerate the io-apics with ACPI.
-	 * If MPS is present, it will handle them,
-	 * otherwise the system will stay in PIC mode
-	 */
-	if (acpi_disabled || acpi_noirq) {
-		return 1;
-	}
-
-	/*
-	 * if "noapic" boot option, don't look for IO-APICs
+	 * FADT says whether a legacy keyboard controller is present.
+	 * The FADT also contains an SCI_INT line, by which the system
+	 * gets interrupts such as power and sleep buttons.  If it's not
+	 * on a Legacy interrupt, it needs to be setup.
	 */
-	if (ioapic_setup_disabled()) {
-		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-			"due to 'noapic' option.\n");
-		return 1;
-	}
-
+	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
+		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
-	if (!result) {
-		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
-		return -ENODEV;
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
+		available_cpus = 1; /* We've got at least one of these, no? */
	}
-	else if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
-		return result;
-	}
+	smp_boot_data.cpu_count = available_cpus;
 
-	/* Build a default routing table for legacy (ISA) interrupts. */
-	mp_config_acpi_legacy_irqs();
-
-	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
-	if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
-		/* TBD: Cleanup to allow fallback to MPS */
-		return result;
+	smp_build_cpu_map();
+# ifdef CONFIG_ACPI_NUMA
+	if (srat_num_cpus == 0) {
+		int cpu, i = 1;
+		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
+			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
+				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
	}
+	build_cpu_to_node_map();
+# endif
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
+	return 0;
+}
+int
+acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
+{
+	int vector;
 
-	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
-	if (result < 0) {
-		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-		/* TBD: Cleanup to allow fallback to MPS */
-		return result;
-	}
-
-	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+	if (has_8259 && gsi < 16)
+		*irq = isa_irq_to_vector(gsi);
+	else {
+		vector = gsi_to_vector(gsi);
+		if (vector == -1)
+			return -1;
 
-	acpi_irq_balance_set(NULL);
-
-	acpi_ioapic = 1;
-
-	if (acpi_lapic && acpi_ioapic)
-		smp_found_config = 1;
-
-#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
-
+		*irq = vector;
+	}
	return 0;
 }
 
-#endif /*CONFIG_ACPI_BOOT*/
-
-#ifdef CONFIG_ACPI_BUS
-/*
- * "acpi_pic_sci=level" (current default)
- * programs the PIC-mode SCI to Level Trigger.
- * (NO-OP if the BIOS set Level Trigger already)
- *
- * If a PIC-mode SCI is not recogznied or gives spurious IRQ7's
- * it may require Edge Trigger -- use "acpi_pic_sci=edge"
- * (NO-OP if the BIOS set Edge Trigger already)
- *
- * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
- * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
- */
-
-static __initdata int	acpi_pic_sci_trigger;	/* 0: level, 1: edge */
-
-void __init
-acpi_pic_sci_set_trigger(unsigned int irq)
+int
+acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
 {
-	unsigned char mask = 1 << (irq & 7);
-	unsigned int port = 0x4d0 + (irq >> 3);
-	unsigned char val = inb(port);
-
-
-	printk(PREFIX "IRQ%d SCI:", irq);
-	if (!(val & mask)) {
-		printk(" Edge");
-
-		if (!acpi_pic_sci_trigger) {
-			printk(" set to Level");
-			outb(val | mask, port);
-		}
-	} else {
-		printk(" Level");
-
-		if (acpi_pic_sci_trigger) {
-			printk(" set to Edge");
-			outb(val | mask, port);
-		}
-	}
-	printk(" Trigger.\n");
-}
-
-int __init
-acpi_pic_sci_setup(char *str)
-{
-	while (str && *str) {
-		if (strncmp(str, "level", 5) == 0)
-			acpi_pic_sci_trigger = 0;	/* force level trigger */
-		if (strncmp(str, "edge", 4) == 0)
-			acpi_pic_sci_trigger = 1;	/* force edge trigger */
-		str = strchr(str, ',');
-		if (str)
-			str += strspn(str, ", \t");
-	}
-	return 1;
-}
-
-__setup("acpi_pic_sci=", acpi_pic_sci_setup);
-
-#endif /* CONFIG_ACPI_BUS */
-
-
-
-/* --------------------------------------------------------------------------
-                              Low-Level Sleep Support
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_SLEEP
-
-#define DEBUG
-
-#ifdef DEBUG
-#include <xen/serial.h>
-#endif
-
-/* address in low memory of the wakeup routine. */
-unsigned long acpi_wakeup_address = 0;
-
-/* new page directory that we will be using */
-static pmd_t *pmd;
-
-/* saved page directory */
-static pmd_t saved_pmd;
-
-/* page which we'll use for the new page directory */
-static pte_t *ptep;
-
-extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
+	if (has_8259 && gsi < 16)
+		return isa_irq_to_vector(gsi);
 
-/*
- * acpi_create_identity_pmd
- *
- * Create a new, identity mapped pmd.
- *
- * Do this by creating new page directory, and marking all the pages as R/W
- * Then set it as the new Page Middle Directory.
- * And, of course, flush the TLB so it takes effect.
- *
- * We save the address of the old one, for later restoration.
- */
-static void acpi_create_identity_pmd (void)
-{
-	pgd_t *pgd;
-	int i;
-
-	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
-
-	/* fill page with low mapping */
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
-
-	pgd = pgd_offset(current->active_mm, 0);
-	pmd = pmd_alloc(current->mm,pgd, 0);
-
-	/* save the old pmd */
-	saved_pmd = *pmd;
-
-	/* set the new one */
-	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
-
-	/* flush the TLB */
-	local_flush_tlb();
-}
-
-/*
- * acpi_restore_pmd
- *
- * Restore the old pmd saved by acpi_create_identity_pmd and
- * free the page that said function alloc'd
- */
-static void acpi_restore_pmd (void)
-{
-	set_pmd(pmd, saved_pmd);
-	local_flush_tlb();
-	free_page((unsigned long)ptep);
+	return iosapic_register_intr(gsi,
+			(polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+			(trigger == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 }
-
-/**
- * acpi_save_state_mem - save kernel state
- *
- * Create an identity mapped page table and copy the wakeup routine to
- * low memory.
- */
-int acpi_save_state_mem (void)
-{
-	acpi_create_identity_pmd();
-	acpi_copy_wakeup_routine(acpi_wakeup_address);
-
-	return 0;
-}
-
-/**
- * acpi_save_state_disk - save kernel state to disk
- *
- */
-int acpi_save_state_disk (void)
-{
-	return 1;
-}
-
-/*
- * acpi_restore_state
- */
-void acpi_restore_state_mem (void)
-{
-	acpi_restore_pmd();
-}
-
-/**
- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
- *
- * We allocate a page in low memory for the wakeup
- * routine for when we come back from a sleep state. The
- * runtime allocator allows specification of <16M pages, but not
- * <1M pages.
- */
-void __init acpi_reserve_bootmem(void)
-{
-	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
-	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
-}
-
-void do_suspend_lowlevel_s4bios(int resume)
-{
-	if (!resume) {
-		save_processor_context();
-		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
-		acpi_enter_sleep_state_s4bios();
-		return;
-	}
-acpi_sleep_done:
-	restore_processor_context();
-}
-
-
-#endif /*CONFIG_ACPI_SLEEP*/
-
+EXPORT_SYMBOL(acpi_register_irq);
+#endif
+#endif /* CONFIG_ACPI_BOOT */
--- a/xen/arch/ia64/asm-offsets.c	Mon May 23 15:22:15 2005 +0000
+++ b/xen/arch/ia64/asm-offsets.c	Mon May 23 15:29:59 2005 +0000
@@ -9,6 +9,9 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <public/xen.h>
+#ifdef CONFIG_VTI
+#include <asm/tlb.h>
+#endif // CONFIG_VTI
 
 #define task_struct exec_domain
 
@@ -93,6 +96,24 @@ void foo(void)
 	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
 	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
 	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
+#ifdef CONFIG_VTI
+	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct xen_regs, r4));
+	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct xen_regs, r5));
+	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct xen_regs, r6));
+	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct xen_regs, r7));
+	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct xen_regs, cr_iipa));
+	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
+	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
+	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
+	DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
+	DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
+	DEFINE(RFI_IFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_ifs));
+	DEFINE(RFI_PFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_pfs));
+	DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr5));
+	DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr6));
+	DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr7));
+	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mpta));
+#endif //CONFIG_VTI
 	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
 	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
 	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
@@ -164,6 +185,13 @@ void foo(void)
 
 	BLANK();
 
+#ifdef CONFIG_VTI
+	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
+	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
+	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
+
+	BLANK();
+#endif //CONFIG_VTI
 	//DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
 	//DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
 	//DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
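(Editor's note: asm-offsets.c is compiled only to assembly — the -S rule in the Makefile hunk above — and each DEFINE() plants a marker in the generated .s file that the build then scrapes into asm-offsets.h; the Makefile's dummy "#define IA64_TASK_SIZE 0" merely bootstraps the first compile. A sketch of the conventional Linux-style mechanism, which this file is presumed to follow; the struct and symbol names below are illustrative.)

#include <stddef.h>

/* Emits a line of the form "->SYM <value> <comment>" into the generated
 * assembly; the build greps these markers out of asm-offsets.s to write
 * asm-offsets.h for use by hand-written assembly like vmx_entry.S. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )

struct example { long a, b; };

void foo(void)
{
	DEFINE(EXAMPLE_B_OFFSET, offsetof(struct example, b));
	BLANK();
}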
--- a/xen/arch/ia64/dom_fw.c	Mon May 23 15:22:15 2005 +0000
+++ b/xen/arch/ia64/dom_fw.c	Mon May 23 15:29:59 2005 +0000
@@ -13,6 +13,7 @@
 #include <asm/io.h>
 #include <asm/pal.h>
 #include <asm/sal.h>
+#include <xen/acpi.h>
 
 #include <asm/dom_fw.h>
 
@@ -297,6 +298,71 @@ void print_md(efi_memory_desc_t *md)
 #endif
 }
 
+#define LSAPIC_NUM 16	// TEMP
+static u32 lsapic_flag=1;
+
+/* Provide only one LP to guest */
+static int
+acpi_update_lsapic (acpi_table_entry_header *header)
+{
+	struct acpi_table_lsapic *lsapic;
+
+	lsapic = (struct acpi_table_lsapic *) header;
+	if (!lsapic)
+		return -EINVAL;
+
+	if (lsapic->flags.enabled && lsapic_flag) {
+		printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
+		lsapic_flag = 0; /* disable all the following processros */
+	} else if (lsapic->flags.enabled) {
+		printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
+		lsapic->flags.enabled = 0;
+	} else
+		printk("lsapic entry is already disabled: 0x%lx\n", (u64)lsapic);
+
+	return 0;
+}
+
+static int
+acpi_update_madt_checksum (unsigned long phys_addr, unsigned long size)
+{
+	u8 checksum=0;
+	u8* ptr;
+	int len;
+	struct acpi_table_madt* acpi_madt;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
+	acpi_madt->header.checksum=0;
+
+	/* re-calculate MADT checksum */
+	ptr = (u8*)acpi_madt;
+	len = acpi_madt->header.length;
+	while (len>0){
+		checksum = (u8)( checksum + (*ptr++) );
+		len--;
+	}
+	acpi_madt->header.checksum = 0x0 - checksum;
+
+	return 0;
+}
+
+/* base is physical address of acpi table */
+void touch_acpi_table(void)
+{
+	u64 count = 0;
+	count = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, NR_CPUS);
+	if ( count < 1)
+		printk("Error parsing MADT - no LAPIC entires\n");
+	printk("Total %d lsapic entry\n", count);
+	acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
+
+	return;
+}
+
+
 struct ia64_boot_param *
 dom_fw_init (struct domain *d, char *args, int arglen, char *fw_mem, int fw_mem_size)
 {
@@ -414,6 +480,9 @@ dom_fw_init (struct domain *d, char *arg
 			printf(" MPS=%0xlx",efi_tables[i].table);
 			i++;
 		}
+
+	touch_acpi_table();
+
 	if (efi.acpi20) {
 		efi_tables[i].guid = ACPI_20_TABLE_GUID;
 		efi_tables[i].table = __pa(efi.acpi20);
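(Editor's note: acpi_update_lsapic() above edits the MADT in place, leaving only the first enabled LSAPIC so the guest sees one logical processor, which is why acpi_update_madt_checksum() must then re-seal the table. It relies on the ACPI rule that a table's bytes, checksum field included, must sum to zero mod 256 over header.length bytes. A self-contained sketch of the same fix-up; the function name is illustrative, and csum_off is 9 for the standard ACPI table header.)

#include <stdint.h>
#include <stddef.h>

/* After editing an ACPI table in place, zero the stored checksum,
 * re-sum every byte, and store the two's complement so the whole
 * table once again sums to 0 (mod 256). */
static void acpi_fix_checksum(uint8_t *table, size_t len, size_t csum_off)
{
	uint8_t sum = 0;
	size_t i;

	table[csum_off] = 0;
	for (i = 0; i < len; i++)
		sum += table[i];
	table[csum_off] = (uint8_t)(0u - sum);
}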
7.1 --- a/xen/arch/ia64/domain.c Mon May 23 15:22:15 2005 +0000 7.2 +++ b/xen/arch/ia64/domain.c Mon May 23 15:29:59 2005 +0000 7.3 @@ -3,6 +3,11 @@ 7.4 * 7.5 * Pentium III FXSR, SSE support 7.6 * Gareth Hughes <gareth@valinux.com>, May 2000 7.7 + * 7.8 + * Copyright (C) 2005 Intel Co 7.9 + * Kun Tian (Kevin Tian) <kevin.tian@intel.com> 7.10 + * 7.11 + * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add CONFIG_VTI domain support 7.12 */ 7.13 7.14 #include <xen/config.h> 7.15 @@ -32,12 +37,23 @@ 7.16 #include <asm/asm-offsets.h> /* for IA64_THREAD_INFO_SIZE */ 7.17 7.18 #include <asm/vcpu.h> /* for function declarations */ 7.19 +#ifdef CONFIG_VTI 7.20 +#include <asm/vmx.h> 7.21 +#include <asm/vmx_vcpu.h> 7.22 +#include <asm/pal.h> 7.23 +#endif // CONFIG_VTI 7.24 7.25 #define CONFIG_DOMAIN0_CONTIGUOUS 7.26 unsigned long dom0_start = -1L; 7.27 +#ifdef CONFIG_VTI 7.28 unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable 7.29 //FIXME: alignment should be 256MB, lest Linux use a 256MB page size 7.30 +unsigned long dom0_align = 256*1024*1024; 7.31 +#else // CONFIG_VTI 7.32 +unsigned long dom0_size = 256*1024*1024; //FIXME: Should be configurable 7.33 +//FIXME: alignment should be 256MB, lest Linux use a 256MB page size 7.34 unsigned long dom0_align = 64*1024*1024; 7.35 +#endif // CONFIG_VTI 7.36 #ifdef DOMU_BUILD_STAGING 7.37 unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable 7.38 unsigned long domU_staging_start; 7.39 @@ -151,6 +167,58 @@ void arch_free_exec_domain_struct(struct 7.40 free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER); 7.41 } 7.42 7.43 +#ifdef CONFIG_VTI 7.44 +void arch_do_createdomain(struct exec_domain *ed) 7.45 +{ 7.46 + struct domain *d = ed->domain; 7.47 + struct thread_info *ti = alloc_thread_info(ed); 7.48 + 7.49 + /* If domain is VMX domain, shared info area is created 7.50 + * by domain and then domain notifies HV by specific hypercall. 7.51 + * If domain is xenolinux, shared info area is created by 7.52 + * HV. 7.53 + * Since we have no idea about whether domain is VMX now, 7.54 + * (dom0 when parse and domN when build), postpone possible 7.55 + * allocation. 7.56 + */ 7.57 + 7.58 + /* FIXME: Because full virtual cpu info is placed in this area, 7.59 + * it's unlikely to put it into one shareinfo page. Later 7.60 + * need split vcpu context from vcpu_info and conforms to 7.61 + * normal xen convention. 7.62 + */ 7.63 + d->shared_info = NULL; 7.64 + ed->vcpu_info = (void *)alloc_xenheap_page(); 7.65 + if (!ed->vcpu_info) { 7.66 + printk("ERROR/HALTING: CAN'T ALLOC PAGE\n"); 7.67 + while (1); 7.68 + } 7.69 + memset(ed->vcpu_info, 0, PAGE_SIZE); 7.70 + 7.71 + /* Clear thread_info to clear some important fields, like preempt_count */ 7.72 + memset(ti, 0, sizeof(struct thread_info)); 7.73 + 7.74 + /* Allocate per-domain vTLB and vhpt */ 7.75 + ed->arch.vtlb = init_domain_tlb(ed); 7.76 + 7.77 + /* Physical->machine page table will be allocated when 7.78 + * final setup, since we have no the maximum pfn number in 7.79 + * this stage 7.80 + */ 7.81 + 7.82 + /* FIXME: This is identity mapped address for xenheap. 7.83 + * Do we need it at all? 7.84 + */ 7.85 + d->xen_vastart = 0xf000000000000000; 7.86 + d->xen_vaend = 0xf300000000000000; 7.87 + d->breakimm = 0x1000; 7.88 + 7.89 + // stay on kernel stack because may get interrupts! 
7.90 + // ia64_ret_from_clone (which b0 gets in new_thread) switches 7.91 + // to user stack 7.92 + ed->arch._thread.on_ustack = 0; 7.93 +} 7.94 +#else // CONFIG_VTI 7.95 void arch_do_createdomain(struct exec_domain *ed) 7.96 { 7.97 struct domain *d = ed->domain; 7.98 @@ -193,6 +261,7 @@ void arch_do_createdomain(struct exec_do 7.99 // to user stack 7.100 ed->arch._thread.on_ustack = 0; 7.101 } 7.102 +#endif // CONFIG_VTI 7.103 7.104 void arch_do_boot_vcpu(struct exec_domain *p) 7.105 { 7.106 @@ -216,6 +285,70 @@ void domain_relinquish_resources(struct 7.107 dummy(); 7.108 } 7.109 7.110 +#ifdef CONFIG_VTI 7.111 +void new_thread(struct exec_domain *ed, 7.112 + unsigned long start_pc, 7.113 + unsigned long start_stack, 7.114 + unsigned long start_info) 7.115 +{ 7.116 + struct domain *d = ed->domain; 7.117 + struct switch_stack *sw; 7.118 + struct xen_regs *regs; 7.119 + struct ia64_boot_param *bp; 7.120 + extern char ia64_ret_from_clone; 7.121 + extern char saved_command_line[]; 7.122 + //char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro"; 7.123 + 7.124 + 7.125 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS 7.126 + if (d == dom0) start_pc += dom0_start; 7.127 +#endif 7.128 + regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1; 7.129 + sw = (struct switch_stack *) regs - 1; 7.130 + /* Sanity Clear */ 7.131 + memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack)); 7.132 + 7.133 + if (VMX_DOMAIN(ed)) { 7.134 + /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */ 7.135 + regs->cr_ipsr = 0x501008826008; /* Needs to be expanded as a macro */ 7.136 + } else { 7.137 + regs->cr_ipsr = (ia64_getreg(_IA64_REG_PSR) 7.138 + | IA64_PSR_BITS_TO_SET | IA64_PSR_BN) 7.139 + & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS); 7.140 + regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2 7.141 + } 7.142 + regs->cr_iip = start_pc; 7.143 + regs->ar_rsc = 0x0; 7.144 + regs->cr_ifs = 0x0; 7.145 + regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT; 7.146 + sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET; 7.147 + printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n", 7.148 + ed,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8); 7.149 + printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr); 7.150 + 7.151 + sw->b0 = (unsigned long) &ia64_ret_from_clone; 7.152 + ed->arch._thread.ksp = (unsigned long) sw - 16; 7.153 + printk("new_thread, about to call init_all_rr\n"); 7.154 + if (VMX_DOMAIN(ed)) { 7.155 + vmx_init_all_rr(ed); 7.156 + } else 7.157 + init_all_rr(ed); 7.158 + // set up boot parameters (and fake firmware) 7.159 + printk("new_thread, about to call dom_fw_setup\n"); 7.160 + VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME 7.161 + printk("new_thread, done with dom_fw_setup\n"); 7.162 + 7.163 + if (VMX_DOMAIN(ed)) { 7.164 + /* Virtual processor context setup */ 7.165 + VMX_VPD(ed, vpsr) = IA64_PSR_BN; 7.166 + VPD_CR(ed, dcr) = 0; 7.167 + } else { 7.168 + // don't forget to set this! 7.169 + ed->vcpu_info->arch.banknum = 1; 7.170 + } 7.171 +} 7.172 +#else // CONFIG_VTI 7.173 + 7.174 // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread() 7.175 // and linux/arch/ia64/kernel/process.c:kernel_thread() 7.176 void new_thread(struct exec_domain *ed, 7.177 @@ -272,6 +405,7 @@ printk("new_thread, done with dom_fw_set 7.178 // don't forget to set this!
7.179 ed->vcpu_info->arch.banknum = 1; 7.180 } 7.181 +#endif // CONFIG_VTI 7.182 7.183 static struct page * map_new_domain0_page(unsigned long mpaddr) 7.184 { 7.185 @@ -599,6 +733,213 @@ domU_staging_write_32(unsigned long at, 7.186 } 7.187 #endif 7.188 7.189 +#ifdef CONFIG_VTI 7.190 +/* Depending on whether the domain is a vmx one, different context may be set up 7.191 + * here. 7.192 + */ 7.193 +void 7.194 +post_arch_do_create_domain(struct exec_domain *ed, int vmx_domain) 7.195 +{ 7.196 + struct domain *d = ed->domain; 7.197 + 7.198 + if (!vmx_domain) { 7.199 + d->shared_info = (void*)alloc_xenheap_page(); 7.200 + if (!d->shared_info) 7.201 + panic("Allocation of shared info for non-vmx domain failed.\n"); 7.202 + d->shared_info_va = 0xfffd000000000000; 7.203 + 7.204 + printk("Build shared info for non-vmx domain\n"); 7.205 + build_shared_info(d); 7.206 + /* Setup start info area */ 7.207 + } 7.208 +} 7.209 + 7.210 +/* For a VMX domain, this is invoked when the kernel inside the domain 7.211 + * actively requests it 7.212 + */ 7.213 +void build_shared_info(struct domain *d) 7.214 +{ 7.215 + int i; 7.216 + 7.217 + /* Set up shared-info area. */ 7.218 + update_dom_time(d); 7.219 + d->shared_info->domain_time = 0; 7.220 + 7.221 + /* Mask all upcalls... */ 7.222 + for ( i = 0; i < MAX_VIRT_CPUS; i++ ) 7.223 + d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1; 7.224 + 7.225 + /* ... */ 7.226 +} 7.227 + 7.228 +extern unsigned long running_on_sim; 7.229 +unsigned int vmx_dom0 = 0; 7.230 +int construct_dom0(struct domain *d, 7.231 + unsigned long image_start, unsigned long image_len, 7.232 + unsigned long initrd_start, unsigned long initrd_len, 7.233 + char *cmdline) 7.234 +{ 7.235 + char *dst; 7.236 + int i, rc; 7.237 + unsigned long pfn, mfn; 7.238 + unsigned long nr_pt_pages; 7.239 + unsigned long count; 7.240 + unsigned long alloc_start, alloc_end; 7.241 + struct pfn_info *page = NULL; 7.242 + start_info_t *si; 7.243 + struct exec_domain *ed = d->exec_domain[0]; 7.244 + struct domain_setup_info dsi; 7.245 + unsigned long p_start; 7.246 + unsigned long pkern_start; 7.247 + unsigned long pkern_entry; 7.248 + unsigned long pkern_end; 7.249 + unsigned long ret; 7.250 + unsigned long progress = 0; 7.251 + 7.252 +//printf("construct_dom0: starting\n"); 7.253 + /* Sanity!
*/ 7.254 +#ifndef CLONE_DOMAIN0 7.255 + if ( d != dom0 ) 7.256 + BUG(); 7.257 + if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 7.258 + BUG(); 7.259 +#endif 7.260 + 7.261 + printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d); 7.262 + memset(&dsi, 0, sizeof(struct domain_setup_info)); 7.263 + 7.264 + printk("*** LOADING DOMAIN 0 ***\n"); 7.265 + 7.266 + alloc_start = dom0_start; 7.267 + alloc_end = dom0_start + dom0_size; 7.268 + d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE; 7.269 + image_start = __va(ia64_boot_param->initrd_start); 7.270 + image_len = ia64_boot_param->initrd_size; 7.271 + 7.272 + dsi.image_addr = (unsigned long)image_start; 7.273 + dsi.image_len = image_len; 7.274 + rc = parseelfimage(&dsi); 7.275 + if ( rc != 0 ) 7.276 + return rc; 7.277 + 7.278 + /* Temp workaround */ 7.279 + if (running_on_sim) 7.280 + dsi.xen_elf_image = 1; 7.281 + 7.282 + if ((!vmx_enabled) && !dsi.xen_elf_image) { 7.283 + printk("Lack of hardware support for unmodified vmx dom0\n"); 7.284 + panic(""); 7.285 + } 7.286 + 7.287 + if (vmx_enabled && !dsi.xen_elf_image) { 7.288 + printk("Dom0 is vmx domain!\n"); 7.289 + vmx_dom0 = 1; 7.290 + } 7.291 + 7.292 + p_start = dsi.v_start; 7.293 + pkern_start = dsi.v_kernstart; 7.294 + pkern_end = dsi.v_kernend; 7.295 + pkern_entry = dsi.v_kernentry; 7.296 + 7.297 + printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n", 7.298 + p_start,pkern_start,pkern_end,pkern_entry); 7.299 + 7.300 + if ( (p_start & (PAGE_SIZE-1)) != 0 ) 7.301 + { 7.302 + printk("Initial guest OS must load to a page boundary.\n"); 7.303 + return -EINVAL; 7.304 + } 7.305 + 7.306 + printk("METAPHYSICAL MEMORY ARRANGEMENT:\n" 7.307 + " Kernel image: %lx->%lx\n" 7.308 + " Entry address: %lx\n" 7.309 + " Init. ramdisk: (NOT IMPLEMENTED YET)\n", 7.310 + pkern_start, pkern_end, pkern_entry); 7.311 + 7.312 + if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) ) 7.313 + { 7.314 + printk("Initial guest OS requires too much space\n" 7.315 + "(%luMB is greater than %luMB limit)\n", 7.316 + (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20); 7.317 + return -ENOMEM; 7.318 + } 7.319 + 7.320 + // Other sanity checks on the Dom0 image 7.321 + 7.322 + /* Construct a frame-allocation list for the initial domain, since these 7.323 + * pages are allocated by the boot allocator and their pfns are not set up properly 7.324 + */ 7.325 + for ( mfn = (alloc_start>>PAGE_SHIFT); 7.326 + mfn < (alloc_end>>PAGE_SHIFT); 7.327 + mfn++ ) 7.328 + { 7.329 + page = &frame_table[mfn]; 7.330 + page_set_owner(page, d); 7.331 + page->u.inuse.type_info = 0; 7.332 + page->count_info = PGC_allocated | 1; 7.333 + list_add_tail(&page->list, &d->page_list); 7.334 + 7.335 + /* Construct 1:1 mapping */ 7.336 + machine_to_phys_mapping[mfn] = mfn; 7.337 + } 7.338 + 7.339 + post_arch_do_create_domain(ed, vmx_dom0); 7.340 + 7.341 + /* Load Dom0 image to its own memory */ 7.342 + loaddomainelfimage(d,image_start); 7.343 + 7.344 + /* Copy the initial ramdisk.
*/ 7.345 + 7.346 + /* Sync d/i cache conservatively */ 7.347 + ret = ia64_pal_cache_flush(4, 0, &progress, NULL); 7.348 + if (ret != PAL_STATUS_SUCCESS) 7.349 + panic("PAL CACHE FLUSH failed for dom0.\n"); 7.350 + printk("Sync i/d cache for dom0 image succeeded\n"); 7.351 + 7.352 + /* Physical mode emulation initialization, including 7.353 + * emulation ID allocation and related memory requests 7.354 + */ 7.355 + physical_mode_init(ed); 7.356 + /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt 7.357 + * for dom0 7.358 + */ 7.359 + d->arch.pmt = NULL; 7.360 + 7.361 + /* Give up the VGA console if DOM0 is configured to grab it. */ 7.362 + if (cmdline != NULL) 7.363 + console_endboot(strstr(cmdline, "tty0") != NULL); 7.364 + 7.365 + /* VMX specific construction for Dom0, if the hardware supports VMX 7.366 + * and Dom0 is an unmodified image 7.367 + */ 7.368 + printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d); 7.369 + if (vmx_dom0) 7.370 + vmx_final_setup_domain(dom0); 7.371 + 7.372 + /* vpd is ready now */ 7.373 + vlsapic_reset(ed); 7.374 + vtm_init(ed); 7.375 + 7.376 + set_bit(_DOMF_constructed, &d->domain_flags); 7.377 + new_thread(ed, pkern_entry, 0, 0); 7.378 + 7.379 + // FIXME: Hack for keyboard input 7.380 +#ifdef CLONE_DOMAIN0 7.381 +if (d == dom0) 7.382 +#endif 7.383 + serial_input_init(); 7.384 + if (d == dom0) { 7.385 + ed->vcpu_info->arch.delivery_mask[0] = -1L; 7.386 + ed->vcpu_info->arch.delivery_mask[1] = -1L; 7.387 + ed->vcpu_info->arch.delivery_mask[2] = -1L; 7.388 + ed->vcpu_info->arch.delivery_mask[3] = -1L; 7.389 + } 7.390 + else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask); 7.391 + 7.392 + return 0; 7.393 +} 7.394 +#else //CONFIG_VTI 7.395 + 7.396 int construct_dom0(struct domain *d, 7.397 unsigned long image_start, unsigned long image_len, 7.398 unsigned long initrd_start, unsigned long initrd_len, 7.399 char *cmdline) 7.400 @@ -771,6 +1112,7 @@ if (d == dom0) 7.401 7.402 return 0; 7.403 } 7.404 +#endif // CONFIG_VTI 7.405 7.406 // FIXME: When dom0 can construct domains, this goes away (or is rewritten) 7.407 int construct_domU(struct domain *d,
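Aside on the magic cr_ipsr value 0x501008826008 installed for a VMX guest in new_thread above: it decodes exactly as its comment says. A hedged sketch of the macro its "Needs to be expanded as a macro" FIXME asks for, composed from the kregs.h masks (the macro name is hypothetical; IA64_PSR_VM is introduced by this changeset under CONFIG_VTI):

    /* dt/rt/it, i/ic, si, vm/bn and ac set; everything else clear */
    #define VMX_GUEST_INITIAL_PSR \
        (IA64_PSR_AC | IA64_PSR_IC | IA64_PSR_I  | IA64_PSR_DT | \
         IA64_PSR_SI | IA64_PSR_RT | IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_VM)
    /* bits 3, 13, 14, 17, 23, 27, 36, 44 and 46, i.e. 0x501008826008 */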
8.1 --- a/xen/arch/ia64/lib/Makefile Mon May 23 15:22:15 2005 +0000 8.2 +++ b/xen/arch/ia64/lib/Makefile Mon May 23 15:29:59 2005 +0000 8.3 @@ -9,7 +9,7 @@ OBJS := __divsi3.o __udivsi3.o __modsi3. 8.4 bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \ 8.5 clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ 8.6 flush.o ip_fast_csum.o do_csum.o copy_user.o \ 8.7 - memset.o strlen.o memcpy.o swiotlb.o 8.8 + memset.o strlen.o memcpy.o 8.9 8.10 default: $(OBJS) 8.11 $(LD) -r -o ia64lib.o $(OBJS)
10.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 10.2 +++ b/xen/arch/ia64/mmio.c Mon May 23 15:29:59 2005 +0000 10.3 @@ -0,0 +1,325 @@ 10.4 + 10.5 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 10.6 +/* 10.7 + * mmio.c: MMIO emulation components. 10.8 + * Copyright (c) 2004, Intel Corporation. 10.9 + * 10.10 + * This program is free software; you can redistribute it and/or modify it 10.11 + * under the terms and conditions of the GNU General Public License, 10.12 + * version 2, as published by the Free Software Foundation. 10.13 + * 10.14 + * This program is distributed in the hope it will be useful, but WITHOUT 10.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10.16 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10.17 + * more details. 10.18 + * 10.19 + * You should have received a copy of the GNU General Public License along with 10.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 10.21 + * Place - Suite 330, Boston, MA 02111-1307 USA. 10.22 + * 10.23 + * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) 10.24 + * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) 10.25 + */ 10.26 + 10.27 +#include <linux/sched.h> 10.28 +#include <asm/tlb.h> 10.29 +#include <asm/vmx_mm_def.h> 10.30 +#include <asm/gcc_intrin.h> 10.31 +#include <xen/interrupt.h> 10.32 +#include <asm/vmx_vcpu.h> 10.33 + 10.34 +struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base) 10.35 +{ 10.36 + int i; 10.37 + for (i=0; mio_base[i].iot != NOT_IO; i++ ) { 10.38 + if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end ) 10.39 + return &mio_base[i]; 10.40 + } 10.41 + return NULL; 10.42 +} 10.43 + 10.44 + 10.45 +extern void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma); 10.46 +static inline void mmio_write(VCPU *vcpu, void *src, u64 dest_pa, size_t s, int ma) 10.47 +{ 10.48 + struct virutal_platform_def *v_plat; 10.49 + struct mmio_list *mio; 10.50 + 10.51 + v_plat = vmx_vcpu_get_plat(vcpu); 10.52 + mio = lookup_mmio(dest_pa, v_plat->mmio); 10.53 + if ( mio == NULL ) 10.54 + panic ("Wrong address for MMIO\n"); 10.55 + 10.56 + switch (mio->iot) { 10.57 + case PIB_MMIO: 10.58 + pib_write(vcpu, src, dest_pa - v_plat->pib_base, s, ma); 10.59 + break; 10.60 + case VGA_BUFF: 10.61 + case CHIPSET_IO: 10.62 + case LOW_MMIO: 10.63 + case LEGACY_IO: 10.64 + case IO_SAPIC: 10.65 + default: 10.66 + break; 10.67 + } 10.68 + return; 10.69 +} 10.70 + 10.71 +static inline void mmio_read(VCPU *vcpu, u64 src_pa, void *dest, size_t s, int ma) 10.72 +{ 10.73 + struct virutal_platform_def *v_plat; 10.74 + struct mmio_list *mio; 10.75 + 10.76 + v_plat = vmx_vcpu_get_plat(vcpu); 10.77 + mio = lookup_mmio(src_pa, v_plat->mmio); 10.78 + if ( mio == NULL ) 10.79 + panic ("Wrong address for MMIO\n"); 10.80 + 10.81 + switch (mio->iot) { 10.82 + case PIB_MMIO: 10.83 + pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma); 10.84 + break; 10.85 + case VGA_BUFF: 10.86 + case CHIPSET_IO: 10.87 + case LOW_MMIO: 10.88 + case LEGACY_IO: 10.89 + case IO_SAPIC: 10.90 + default: 10.91 + break; 10.92 + } 10.93 + return; 10.94 +} 10.95 + 10.96 +/* 10.97 + * Read or write data in guest virtual address mode. 
10.98 + */ 10.99 + 10.100 +void 10.101 +memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s) 10.102 +{ 10.103 + uint64_t pa; 10.104 + 10.105 + if (!vtlb->nomap) 10.106 + panic("Normal memory write shouldn't go to this point!"); 10.107 + pa = PPN_2_PA(vtlb->ppn); 10.108 + pa += POFFSET((u64)dest, vtlb->ps); 10.109 + mmio_write (vcpu, src, pa, s, vtlb->ma); 10.110 +} 10.111 + 10.112 + 10.113 +void 10.114 +memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s) 10.115 +{ 10.116 + uint64_t pa = (uint64_t)dest; 10.117 + int ma; 10.118 + 10.119 + if ( pa & (1UL <<63) ) { 10.120 + // UC 10.121 + ma = 4; 10.122 + pa <<=1; 10.123 + pa >>=1; 10.124 + } 10.125 + else { 10.126 + // WBL 10.127 + ma = 0; // using WB for WBL 10.128 + } 10.129 + mmio_write (vcpu, src, pa, s, ma); 10.130 +} 10.131 + 10.132 +void 10.133 +memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s) 10.134 +{ 10.135 + uint64_t pa; 10.136 + 10.137 + if (!vtlb->nomap) 10.138 + panic("Normal memory write shouldn't go to this point!"); 10.139 + pa = PPN_2_PA(vtlb->ppn); 10.140 + pa += POFFSET((u64)src, vtlb->ps); 10.141 + 10.142 + mmio_read(vcpu, pa, dest, s, vtlb->ma); 10.143 +} 10.144 + 10.145 +void 10.146 +memread_p(VCPU *vcpu, void *src, void *dest, size_t s) 10.147 +{ 10.148 + uint64_t pa = (uint64_t)src; 10.149 + int ma; 10.150 + 10.151 + if ( pa & (1UL <<63) ) { 10.152 + // UC 10.153 + ma = 4; 10.154 + pa <<=1; 10.155 + pa >>=1; 10.156 + } 10.157 + else { 10.158 + // WBL 10.159 + ma = 0; // using WB for WBL 10.160 + } 10.161 + mmio_read(vcpu, pa, dest, s, ma); 10.162 +} 10.163 + 10.164 +#define PIB_LOW_HALF(ofst) !(ofst&(1<<20)) 10.165 +#define PIB_OFST_INTA 0x1E0000 10.166 +#define PIB_OFST_XTP 0x1E0008 10.167 + 10.168 + 10.169 +/* 10.170 + * Deliver IPI message. (Only U-VP is supported now) 10.171 + * offset: address offset to IPI space. 10.172 + * value: deliver value. 10.173 + */ 10.174 +static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector) 10.175 +{ 10.176 +#ifdef IPI_DEBUG 10.177 + printf ("deliver_ipi %lx %lx\n",dm,vector); 10.178 +#endif 10.179 + switch ( dm ) { 10.180 + case 0: // INT 10.181 + vmx_vcpu_pend_interrupt (vcpu, vector); 10.182 + break; 10.183 + case 2: // PMI 10.184 + // TODO -- inject guest PMI 10.185 + panic ("Inject guest PMI!\n"); 10.186 + break; 10.187 + case 4: // NMI 10.188 + vmx_vcpu_pend_interrupt (vcpu, 2); 10.189 + break; 10.190 + case 5: // INIT 10.191 + // TODO -- inject guest INIT 10.192 + panic ("Inject guest INIT!\n"); 10.193 + break; 10.194 + case 7: // ExtINT 10.195 + vmx_vcpu_pend_interrupt (vcpu, 0); 10.196 + break; 10.197 + 10.198 + case 1: 10.199 + case 3: 10.200 + case 6: 10.201 + default: 10.202 + panic ("Deliver reserved IPI!\n"); 10.203 + break; 10.204 + } 10.205 +} 10.206 + 10.207 +/* 10.208 + * TODO: Use hash table for the lookup. 10.209 + */ 10.210 +static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid) 10.211 +{ 10.212 + int i; 10.213 + VCPU *vcpu; 10.214 + LID lid; 10.215 + 10.216 + for (i=0; i<MAX_VIRT_CPUS; i++) { 10.217 + vcpu = d->exec_domain[i]; 10.218 + lid.val = VPD_CR(vcpu, lid); 10.219 + if ( lid.id == id && lid.eid == eid ) { 10.220 + return vcpu; 10.221 + } 10.222 + } 10.223 + return NULL; 10.224 +} 10.225 + 10.226 +/* 10.227 + * execute write IPI op. 
+ */ 10.229 +static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value) 10.230 +{ 10.231 + VCPU *target_cpu; 10.232 + 10.233 + target_cpu = lid_2_vcpu(vcpu->domain, 10.234 + ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid); 10.235 + if ( target_cpu == NULL ) panic("Unknown IPI cpu\n"); 10.236 + if ( target_cpu == vcpu ) { 10.237 + // IPI to self 10.238 + deliver_ipi (vcpu, ((ipi_d_t)value).dm, 10.239 + ((ipi_d_t)value).vector); 10.240 + return 1; 10.241 + } 10.242 + else { 10.243 + // TODO: send Host IPI to inject guest SMP IPI interruption 10.244 + panic ("SM-VP not supported!\n"); 10.245 + return 0; 10.246 + } 10.247 +} 10.248 + 10.249 +void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma) 10.250 +{ 10.251 + 10.252 + switch (pib_off) { 10.253 + case PIB_OFST_INTA: 10.254 + panic("Undefined write on PIB INTA\n"); 10.255 + break; 10.256 + case PIB_OFST_XTP: 10.257 + if ( s == 1 && ma == 4 /* UC */) { 10.258 + vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src; 10.259 + } 10.260 + else { 10.261 + panic("Undefined write on PIB XTP\n"); 10.262 + } 10.263 + break; 10.264 + default: 10.265 + if ( PIB_LOW_HALF(pib_off) ) { // lower half 10.266 + if ( s != 8 || ma != 0x4 /* UC */ ) { 10.267 + panic("Undefined IPI-LHF write!\n"); 10.268 + } 10.269 + else { 10.270 + write_ipi(vcpu, pib_off, *(uint64_t *)src); 10.271 + // TODO for SM-VP 10.272 + } 10.273 + } 10.274 + else { // upper half 10.275 + printf("IPI-UHF write %lx\n",pib_off); 10.276 + panic("Not supported yet for SM-VP\n"); 10.277 + } 10.278 + break; 10.279 + } 10.280 +} 10.281 + 10.282 +void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma) 10.283 +{ 10.284 + switch (pib_off) { 10.285 + case PIB_OFST_INTA: 10.286 + // TODO: emit the INTA cycle on the processor system bus. 10.287 + if ( s == 1 && ma == 4) { // 1 byte load 10.288 + // TODO: INTA read from IOSAPIC 10.289 + } 10.290 + else { 10.291 + panic("Undefined read on PIB INTA\n"); 10.292 + } 10.293 + break; 10.294 + case PIB_OFST_XTP: 10.295 + if ( s == 1 && ma == 4) { 10.296 + *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp; 10.297 + } 10.298 + else { 10.299 + panic("Undefined read on PIB XTP\n"); 10.300 + } 10.301 + break; 10.302 + default: 10.303 + if ( PIB_LOW_HALF(pib_off) ) { // lower half 10.304 + if ( s != 8 || ma != 4 ) { 10.305 + panic("Undefined IPI-LHF read!\n"); 10.306 + } 10.307 + else { 10.308 +#ifdef IPI_DEBUG 10.309 + printf("IPI-LHF read %lx\n",pib_off); 10.310 +#endif 10.311 + *(uint64_t *)dest = 0; // TODO for SM-VP 10.312 + } 10.313 + } 10.314 + else { // upper half 10.315 + if ( s != 1 || ma != 4 ) { 10.316 + panic("Undefined PIB-UHF read!\n"); 10.317 + } 10.318 + else { 10.319 +#ifdef IPI_DEBUG 10.320 + printf("IPI-UHF read %lx\n",pib_off); 10.321 +#endif 10.322 + *(uint8_t *)dest = 0; // TODO for SM-VP 10.323 + } 10.324 + } 10.325 + break; 10.326 + } 10.327 +} 10.328 +
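The dispatch in mmio.c above hinges on lookup_mmio scanning a NOT_IO-terminated array of {iot, start, end} ranges. A small illustration of how such a platform table could be laid out and queried; the table contents and addresses here are purely illustrative assumptions, not the changeset's real layout:

    static struct mmio_list sample_mmio[] = {
        { .iot = PIB_MMIO, .start = 0xfee00000UL, .end = 0xfeffffffUL }, /* hypothetical PIB window   */
        { .iot = LOW_MMIO, .start = 0xa0000000UL, .end = 0xafffffffUL }, /* hypothetical device range */
        { .iot = NOT_IO },                  /* sentinel that terminates the lookup_mmio scan */
    };

    struct mmio_list *hit = lookup_mmio(0xfee01000UL, sample_mmio);
    /* hit->iot == PIB_MMIO here; a NULL return means the address is not an emulated range */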
11.1 --- a/xen/arch/ia64/patch/linux-2.6.11/entry.S Mon May 23 15:22:15 2005 +0000 11.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.S Mon May 23 15:29:59 2005 +0000 11.3 @@ -1,5 +1,5 @@ 11.4 ---- ../../linux-2.6.11/arch/ia64/kernel/entry.S 2005-03-02 00:37:50.000000000 -0700 11.5 -+++ arch/ia64/entry.S 2005-04-29 14:54:13.000000000 -0600 11.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.S 2005-03-01 23:37:50.000000000 -0800 11.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/entry.S 2005-05-18 12:40:51.000000000 -0700 11.8 @@ -46,6 +46,7 @@ 11.9 11.10 #include "minstate.h" 11.11 @@ -73,19 +73,23 @@ 11.12 11.13 GLOBAL_ENTRY(ia64_ret_from_clone) 11.14 PT_REGS_UNWIND_INFO(0) 11.15 -@@ -604,6 +626,11 @@ 11.16 +@@ -604,6 +626,15 @@ 11.17 */ 11.18 br.call.sptk.many rp=ia64_invoke_schedule_tail 11.19 } 11.20 +#ifdef XEN 11.21 + // new domains are cloned but not exec'ed so switch to user mode here 11.22 + cmp.ne pKStk,pUStk=r0,r0 11.23 ++#ifdef CONFIG_VTI 11.24 ++ br.cond.spnt ia64_leave_hypervisor 11.25 ++#else // CONFIG_VTI 11.26 + br.cond.spnt ia64_leave_kernel 11.27 ++#endif // CONFIG_VTI 11.28 +#else 11.29 .ret8: 11.30 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 11.31 ;; 11.32 -@@ -614,6 +641,7 @@ 11.33 +@@ -614,6 +645,7 @@ 11.34 ;; 11.35 cmp.ne p6,p0=r2,r0 11.36 (p6) br.cond.spnt .strace_check_retval 11.37 @@ -93,7 +97,7 @@ 11.38 ;; // added stop bits to prevent r8 dependency 11.39 END(ia64_ret_from_clone) 11.40 // fall through 11.41 -@@ -700,19 +728,25 @@ 11.42 +@@ -700,19 +732,25 @@ 11.43 .work_processed_syscall: 11.44 adds r2=PT(LOADRS)+16,r12 11.45 adds r3=PT(AR_BSPSTORE)+16,r12 11.46 @@ -119,7 +123,7 @@ 11.47 ;; 11.48 // start restoring the state saved on the kernel stack (struct pt_regs): 11.49 ld8 r9=[r2],PT(CR_IPSR)-PT(R9) 11.50 -@@ -757,7 +791,11 @@ 11.51 +@@ -757,7 +795,11 @@ 11.52 ;; 11.53 ld8.fill r12=[r2] // restore r12 (sp) 11.54 ld8.fill r15=[r3] // restore r15 11.55 @@ -131,7 +135,7 @@ 11.56 ;; 11.57 (pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8 11.58 (pUStk) st1 [r14]=r17 11.59 -@@ -814,9 +852,18 @@ 11.60 +@@ -814,9 +856,18 @@ 11.61 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk 11.62 #endif 11.63 .work_processed_kernel: 11.64 @@ -150,7 +154,19 @@ 11.65 adds r21=PT(PR)+16,r12 11.66 ;; 11.67 11.68 -@@ -838,7 +885,9 @@ 11.69 +@@ -828,17 +879,20 @@ 11.70 + ld8 r28=[r2],8 // load b6 11.71 + adds r29=PT(R24)+16,r12 11.72 + 11.73 +- ld8.fill r16=[r3],PT(AR_CSD)-PT(R16) 11.74 ++ ld8.fill r16=[r3] 11.75 + adds r30=PT(AR_CCV)+16,r12 11.76 + (p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? 11.77 + ;; 11.78 ++ adds r3=PT(AR_CSD)-PT(R16),r3 11.79 + ld8.fill r24=[r29] 11.80 + ld8 r15=[r30] // load ar.ccv 11.81 + (p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending? 
11.82 ;; 11.83 ld8 r29=[r2],16 // load b7 11.84 ld8 r30=[r3],16 // load ar.csd 11.85 @@ -160,7 +176,7 @@ 11.86 ;; 11.87 ld8 r31=[r2],16 // load ar.ssd 11.88 ld8.fill r8=[r3],16 11.89 -@@ -934,7 +983,11 @@ 11.90 +@@ -934,7 +988,11 @@ 11.91 shr.u r18=r19,16 // get byte size of existing "dirty" partition 11.92 ;; 11.93 mov r16=ar.bsp // get existing backing store pointer 11.94 @@ -172,7 +188,7 @@ 11.95 ;; 11.96 ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 11.97 (pKStk) br.cond.dpnt skip_rbs_switch 11.98 -@@ -1069,6 +1122,7 @@ 11.99 +@@ -1069,6 +1127,7 @@ 11.100 mov pr=r31,-1 // I0 11.101 rfi // B 11.102 11.103 @@ -180,7 +196,7 @@ 11.104 /* 11.105 * On entry: 11.106 * r20 = ¤t->thread_info->pre_count (if CONFIG_PREEMPT) 11.107 -@@ -1130,6 +1184,7 @@ 11.108 +@@ -1130,6 +1189,7 @@ 11.109 ld8 r8=[r2] 11.110 ld8 r10=[r3] 11.111 br.cond.sptk.many .work_processed_syscall // re-check 11.112 @@ -188,7 +204,7 @@ 11.113 11.114 END(ia64_leave_kernel) 11.115 11.116 -@@ -1166,6 +1221,7 @@ 11.117 +@@ -1166,6 +1226,7 @@ 11.118 br.ret.sptk.many rp 11.119 END(ia64_invoke_schedule_tail) 11.120 11.121 @@ -196,7 +212,7 @@ 11.122 /* 11.123 * Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to 11.124 * be set up by the caller. We declare 8 input registers so the system call 11.125 -@@ -1264,6 +1320,7 @@ 11.126 +@@ -1264,6 +1325,7 @@ 11.127 mov ar.unat=r9 11.128 br.many b7 11.129 END(sys_rt_sigreturn) 11.130 @@ -204,7 +220,7 @@ 11.131 11.132 GLOBAL_ENTRY(ia64_prepare_handle_unaligned) 11.133 .prologue 11.134 -@@ -1278,6 +1335,7 @@ 11.135 +@@ -1278,6 +1340,7 @@ 11.136 br.cond.sptk.many rp // goes to ia64_leave_kernel 11.137 END(ia64_prepare_handle_unaligned) 11.138 11.139 @@ -212,7 +228,7 @@ 11.140 // 11.141 // unw_init_running(void (*callback)(info, arg), void *arg) 11.142 // 11.143 -@@ -1585,3 +1643,4 @@ 11.144 +@@ -1585,3 +1648,4 @@ 11.145 data8 sys_ni_syscall 11.146 11.147 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 12.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.h Mon May 23 15:29:59 2005 +0000 12.3 @@ -0,0 +1,37 @@ 12.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.h 2005-03-01 23:38:07.000000000 -0800 12.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/arch/ia64/entry.h 2005-05-18 14:00:53.000000000 -0700 12.6 +@@ -7,6 +7,12 @@ 12.7 + #define PRED_LEAVE_SYSCALL 1 /* TRUE iff leave from syscall */ 12.8 + #define PRED_KERNEL_STACK 2 /* returning to kernel-stacks? */ 12.9 + #define PRED_USER_STACK 3 /* returning to user-stacks? */ 12.10 ++#ifdef CONFIG_VTI 12.11 ++#define PRED_EMUL 2 /* Need to save r4-r7 for inst emulation */ 12.12 ++#define PRED_NON_EMUL 3 /* No need to save r4-r7 for normal path */ 12.13 ++#define PRED_BN0 6 /* Guest is in bank 0 */ 12.14 ++#define PRED_BN1 7 /* Guest is in bank 1 */ 12.15 ++#endif // CONFIG_VTI 12.16 + #define PRED_SYSCALL 4 /* inside a system call? */ 12.17 + #define PRED_NON_SYSCALL 5 /* complement of PRED_SYSCALL */ 12.18 + 12.19 +@@ -17,12 +23,21 @@ 12.20 + # define pLvSys PASTE(p,PRED_LEAVE_SYSCALL) 12.21 + # define pKStk PASTE(p,PRED_KERNEL_STACK) 12.22 + # define pUStk PASTE(p,PRED_USER_STACK) 12.23 ++#ifdef CONFIG_VTI 12.24 ++# define pEml PASTE(p,PRED_EMUL) 12.25 ++# define pNonEml PASTE(p,PRED_NON_EMUL) 12.26 ++# define pBN0 PASTE(p,PRED_BN0) 12.27 ++# define pBN1 PASTE(p,PRED_BN1) 12.28 ++#endif // CONFIG_VTI 12.29 + # define pSys PASTE(p,PRED_SYSCALL) 12.30 + # define pNonSys PASTE(p,PRED_NON_SYSCALL) 12.31 + #endif 12.32 + 12.33 + #define PT(f) (IA64_PT_REGS_##f##_OFFSET) 12.34 + #define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET) 12.35 ++#ifdef CONFIG_VTI 12.36 ++#define VPD(f) (VPD_##f##_START_OFFSET) 12.37 ++#endif // CONFIG_VTI 12.38 + 12.39 + #define PT_REGS_SAVES(off) \ 12.40 + .unwabi 3, 'i'; \
13.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 13.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h Mon May 23 15:29:59 2005 +0000 13.3 @@ -0,0 +1,69 @@ 13.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/gcc_intrin.h 2005-03-01 23:38:08.000000000 -0800 13.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/gcc_intrin.h 2005-05-18 14:00:53.000000000 -0700 13.6 +@@ -368,6 +368,66 @@ 13.7 + #define ia64_mf() asm volatile ("mf" ::: "memory") 13.8 + #define ia64_mfa() asm volatile ("mf.a" ::: "memory") 13.9 + 13.10 ++#ifdef CONFIG_VTI 13.11 ++/* 13.12 ++ * flushrs: spill the dirty stacked registers to the backing store. 13.13 ++ */ 13.14 ++#define ia64_flushrs() asm volatile ("flushrs;;":::"memory") 13.15 ++ 13.16 ++#define ia64_loadrs() asm volatile ("loadrs;;":::"memory") 13.17 ++ 13.18 ++#define ia64_get_rsc() \ 13.19 ++({ \ 13.20 ++ unsigned long val; \ 13.21 ++ asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \ 13.22 ++ val; \ 13.23 ++}) 13.24 ++ 13.25 ++#define ia64_set_rsc(val) \ 13.26 ++ asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory") 13.27 ++ 13.28 ++#define ia64_get_bspstore() \ 13.29 ++({ \ 13.30 ++ unsigned long val; \ 13.31 ++ asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \ 13.32 ++ val; \ 13.33 ++}) 13.34 ++ 13.35 ++#define ia64_set_bspstore(val) \ 13.36 ++ asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory") 13.37 ++ 13.38 ++#define ia64_get_rnat() \ 13.39 ++({ \ 13.40 ++ unsigned long val; \ 13.41 ++ asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \ 13.42 ++ val; \ 13.43 ++}) 13.44 ++ 13.45 ++#define ia64_set_rnat(val) \ 13.46 ++ asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory") 13.47 ++ 13.48 ++#define ia64_ttag(addr) \ 13.49 ++({ \ 13.50 ++ __u64 ia64_intri_res; \ 13.51 ++ asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ 13.52 ++ ia64_intri_res; \ 13.53 ++}) 13.54 ++ 13.55 ++#define ia64_get_dcr() \ 13.56 ++({ \ 13.57 ++ __u64 result; \ 13.58 ++ asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \ 13.59 ++ result; \ 13.60 ++}) 13.61 ++ 13.62 ++#define ia64_set_dcr(val) \ 13.63 ++({ \ 13.64 ++ asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \ 13.65 ++}) 13.66 ++ 13.67 ++#endif // CONFIG_VTI 13.68 ++ 13.69 ++ 13.70 + #define ia64_invala() asm volatile ("invala" ::: "memory") 13.71 + 13.72 + #define ia64_thash(addr) \
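The CONFIG_VTI intrinsics added above come in natural save/restore pairs around the register stack engine. A hedged sketch of the conventional ordering when redirecting the backing store (the helper name is hypothetical, and this is simplified: real switching code must also preserve ar.rnat via ia64_get_rnat()/ia64_set_rnat() once the flush completes):

    static inline unsigned long switch_bspstore(unsigned long new_bsp)
    {
        unsigned long old_rsc = ia64_get_rsc();
        unsigned long old_bsp;

        ia64_set_rsc(0);        /* RSE into enforced-lazy mode before touching ar.bspstore */
        ia64_flushrs();         /* spill all dirty stacked registers to the old backing store */
        old_bsp = ia64_get_bspstore();
        ia64_set_bspstore(new_bsp);
        ia64_set_rsc(old_rsc);  /* restore the caller's RSE configuration */
        return old_bsp;
    }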
14.1 --- a/xen/arch/ia64/patch/linux-2.6.11/head.S Mon May 23 15:22:15 2005 +0000 14.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/head.S Mon May 23 15:29:59 2005 +0000 14.3 @@ -1,6 +1,62 @@ 14.4 ---- ../../linux-2.6.11/arch/ia64/kernel/head.S 2005-03-02 00:38:13.000000000 -0700 14.5 -+++ arch/ia64/head.S 2005-04-28 10:51:19.000000000 -0600 14.6 -@@ -187,7 +187,11 @@ 14.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/head.S 2005-03-01 23:38:13.000000000 -0800 14.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/head.S 2005-05-18 12:40:50.000000000 -0700 14.9 +@@ -76,21 +76,21 @@ 14.10 + * We initialize all of them to prevent inadvertently assuming 14.11 + * something about the state of address translation early in boot. 14.12 + */ 14.13 +- mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.14 ++ movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.15 + movl r7=(0<<61) 14.16 +- mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.17 ++ movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.18 + movl r9=(1<<61) 14.19 +- mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.20 ++ movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.21 + movl r11=(2<<61) 14.22 +- mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.23 ++ movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.24 + movl r13=(3<<61) 14.25 +- mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.26 ++ movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.27 + movl r15=(4<<61) 14.28 +- mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.29 ++ movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1) 14.30 + movl r17=(5<<61) 14.31 +- mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) 14.32 ++ movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) 14.33 + movl r19=(6<<61) 14.34 +- mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) 14.35 ++ movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) 14.36 + movl r21=(7<<61) 14.37 + ;; 14.38 + mov rr[r7]=r6 14.39 +@@ -129,8 +129,13 @@ 14.40 + /* 14.41 + * Switch into virtual mode: 14.42 + */ 14.43 ++#ifdef CONFIG_VTI 14.44 ++ movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \ 14.45 ++ |IA64_PSR_DI) 14.46 ++#else // CONFIG_VTI 14.47 + movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \ 14.48 + |IA64_PSR_DI) 14.49 ++#endif // CONFIG_VTI 14.50 + ;; 14.51 + mov cr.ipsr=r16 14.52 + movl r17=1f 14.53 +@@ -143,7 +148,11 @@ 14.54 + 1: // now we are in virtual mode 14.55 + 14.56 + // set IVT entry point---can't access I/O ports without it 14.57 ++#ifdef CONFIG_VTI 14.58 ++ movl r3=vmx_ia64_ivt 14.59 ++#else // CONFIG_VTI 14.60 + movl r3=ia64_ivt 14.61 ++#endif // CONFIG_VTI 14.62 + ;; 14.63 + mov cr.iva=r3 14.64 + movl r2=FPSR_DEFAULT 14.65 +@@ -187,7 +196,11 @@ 14.66 dep r18=0,r3,0,12 14.67 ;; 14.68 or r18=r17,r18 14.69 @@ -12,7 +68,23 @@ 14.70 ;; 14.71 mov r17=rr[r2] 14.72 shr.u r16=r3,IA64_GRANULE_SHIFT 14.73 -@@ -227,7 +231,11 @@ 14.74 +@@ -207,8 +220,15 @@ 14.75 + 14.76 
+ .load_current: 14.77 + // load the "current" pointer (r13) and ar.k6 with the current task 14.78 ++#ifdef CONFIG_VTI 14.79 ++ mov r21=r2 // virtual address 14.80 ++ ;; 14.81 ++ bsw.1 14.82 ++ ;; 14.83 ++#else // CONFIG_VTI 14.84 + mov IA64_KR(CURRENT)=r2 // virtual address 14.85 + mov IA64_KR(CURRENT_STACK)=r16 14.86 ++#endif // CONFIG_VTI 14.87 + mov r13=r2 14.88 + /* 14.89 + * Reserve space at the top of the stack for "struct pt_regs". Kernel threads 14.90 +@@ -227,7 +247,11 @@ 14.91 ;; 14.92 mov ar.rsc=0x3 // place RSE in eager mode 14.93 14.94 @@ -24,7 +96,7 @@ 14.95 (isBP) movl r2=ia64_boot_param 14.96 ;; 14.97 (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader 14.98 -@@ -254,7 +262,9 @@ 14.99 +@@ -254,7 +278,9 @@ 14.100 br.call.sptk.many b0=console_print 14.101 14.102 self: hint @pause 14.103 @@ -34,7 +106,7 @@ 14.104 END(_start) 14.105 14.106 GLOBAL_ENTRY(ia64_save_debug_regs) 14.107 -@@ -850,7 +860,11 @@ 14.108 +@@ -850,7 +876,11 @@ 14.109 * intermediate precision so that we can produce a full 64-bit result. 14.110 */ 14.111 GLOBAL_ENTRY(sched_clock)
15.1 --- a/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h Mon May 23 15:22:15 2005 +0000 15.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h Mon May 23 15:29:59 2005 +0000 15.3 @@ -1,10 +1,5 @@ 15.4 - hpsim_ssc.h | 19 +++++++++++++++++++ 15.5 - 1 files changed, 19 insertions(+) 15.6 - 15.7 -Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 15.8 -=================================================================== 15.9 ---- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-02 01:38:17.000000000 -0600 15.10 -+++ linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-19 13:34:01.705520375 -0600 15.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-01 23:38:17.000000000 -0800 15.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/hpsim_ssc.h 2005-05-18 12:40:19.000000000 -0700 15.13 @@ -33,4 +33,23 @@ 15.14 */ 15.15 extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
16.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 16.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ia64regs.h Mon May 23 15:29:59 2005 +0000 16.3 @@ -0,0 +1,38 @@ 16.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ia64regs.h 2005-03-01 23:38:07.000000000 -0800 16.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ia64regs.h 2005-05-18 14:00:53.000000000 -0700 16.6 +@@ -87,6 +87,35 @@ 16.7 + #define _IA64_REG_CR_LRR0 4176 16.8 + #define _IA64_REG_CR_LRR1 4177 16.9 + 16.10 ++#ifdef CONFIG_VTI 16.11 ++#define IA64_REG_CR_DCR 0 16.12 ++#define IA64_REG_CR_ITM 1 16.13 ++#define IA64_REG_CR_IVA 2 16.14 ++#define IA64_REG_CR_PTA 8 16.15 ++#define IA64_REG_CR_IPSR 16 16.16 ++#define IA64_REG_CR_ISR 17 16.17 ++#define IA64_REG_CR_IIP 19 16.18 ++#define IA64_REG_CR_IFA 20 16.19 ++#define IA64_REG_CR_ITIR 21 16.20 ++#define IA64_REG_CR_IIPA 22 16.21 ++#define IA64_REG_CR_IFS 23 16.22 ++#define IA64_REG_CR_IIM 24 16.23 ++#define IA64_REG_CR_IHA 25 16.24 ++#define IA64_REG_CR_LID 64 16.25 ++#define IA64_REG_CR_IVR 65 16.26 ++#define IA64_REG_CR_TPR 66 16.27 ++#define IA64_REG_CR_EOI 67 16.28 ++#define IA64_REG_CR_IRR0 68 16.29 ++#define IA64_REG_CR_IRR1 69 16.30 ++#define IA64_REG_CR_IRR2 70 16.31 ++#define IA64_REG_CR_IRR3 71 16.32 ++#define IA64_REG_CR_ITV 72 16.33 ++#define IA64_REG_CR_PMV 73 16.34 ++#define IA64_REG_CR_CMCV 74 16.35 ++#define IA64_REG_CR_LRR0 80 16.36 ++#define IA64_REG_CR_LRR1 81 16.37 ++#endif // CONFIG_VTI 16.38 ++ 16.39 + /* Indirect Registers for getindreg() and setindreg() */ 16.40 + 16.41 + #define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
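Note the relationship hiding in the new constants: they are the raw architectural cr[n] indices, each equal to the corresponding _IA64_REG_CR_* getreg/setreg token minus 4096 (for example LRR0: 4176 - 4096 = 80). A one-line sketch of the conversion, with a hypothetical macro name:

    /* Map a getreg()/setreg() CR token to its raw cr[] index; _IA64_REG_CR_DCR is 4096. */
    #define CR_RAW_INDEX(tok)   ((tok) - _IA64_REG_CR_DCR)
    /* CR_RAW_INDEX(_IA64_REG_CR_LRR0) == 80 == IA64_REG_CR_LRR0 */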
17.1 --- a/xen/arch/ia64/patch/linux-2.6.11/interrupt.h Mon May 23 15:22:15 2005 +0000 17.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/interrupt.h Mon May 23 15:29:59 2005 +0000 17.3 @@ -1,11 +1,6 @@ 17.4 - interrupt.h | 2 ++ 17.5 - 1 files changed, 2 insertions(+) 17.6 - 17.7 -Index: linux-2.6.11/include/linux/interrupt.h 17.8 -=================================================================== 17.9 ---- linux-2.6.11.orig/include/linux/interrupt.h 2005-03-02 01:38:09.000000000 -0600 17.10 -+++ linux-2.6.11/include/linux/interrupt.h 2005-03-19 13:41:00.739901125 -0600 17.11 -@@ -33,6 +33,7 @@ typedef int irqreturn_t; 17.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/linux/interrupt.h 2005-03-01 23:38:09.000000000 -0800 17.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/linux/interrupt.h 2005-05-18 12:40:50.000000000 -0700 17.14 +@@ -33,6 +33,7 @@ 17.15 #define IRQ_HANDLED (1) 17.16 #define IRQ_RETVAL(x) ((x) != 0) 17.17 17.18 @@ -13,7 +8,7 @@ Index: linux-2.6.11/include/linux/interr 17.19 struct irqaction { 17.20 irqreturn_t (*handler)(int, void *, struct pt_regs *); 17.21 unsigned long flags; 17.22 -@@ -49,6 +50,7 @@ extern int request_irq(unsigned int, 17.23 +@@ -49,6 +50,7 @@ 17.24 irqreturn_t (*handler)(int, void *, struct pt_regs *), 17.25 unsigned long, const char *, void *); 17.26 extern void free_irq(unsigned int, void *); 17.27 @@ -21,3 +16,12 @@ Index: linux-2.6.11/include/linux/interr 17.28 17.29 17.30 #ifdef CONFIG_GENERIC_HARDIRQS 17.31 +@@ -121,7 +123,7 @@ 17.32 + }; 17.33 + 17.34 + asmlinkage void do_softirq(void); 17.35 +-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); 17.36 ++//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); 17.37 + extern void softirq_init(void); 17.38 + #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0) 17.39 + extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
18.1 --- a/xen/arch/ia64/patch/linux-2.6.11/io.h Mon May 23 15:22:15 2005 +0000 18.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/io.h Mon May 23 15:29:59 2005 +0000 18.3 @@ -1,16 +1,11 @@ 18.4 - io.h | 4 ++++ 18.5 - 1 files changed, 4 insertions(+) 18.6 - 18.7 -Index: linux-2.6.11/include/asm-ia64/io.h 18.8 -=================================================================== 18.9 ---- linux-2.6.11.orig/include/asm-ia64/io.h 2005-03-02 01:38:34.000000000 -0600 18.10 -+++ linux-2.6.11/include/asm-ia64/io.h 2005-03-19 13:42:06.541900818 -0600 18.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/io.h 2005-03-01 23:38:34.000000000 -0800 18.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/io.h 2005-05-18 12:40:50.000000000 -0700 18.13 @@ -23,7 +23,11 @@ 18.14 #define __SLOW_DOWN_IO do { } while (0) 18.15 #define SLOW_DOWN_IO do { } while (0) 18.16 18.17 +#ifdef XEN 18.18 -+#define __IA64_UNCACHED_OFFSET 0xdffc000000000000UL /* region 6 */ 18.19 ++#define __IA64_UNCACHED_OFFSET 0xd000000000000000UL /* region 6 */ 18.20 +#else 18.21 #define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */ 18.22 +#endif
19.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Mon May 23 15:22:15 2005 +0000 19.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Mon May 23 15:29:59 2005 +0000 19.3 @@ -1,5 +1,5 @@ 19.4 ---- ../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c 2005-03-02 00:38:07.000000000 -0700 19.5 -+++ arch/ia64/irq_ia64.c 2005-04-29 16:05:30.000000000 -0600 19.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c 2005-03-01 23:38:07.000000000 -0800 19.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/irq_ia64.c 2005-05-18 12:40:51.000000000 -0700 19.8 @@ -106,6 +106,9 @@ 19.9 unsigned long saved_tpr; 19.10 19.11 @@ -20,3 +20,99 @@ 19.12 __do_IRQ(local_vector_to_irq(vector), regs); 19.13 19.14 /* 19.15 +@@ -167,6 +173,95 @@ 19.16 + irq_exit(); 19.17 + } 19.18 + 19.19 ++#ifdef CONFIG_VTI 19.20 ++/* 19.21 ++ * That's where the IVT branches when we get an external 19.22 ++ * interrupt. This branches to the correct hardware IRQ handler via 19.23 ++ * function ptr. 19.24 ++ */ 19.25 ++void 19.26 ++vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) 19.27 ++{ 19.28 ++ unsigned long saved_tpr; 19.29 ++ int wake_dom0 = 0; 19.30 ++ 19.31 ++ 19.32 ++#if IRQ_DEBUG 19.33 ++ { 19.34 ++ unsigned long bsp, sp; 19.35 ++ 19.36 ++ /* 19.37 ++ * Note: if the interrupt happened while executing in 19.38 ++ * the context switch routine (ia64_switch_to), we may 19.39 ++ * get a spurious stack overflow here. This is 19.40 ++ * because the register and the memory stack are not 19.41 ++ * switched atomically. 19.42 ++ */ 19.43 ++ bsp = ia64_getreg(_IA64_REG_AR_BSP); 19.44 ++ sp = ia64_getreg(_IA64_REG_AR_SP); 19.45 ++ 19.46 ++ if ((sp - bsp) < 1024) { 19.47 ++ static unsigned char count; 19.48 ++ static long last_time; 19.49 ++ 19.50 ++ if (jiffies - last_time > 5*HZ) 19.51 ++ count = 0; 19.52 ++ if (++count < 5) { 19.53 ++ last_time = jiffies; 19.54 ++ printk("ia64_handle_irq: DANGER: less than " 19.55 ++ "1KB of free stack space!!\n" 19.56 ++ "(bsp=0x%lx, sp=%lx)\n", bsp, sp); 19.57 ++ } 19.58 ++ } 19.59 ++ } 19.60 ++#endif /* IRQ_DEBUG */ 19.61 ++ 19.62 ++ /* 19.63 ++ * Always set TPR to limit maximum interrupt nesting depth to 19.64 ++ * 16 (without this, it would be ~240, which could easily lead 19.65 ++ * to kernel stack overflows). 19.66 ++ */ 19.67 ++ irq_enter(); 19.68 ++ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); 19.69 ++ ia64_srlz_d(); 19.70 ++ while (vector != IA64_SPURIOUS_INT_VECTOR) { 19.71 ++ if (!IS_RESCHEDULE(vector)) { 19.72 ++ ia64_setreg(_IA64_REG_CR_TPR, vector); 19.73 ++ ia64_srlz_d(); 19.74 ++ 19.75 ++ if (vector != IA64_TIMER_VECTOR) { 19.76 ++ /* FIXME: Leave IRQ re-route later */ 19.77 ++ vmx_vcpu_pend_interrupt(dom0->exec_domain[0],vector); 19.78 ++ wake_dom0 = 1; 19.79 ++ } 19.80 ++ else { // FIXME: Handle Timer only now 19.81 ++ __do_IRQ(local_vector_to_irq(vector), regs); 19.82 ++ } 19.83 ++ 19.84 ++ /* 19.85 ++ * Disable interrupts and send EOI: 19.86 ++ */ 19.87 ++ local_irq_disable(); 19.88 ++ ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); 19.89 ++ } 19.90 ++ else { 19.91 ++ printf("Oops: RESCHEDULE IPI absorbed by HV\n"); 19.92 ++ } 19.93 ++ ia64_eoi(); 19.94 ++ vector = ia64_get_ivr(); 19.95 ++ } 19.96 ++ /* 19.97 ++ * This must be done *after* the ia64_eoi(). For example, the keyboard softirq 19.98 ++ * handler needs to be able to wait for further keyboard interrupts, which can't 19.99 ++ * come through until ia64_eoi() has been done. 
19.100 ++ */ 19.101 ++ irq_exit(); 19.102 ++ if ( wake_dom0 && current != dom0 ) 19.103 ++ domain_wake(dom0->exec_domain[0]); 19.104 ++} 19.105 ++#endif 19.106 ++ 19.107 ++ 19.108 + #ifdef CONFIG_HOTPLUG_CPU 19.109 + /* 19.110 + * This function emulates a interrupt processing when a cpu is about to be
20.1 --- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h Mon May 23 15:22:15 2005 +0000 20.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h Mon May 23 15:29:59 2005 +0000 20.3 @@ -1,18 +1,65 @@ 20.4 - kregs.h | 4 ++++ 20.5 - 1 files changed, 4 insertions(+) 20.6 - 20.7 -Index: linux-2.6.11/include/asm-ia64/kregs.h 20.8 -=================================================================== 20.9 ---- linux-2.6.11.orig/include/asm-ia64/kregs.h 2005-03-02 01:37:49.000000000 -0600 20.10 -+++ linux-2.6.11/include/asm-ia64/kregs.h 2005-03-19 13:44:24.362628092 -0600 20.11 -@@ -31,6 +31,10 @@ 20.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h 2005-03-01 23:37:49.000000000 -0800 20.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h 2005-05-18 12:40:50.000000000 -0700 20.14 +@@ -29,8 +29,20 @@ 20.15 + */ 20.16 + #define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */ 20.17 #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ 20.18 ++#ifdef CONFIG_VTI 20.19 ++#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen image in domain space */ 20.20 ++#endif // CONFIG_VTI 20.21 #define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */ 20.22 #define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */ 20.23 +#ifdef XEN 20.24 +#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */ 20.25 +#define IA64_TR_VHPT 4 /* dtr4: vhpt */ 20.26 ++#ifdef CONFIG_VTI 20.27 ++#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table in domain space */ 20.28 ++#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch stub */ 20.29 ++#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest physical memory 256M */ 20.30 ++#endif // CONFIG_VTI 20.31 +#endif 20.32 20.33 /* Processor status register bits: */ 20.34 #define IA64_PSR_BE_BIT 1 20.35 +@@ -66,6 +78,9 @@ 20.36 + #define IA64_PSR_ED_BIT 43 20.37 + #define IA64_PSR_BN_BIT 44 20.38 + #define IA64_PSR_IA_BIT 45 20.39 ++#ifdef CONFIG_VTI 20.40 ++#define IA64_PSR_VM_BIT 46 20.41 ++#endif // CONFIG_VTI 20.42 + 20.43 + /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an 20.44 + execve(). 
Only list flags here that need to be cleared/set for BOTH clone2() and 20.45 +@@ -107,6 +122,9 @@ 20.46 + #define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT) 20.47 + #define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT) 20.48 + #define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT) 20.49 ++#ifdef CONFIG_VTI 20.50 ++#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) 20.51 ++#endif // CONFIG_VTI 20.52 + 20.53 + /* User mask bits: */ 20.54 + #define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH) 20.55 +@@ -160,4 +178,21 @@ 20.56 + #define IA64_ISR_CODE_LFETCH 4 20.57 + #define IA64_ISR_CODE_PROBEF 5 20.58 + 20.59 ++#ifdef CONFIG_VTI 20.60 ++/* Interruption Function State */ 20.61 ++#define IA64_IFS_V_BIT 63 20.62 ++#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) 20.63 ++ 20.64 ++/* Page Table Address */ 20.65 ++#define IA64_PTA_VE_BIT 0 20.66 ++#define IA64_PTA_SIZE_BIT 2 20.67 ++#define IA64_PTA_VF_BIT 8 20.68 ++#define IA64_PTA_BASE_BIT 15 20.69 ++ 20.70 ++#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT) 20.71 ++#define IA64_PTA_SIZE (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT) 20.72 ++#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT) 20.73 ++#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT))) 20.74 ++#endif // CONFIG_VTI 20.75 ++ 20.76 + #endif /* _ASM_IA64_kREGS_H */
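The new PTA field masks compose a cr.pta value in the usual ia64 way: a base aligned to bit 15, a 6-bit log2 size, and the VE walker-enable bit. A minimal sketch under those assumptions (vhpt_base and vhpt_log_size are hypothetical variables; whether IA64_PTA_VF is also set depends on the VHPT format chosen):

    unsigned long pta = (vhpt_base & IA64_PTA_BASE)                            /* table base, bits 63..15 */
                      | ((vhpt_log_size << IA64_PTA_SIZE_BIT) & IA64_PTA_SIZE) /* log2 of the table size  */
                      | IA64_PTA_VE;                                           /* enable the VHPT walker  */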
21.1 --- a/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h Mon May 23 15:22:15 2005 +0000 21.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h Mon May 23 15:29:59 2005 +0000 21.3 @@ -1,10 +1,5 @@ 21.4 - mca_asm.h | 11 +++++++++++ 21.5 - 1 files changed, 11 insertions(+) 21.6 - 21.7 -Index: linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h 21.8 -=================================================================== 21.9 ---- linux-2.6.11-xendiffs.orig/include/asm-ia64/mca_asm.h 2005-03-02 01:38:38.000000000 -0600 21.10 -+++ linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h 2005-04-06 22:41:57.392411032 -0500 21.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/mca_asm.h 2005-03-01 23:38:38.000000000 -0800 21.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/mca_asm.h 2005-05-18 12:40:19.000000000 -0700 21.13 @@ -26,8 +26,13 @@ 21.14 * direct mapped to physical addresses. 21.15 * 1. Lop off bits 61 thru 63 in the virtual address
22.1 --- a/xen/arch/ia64/patch/linux-2.6.11/page.h Mon May 23 15:22:15 2005 +0000 22.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/page.h Mon May 23 15:29:59 2005 +0000 22.3 @@ -1,6 +1,14 @@ 22.4 ---- ../../linux-2.6.11/include/asm-ia64/page.h 2005-03-02 00:37:48.000000000 -0700 22.5 -+++ include/asm-ia64/page.h 2005-05-02 11:25:33.000000000 -0600 22.6 -@@ -95,9 +95,15 @@ 22.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/page.h 2005-03-01 23:37:48.000000000 -0800 22.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/page.h 2005-05-20 09:36:02.000000000 -0700 22.9 +@@ -32,6 +32,7 @@ 22.10 + #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) 22.11 + 22.12 + #define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ 22.13 ++ 22.14 + #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) 22.15 + 22.16 + #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */ 22.17 +@@ -95,9 +96,15 @@ 22.18 #endif 22.19 22.20 #ifndef CONFIG_DISCONTIGMEM 22.21 @@ -16,7 +24,7 @@ 22.22 #else 22.23 extern struct page *vmem_map; 22.24 extern unsigned long max_low_pfn; 22.25 -@@ -109,6 +115,11 @@ 22.26 +@@ -109,6 +116,11 @@ 22.27 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 22.28 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 22.29 22.30 @@ -28,7 +36,7 @@ 22.31 typedef union ia64_va { 22.32 struct { 22.33 unsigned long off : 61; /* intra-region offset */ 22.34 -@@ -124,8 +135,23 @@ 22.35 +@@ -124,8 +136,23 @@ 22.36 * expressed in this way to ensure they result in a single "dep" 22.37 * instruction. 22.38 */ 22.39 @@ -52,7 +60,7 @@ 22.40 22.41 #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) 22.42 #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) 22.43 -@@ -197,7 +223,11 @@ 22.44 +@@ -197,7 +224,11 @@ 22.45 # define __pgprot(x) (x) 22.46 #endif /* !STRICT_MM_TYPECHECKS */ 22.47
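REGION_NUMBER above works by overlaying the ia64_va union on an address and reading its top three bits. Two addresses from this changeset illustrate it (a small snippet for orientation only):

    ia64_va va;
    va.l = 0xf000000000000000UL;   /* d->xen_vastart in domain.c: top bits 111, so va.f.reg == 7 */
    /* likewise io.h's __IA64_UNCACHED_OFFSET 0xd000000000000000UL has top bits 110,
     * i.e. region 6, matching its "region 6" comment */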
23.1 --- a/xen/arch/ia64/patch/linux-2.6.11/pal.S Mon May 23 15:22:15 2005 +0000 23.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.S Mon May 23 15:29:59 2005 +0000 23.3 @@ -1,11 +1,6 @@ 23.4 - pal.S | 8 ++++++++ 23.5 - 1 files changed, 8 insertions(+) 23.6 - 23.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S 23.8 -=================================================================== 23.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/pal.S 2005-03-02 01:38:33.000000000 -0600 23.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S 2005-04-06 22:43:53.817885390 -0500 23.11 -@@ -166,7 +166,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static) 23.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/pal.S 2005-03-01 23:38:33.000000000 -0800 23.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/pal.S 2005-05-18 12:40:19.000000000 -0700 23.14 +@@ -166,7 +166,11 @@ 23.15 adds r8 = 1f-1b,r8 // calculate return address for call 23.16 ;; 23.17 mov loc4=ar.rsc // save RSE configuration 23.18 @@ -17,7 +12,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 23.19 tpa r8=r8 // convert rp to physical 23.20 ;; 23.21 mov b7 = loc2 // install target to branch reg 23.22 -@@ -225,7 +229,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) 23.23 +@@ -225,7 +229,11 @@ 23.24 mov loc3 = psr // save psr 23.25 ;; 23.26 mov loc4=ar.rsc // save RSE configuration
24.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 24.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.h Mon May 23 15:29:59 2005 +0000 24.3 @@ -0,0 +1,12 @@ 24.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/pal.h 2005-03-01 23:38:13.000000000 -0800 24.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/pal.h 2005-05-18 14:00:53.000000000 -0700 24.6 +@@ -1559,6 +1559,9 @@ 24.7 + return iprv.status; 24.8 + } 24.9 + 24.10 ++#ifdef CONFIG_VTI 24.11 ++#include <asm/vmx_pal.h> 24.12 ++#endif // CONFIG_VTI 24.13 + #endif /* __ASSEMBLY__ */ 24.14 + 24.15 + #endif /* _ASM_IA64_PAL_H */
25.1 --- a/xen/arch/ia64/patch/linux-2.6.11/processor.h Mon May 23 15:22:15 2005 +0000 25.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/processor.h Mon May 23 15:29:59 2005 +0000 25.3 @@ -1,17 +1,30 @@ 25.4 - processor.h | 4 ++++ 25.5 - 1 files changed, 4 insertions(+) 25.6 - 25.7 -Index: linux-2.6.11/include/asm-ia64/processor.h 25.8 -=================================================================== 25.9 ---- linux-2.6.11.orig/include/asm-ia64/processor.h 2005-03-02 01:37:58.000000000 -0600 25.10 -+++ linux-2.6.11/include/asm-ia64/processor.h 2005-03-19 14:26:01.062135543 -0600 25.11 -@@ -408,12 +408,16 @@ extern void ia64_setreg_unknown_kr (void 25.12 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/processor.h 2005-03-01 23:37:58.000000000 -0800 25.13 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/processor.h 2005-05-20 09:36:02.000000000 -0700 25.14 +@@ -94,7 +94,11 @@ 25.15 + #ifdef CONFIG_NUMA 25.16 + #include <asm/nodedata.h> 25.17 + #endif 25.18 ++#ifdef XEN 25.19 ++#include <asm/xenprocessor.h> 25.20 ++#endif 25.21 + 25.22 ++#ifndef XEN 25.23 + /* like above but expressed as bitfields for more efficient access: */ 25.24 + struct ia64_psr { 25.25 + __u64 reserved0 : 1; 25.26 +@@ -133,6 +137,7 @@ 25.27 + __u64 bn : 1; 25.28 + __u64 reserved4 : 19; 25.29 + }; 25.30 ++#endif 25.31 + 25.32 + /* 25.33 + * CPU type, hardware bug flags, and per-CPU state. Frequently used 25.34 +@@ -408,12 +413,14 @@ 25.35 */ 25.36 25.37 /* Return TRUE if task T owns the fph partition of the CPU we're running on. */ 25.38 -+#ifdef XEN 25.39 -+#define ia64_is_local_fpu_owner(t) 0 25.40 -+#else 25.41 ++#ifndef XEN 25.42 #define ia64_is_local_fpu_owner(t) \ 25.43 ({ \ 25.44 struct task_struct *__ia64_islfo_task = (t); \
26.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 26.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h Mon May 23 15:29:59 2005 +0000 26.3 @@ -0,0 +1,20 @@ 26.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ptrace.h 2005-03-01 23:38:38.000000000 -0800 26.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ptrace.h 2005-05-18 14:00:53.000000000 -0700 26.6 +@@ -95,6 +95,9 @@ 26.7 + * (because the memory stack pointer MUST ALWAYS be aligned this way) 26.8 + * 26.9 + */ 26.10 ++#ifdef CONFIG_VTI 26.11 ++#include "vmx_ptrace.h" 26.12 ++#else //CONFIG_VTI 26.13 + struct pt_regs { 26.14 + /* The following registers are saved by SAVE_MIN: */ 26.15 + unsigned long b6; /* scratch */ 26.16 +@@ -170,6 +173,7 @@ 26.17 + struct ia64_fpreg f10; /* scratch */ 26.18 + struct ia64_fpreg f11; /* scratch */ 26.19 + }; 26.20 ++#endif // CONFIG_VTI 26.21 + 26.22 + /* 26.23 + * This structure contains the addition registers that need to
27.1 --- a/xen/arch/ia64/patch/linux-2.6.11/setup.c Mon May 23 15:22:15 2005 +0000 27.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/setup.c Mon May 23 15:29:59 2005 +0000 27.3 @@ -1,6 +1,16 @@ 27.4 ---- ../../linux-2.6.11/arch/ia64/kernel/setup.c 2005-03-02 00:37:49.000000000 -0700 27.5 -+++ arch/ia64/setup.c 2005-05-02 10:04:03.000000000 -0600 27.6 -@@ -127,7 +127,16 @@ 27.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/setup.c 2005-03-01 23:37:49.000000000 -0800 27.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/setup.c 2005-05-18 12:40:50.000000000 -0700 27.9 +@@ -51,6 +51,9 @@ 27.10 + #include <asm/smp.h> 27.11 + #include <asm/system.h> 27.12 + #include <asm/unistd.h> 27.13 ++#ifdef CONFIG_VTI 27.14 ++#include <asm/vmx.h> 27.15 ++#endif // CONFIG_VTI 27.16 + 27.17 + #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) 27.18 + # error "struct cpuinfo_ia64 too big!" 27.19 +@@ -127,7 +130,16 @@ 27.20 range_end = min(end, rsvd_region[i].start); 27.21 27.22 if (range_start < range_end) 27.23 @@ -17,7 +27,7 @@ 27.24 27.25 /* nothing more available in this segment */ 27.26 if (range_end == end) return 0; 27.27 -@@ -185,7 +194,12 @@ 27.28 +@@ -185,7 +197,12 @@ 27.29 n++; 27.30 27.31 rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); 27.32 @@ -30,7 +40,7 @@ 27.33 n++; 27.34 27.35 #ifdef CONFIG_BLK_DEV_INITRD 27.36 -@@ -299,7 +313,11 @@ 27.37 +@@ -299,7 +316,11 @@ 27.38 } 27.39 27.40 void __init 27.41 @@ -42,7 +52,7 @@ 27.42 { 27.43 unw_init(); 27.44 27.45 -@@ -308,8 +326,14 @@ 27.46 +@@ -308,8 +329,14 @@ 27.47 *cmdline_p = __va(ia64_boot_param->command_line); 27.48 strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE); 27.49 27.50 @@ -57,7 +67,7 @@ 27.51 27.52 #ifdef CONFIG_IA64_GENERIC 27.53 { 27.54 -@@ -351,8 +375,17 @@ 27.55 +@@ -351,8 +378,18 @@ 27.56 # endif 27.57 #endif /* CONFIG_APCI_BOOT */ 27.58 27.59 @@ -71,11 +81,23 @@ 27.60 +late_setup_arch (char **cmdline_p) 27.61 +{ 27.62 +#undef CONFIG_ACPI_BOOT 27.63 ++ acpi_table_init(); 27.64 +#endif 27.65 /* process SAL system table: */ 27.66 ia64_sal_init(efi.sal_systab); 27.67 27.68 -@@ -492,12 +525,14 @@ 27.69 +@@ -360,6 +397,10 @@ 27.70 + cpu_physical_id(0) = hard_smp_processor_id(); 27.71 + #endif 27.72 + 27.73 ++#ifdef CONFIG_VTI 27.74 ++ identify_vmx_feature(); 27.75 ++#endif // CONFIG_VTI 27.76 ++ 27.77 + cpu_init(); /* initialize the bootstrap CPU */ 27.78 + 27.79 + #ifdef CONFIG_ACPI_BOOT 27.80 +@@ -492,12 +533,14 @@ 27.81 { 27.82 } 27.83 27.84 @@ -90,7 +112,20 @@ 27.85 27.86 void 27.87 identify_cpu (struct cpuinfo_ia64 *c) 27.88 -@@ -659,7 +694,11 @@ 27.89 +@@ -551,6 +594,12 @@ 27.90 + } 27.91 + c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); 27.92 + c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); 27.93 ++ 27.94 ++#ifdef CONFIG_VTI 27.95 ++ /* If vmx feature is on, do necessary initialization for vmx */ 27.96 ++ if (vmx_enabled) 27.97 ++ vmx_init_env(); 27.98 ++#endif 27.99 + } 27.100 + 27.101 + void 27.102 +@@ -659,7 +708,11 @@ 27.103 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); 27.104 atomic_inc(&init_mm.mm_count); 27.105 current->active_mm = &init_mm;
28.1 --- a/xen/arch/ia64/patch/linux-2.6.11/swiotlb.c Mon May 23 15:22:15 2005 +0000 28.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 28.3 @@ -1,29 +0,0 @@ 28.4 ---- ../../linux-2.6.11/arch/ia64/lib/swiotlb.c 2005-03-02 00:38:17.000000000 -0700 28.5 -+++ arch/ia64/lib/swiotlb.c 2005-05-02 13:04:15.000000000 -0600 28.6 -@@ -49,6 +49,15 @@ 28.7 - */ 28.8 - #define IO_TLB_SHIFT 11 28.9 - 28.10 -+#ifdef XEN 28.11 -+#define __order_to_size(_order) (1 << (_order+PAGE_SHIFT)) 28.12 -+#define alloc_bootmem_low_pages(_x) alloc_xenheap_pages(get_order(_x)) 28.13 -+#define alloc_bootmem_low(_x) alloc_xenheap_pages(get_order(_x)) 28.14 -+#define alloc_bootmem(_x) alloc_xenheap_pages(get_order(_x)) 28.15 -+#define __get_free_pages(_x,_y) alloc_xenheap_pages(__order_to_size(_y)) 28.16 -+#define free_pages(_x,_y) free_xenheap_pages(_x,_y) 28.17 -+#endif 28.18 -+ 28.19 - int swiotlb_force; 28.20 - 28.21 - /* 28.22 -@@ -388,8 +397,10 @@ 28.23 - * When the mapping is small enough return a static buffer to limit 28.24 - * the damage, or panic when the transfer is too big. 28.25 - */ 28.26 -+#ifndef XEN 28.27 - printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at " 28.28 - "device %s\n", size, dev ? dev->bus_id : "?"); 28.29 -+#endif 28.30 - 28.31 - if (size > io_tlb_overflow && do_panic) { 28.32 - if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
29.1 --- a/xen/arch/ia64/patch/linux-2.6.11/system.h Mon May 23 15:22:15 2005 +0000 29.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/system.h Mon May 23 15:29:59 2005 +0000 29.3 @@ -1,32 +1,38 @@ 29.4 ---- ../../linux-2.6.11/include/asm-ia64/system.h 2005-03-02 00:38:07.000000000 -0700 29.5 -+++ include/asm-ia64/system.h 2005-05-02 10:18:30.000000000 -0600 29.6 -@@ -24,8 +24,15 @@ 29.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/system.h 2005-03-01 23:38:07.000000000 -0800 29.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/system.h 2005-05-20 09:36:02.000000000 -0700 29.9 +@@ -18,14 +18,19 @@ 29.10 + #include <asm/page.h> 29.11 + #include <asm/pal.h> 29.12 + #include <asm/percpu.h> 29.13 ++#ifdef XEN 29.14 ++#include <asm/xensystem.h> 29.15 ++#endif 29.16 + 29.17 + #define GATE_ADDR __IA64_UL_CONST(0xa000000000000000) 29.18 + /* 29.19 * 0xa000000000000000+2*PERCPU_PAGE_SIZE 29.20 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) 29.21 */ 29.22 -+#ifdef XEN 29.23 -+#define KERNEL_START 0xf000000004000000 29.24 -+#define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE 29.25 -+#define SHAREDINFO_ADDR 0xf100000000000000 29.26 -+#define VHPT_ADDR 0xf200000000000000 29.27 -+#else 29.28 ++#ifndef XEN 29.29 #define KERNEL_START __IA64_UL_CONST(0xa000000100000000) 29.30 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 29.31 +#endif 29.32 29.33 #ifndef __ASSEMBLY__ 29.34 29.35 -@@ -218,9 +225,13 @@ 29.36 +@@ -218,6 +223,7 @@ 29.37 # define PERFMON_IS_SYSWIDE() (0) 29.38 #endif 29.39 29.40 -+#ifdef XEN 29.41 -+#define IA64_HAS_EXTRA_STATE(t) 0 29.42 -+#else 29.43 ++#ifndef XEN 29.44 #define IA64_HAS_EXTRA_STATE(t) \ 29.45 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ 29.46 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE()) 29.47 -+#endif 29.48 +@@ -230,6 +236,7 @@ 29.49 + ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ 29.50 + (last) = ia64_switch_to((next)); \ 29.51 + } while (0) 29.52 ++#endif 29.53 29.54 - #define __switch_to(prev,next,last) do { \ 29.55 - if (IA64_HAS_EXTRA_STATE(prev)) \ 29.56 + #ifdef CONFIG_SMP 29.57 + /*
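The system.h patch above drops the open-coded Xen layout constants in favor of a new asm/xensystem.h include. The values below are exactly the ones removed here; that they now live in xensystem.h is an assumption based on the new #include:

/* Layout constants removed from system.h above; presumably now provided
 * by asm/xensystem.h. PERCPU_PAGE_SIZE comes from asm/percpu.h. Note
 * the original PERCPU_ADDR expansion was unparenthesized, a macro-
 * hygiene hazard if used in a larger expression. */
#define KERNEL_START     0xf000000004000000
#define PERCPU_ADDR     (0xf100000000000000 - PERCPU_PAGE_SIZE)
#define SHAREDINFO_ADDR  0xf100000000000000
#define VHPT_ADDR        0xf200000000000000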
30.1 --- a/xen/arch/ia64/patch/linux-2.6.11/unaligned.c Mon May 23 15:22:15 2005 +0000 30.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/unaligned.c Mon May 23 15:29:59 2005 +0000 30.3 @@ -1,8 +1,145 @@ 30.4 ---- ../../linux-2.6.11/arch/ia64/kernel/unaligned.c 2005-03-02 00:38:25.000000000 -0700 30.5 -+++ arch/ia64/unaligned.c 2005-05-10 15:46:09.000000000 -0600 30.6 -@@ -437,7 +437,11 @@ 30.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/unaligned.c 2005-03-01 23:38:25.000000000 -0800 30.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/unaligned.c 2005-05-18 12:40:50.000000000 -0700 30.9 +@@ -201,7 +201,11 @@ 30.10 + 30.11 + RPT(r1), RPT(r2), RPT(r3), 30.12 + 30.13 ++#ifdef CONFIG_VTI 30.14 ++ RPT(r4), RPT(r5), RPT(r6), RPT(r7), 30.15 ++#else //CONFIG_VTI 30.16 + RSW(r4), RSW(r5), RSW(r6), RSW(r7), 30.17 ++#endif //CONFIG_VTI 30.18 + 30.19 + RPT(r8), RPT(r9), RPT(r10), RPT(r11), 30.20 + RPT(r12), RPT(r13), RPT(r14), RPT(r15), 30.21 +@@ -291,6 +295,121 @@ 30.22 + return reg; 30.23 } 30.24 30.25 ++#ifdef CONFIG_VTI 30.26 ++static void 30.27 ++set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat) 30.28 ++{ 30.29 ++ struct switch_stack *sw = (struct switch_stack *) regs - 1; 30.30 ++ unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end; 30.31 ++ unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; 30.32 ++ unsigned long rnats, nat_mask; 30.33 ++ unsigned long old_rsc,new_rsc; 30.34 ++ unsigned long on_kbs,rnat; 30.35 ++ long sof = (regs->cr_ifs) & 0x7f; 30.36 ++ long sor = 8 * ((regs->cr_ifs >> 14) & 0xf); 30.37 ++ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; 30.38 ++ long ridx = r1 - 32; 30.39 ++ 30.40 ++ if (ridx >= sof) { 30.41 ++ /* this should never happen, as the "rsvd register fault" has higher priority */ 30.42 ++ DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof); 30.43 ++ return; 30.44 ++ } 30.45 ++ 30.46 ++ if (ridx < sor) 30.47 ++ ridx = rotate_reg(sor, rrb_gr, ridx); 30.48 ++ 30.49 ++ old_rsc=ia64_get_rsc(); 30.50 ++ new_rsc=old_rsc&(~0x3); 30.51 ++ ia64_set_rsc(new_rsc); 30.52 ++ 30.53 ++ bspstore = ia64_get_bspstore(); 30.54 ++ bsp =kbs + (regs->loadrs >> 19);//16+3 30.55 ++ 30.56 ++ addr = ia64_rse_skip_regs(bsp, -sof + ridx); 30.57 ++ nat_mask = 1UL << ia64_rse_slot_num(addr); 30.58 ++ rnat_addr = ia64_rse_rnat_addr(addr); 30.59 ++ 30.60 ++ if(addr >= bspstore){ 30.61 ++ 30.62 ++ ia64_flushrs (); 30.63 ++ ia64_mf (); 30.64 ++ *addr = val; 30.65 ++ bspstore = ia64_get_bspstore(); 30.66 ++ rnat = ia64_get_rnat (); 30.67 ++ if(bspstore < rnat_addr){ 30.68 ++ rnat=rnat&(~nat_mask); 30.69 ++ }else{ 30.70 ++ *rnat_addr = (*rnat_addr)&(~nat_mask); 30.71 ++ } 30.72 ++ ia64_mf(); 30.73 ++ ia64_loadrs(); 30.74 ++ ia64_set_rnat(rnat); 30.75 ++ }else{ 30.76 ++ 30.77 ++ rnat = ia64_get_rnat (); 30.78 ++ *addr = val; 30.79 ++ if(bspstore < rnat_addr){ 30.80 ++ rnat=rnat&(~nat_mask); 30.81 ++ }else{ 30.82 ++ *rnat_addr = (*rnat_addr)&(~nat_mask); 30.83 ++ } 30.84 ++ ia64_set_bspstore (bspstore); 30.85 ++ ia64_set_rnat(rnat); 30.86 ++ } 30.87 ++ ia64_set_rsc(old_rsc); 30.88 ++} 30.89 ++ 30.90 ++ 30.91 ++static void 30.92 ++get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat) 30.93 ++{ 30.94 ++ struct switch_stack *sw = (struct switch_stack *) regs - 1; 30.95 ++ unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore; 30.96 ++ unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; 30.97 ++ unsigned long 
rnats, nat_mask; 30.98 ++ unsigned long on_kbs; 30.99 ++ unsigned long old_rsc, new_rsc; 30.100 ++ long sof = (regs->cr_ifs) & 0x7f; 30.101 ++ long sor = 8 * ((regs->cr_ifs >> 14) & 0xf); 30.102 ++ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; 30.103 ++ long ridx = r1 - 32; 30.104 ++ 30.105 ++ if (ridx >= sof) { 30.106 ++ /* read of out-of-frame register returns an undefined value; 0 in our case. */ 30.107 ++ DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof); 30.108 ++ panic("wrong stack register number"); 30.109 ++ } 30.110 ++ 30.111 ++ if (ridx < sor) 30.112 ++ ridx = rotate_reg(sor, rrb_gr, ridx); 30.113 ++ 30.114 ++ old_rsc=ia64_get_rsc(); 30.115 ++ new_rsc=old_rsc&(~(0x3)); 30.116 ++ ia64_set_rsc(new_rsc); 30.117 ++ 30.118 ++ bspstore = ia64_get_bspstore(); 30.119 ++ bsp =kbs + (regs->loadrs >> 19); //16+3; 30.120 ++ 30.121 ++ addr = ia64_rse_skip_regs(bsp, -sof + ridx); 30.122 ++ nat_mask = 1UL << ia64_rse_slot_num(addr); 30.123 ++ rnat_addr = ia64_rse_rnat_addr(addr); 30.124 ++ 30.125 ++ if(addr >= bspstore){ 30.126 ++ 30.127 ++ ia64_flushrs (); 30.128 ++ ia64_mf (); 30.129 ++ bspstore = ia64_get_bspstore(); 30.130 ++ } 30.131 ++ *val=*addr; 30.132 ++ if(bspstore < rnat_addr){ 30.133 ++ *nat=!!(ia64_get_rnat()&nat_mask); 30.134 ++ }else{ 30.135 ++ *nat = !!((*rnat_addr)&nat_mask); 30.136 ++ } 30.137 ++ ia64_set_rsc(old_rsc); 30.138 ++} 30.139 ++#else // CONFIG_VTI 30.140 + static void 30.141 + set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat) 30.142 + { 30.143 +@@ -435,9 +554,14 @@ 30.144 + *nat = 0; 30.145 + return; 30.146 + } 30.147 ++#endif // CONFIG_VTI 30.148 + 30.149 30.150 +#ifdef XEN 30.151 +void 30.152 @@ -12,7 +149,19 @@ 30.153 setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs) 30.154 { 30.155 struct switch_stack *sw = (struct switch_stack *) regs - 1; 30.156 -@@ -522,7 +526,11 @@ 30.157 +@@ -466,7 +590,11 @@ 30.158 + unat = &sw->ar_unat; 30.159 + } else { 30.160 + addr = (unsigned long)regs; 30.161 ++#ifdef CONFIG_VTI 30.162 ++ unat = ®s->eml_unat; 30.163 ++#else //CONFIG_VTI 30.164 + unat = &sw->caller_unat; 30.165 ++#endif //CONFIG_VTI 30.166 + } 30.167 + DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n", 30.168 + addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum)); 30.169 +@@ -522,7 +650,11 @@ 30.170 */ 30.171 if (regnum >= IA64_FIRST_ROTATING_FR) { 30.172 ia64_sync_fph(current); 30.173 @@ -24,7 +173,7 @@ 30.174 } else { 30.175 /* 30.176 * pt_regs or switch_stack ? 30.177 -@@ -581,7 +589,11 @@ 30.178 +@@ -581,7 +713,11 @@ 30.179 */ 30.180 if (regnum >= IA64_FIRST_ROTATING_FR) { 30.181 ia64_flush_fph(current); 30.182 @@ -36,7 +185,7 @@ 30.183 } else { 30.184 /* 30.185 * f0 = 0.0, f1= 1.0. 
Those registers are constant and are thus 30.186 -@@ -611,7 +623,11 @@ 30.187 +@@ -611,7 +747,11 @@ 30.188 } 30.189 30.190 30.191 @@ -48,7 +197,19 @@ 30.192 getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs) 30.193 { 30.194 struct switch_stack *sw = (struct switch_stack *) regs - 1; 30.195 -@@ -1294,6 +1310,9 @@ 30.196 +@@ -640,7 +780,11 @@ 30.197 + unat = &sw->ar_unat; 30.198 + } else { 30.199 + addr = (unsigned long)regs; 30.200 ++#ifdef CONFIG_VTI 30.201 ++ unat = ®s->eml_unat;; 30.202 ++#else //CONFIG_VTI 30.203 + unat = &sw->caller_unat; 30.204 ++#endif //CONFIG_VTI 30.205 + } 30.206 + 30.207 + DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum)); 30.208 +@@ -1294,6 +1438,9 @@ 30.209 void 30.210 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) 30.211 { 30.212 @@ -58,7 +219,7 @@ 30.213 struct ia64_psr *ipsr = ia64_psr(regs); 30.214 mm_segment_t old_fs = get_fs(); 30.215 unsigned long bundle[2]; 30.216 -@@ -1502,4 +1521,5 @@ 30.217 +@@ -1502,4 +1649,5 @@ 30.218 si.si_imm = 0; 30.219 force_sig_info(SIGBUS, &si, current); 30.220 goto done;
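The CONFIG_VTI set_rse_reg()/get_rse_reg() added above locate a stacked register in the kernel backing store with the ia64_rse_*() helpers. Below is a small self-contained sketch of that address arithmetic, assuming the helpers keep their stock Linux include/asm-ia64/rse.h semantics (slot 63 of every 64-slot block holds an RNaT collection word, not a register):

/* Sketch of the RSE backing-store arithmetic used by set_rse_reg()/
 * get_rse_reg() above; semantics follow Linux's include/asm-ia64/rse.h
 * (assumption: unchanged by this patch). */
#include <stdio.h>

/* Slot number (0-63) of a backing-store address. */
static unsigned long rse_slot_num(unsigned long *addr)
{
    return ((unsigned long) addr >> 3) & 0x3f;
}

/* Address of the RNaT collection word covering slot_addr. */
static unsigned long *rse_rnat_addr(unsigned long *slot_addr)
{
    return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}

/* Skip num_regs stacked registers, stepping over RNaT slots. */
static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
    long delta = rse_slot_num(addr) + num_regs;

    if (num_regs < 0)
        delta -= 0x3e;
    return addr + num_regs + delta / 0x3f;
}

int main(void)
{
    /* Fake 8-byte-aligned base at slot 0; arithmetic only, never
     * dereferenced. get_rse_reg() does the same computation as
     * addr = rse_skip_regs(bsp, -sof + ridx). */
    unsigned long *bs = (unsigned long *) 0x8000;
    unsigned long *addr = rse_skip_regs(bs, 100);

    printf("100 regs span %ld slots\n", (long)(addr - bs));  /* 101 */
    printf("rnat covering bs[5] sits at slot %lu\n",
           rse_slot_num(rse_rnat_addr(&bs[5])));             /* 63 */
    return 0;
}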
31.1 --- a/xen/arch/ia64/process.c Mon May 23 15:22:15 2005 +0000 31.2 +++ b/xen/arch/ia64/process.c Mon May 23 15:29:59 2005 +0000 31.3 @@ -64,11 +64,16 @@ long do_iopl(domid_t domain, unsigned in 31.4 void schedule_tail(struct exec_domain *next) 31.5 { 31.6 unsigned long rr7; 31.7 - printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info); 31.8 - printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info); 31.9 + //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info); 31.10 + //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info); 31.11 +#ifdef CONFIG_VTI 31.12 + /* Loading rr7 is postponed to the last point when resuming back to the guest */ 31.13 + vmx_load_all_rr(current); 31.14 +#else // CONFIG_VTI 31.15 if (rr7 = load_region_regs(current)) { 31.16 printk("schedule_tail: change to rr7 not yet implemented\n"); 31.17 } 31.18 +#endif // CONFIG_VTI 31.19 } 31.20 31.21 extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa); 31.22 @@ -346,8 +351,8 @@ void ia64_do_page_fault (unsigned long a 31.23 // FIXME should validate mpaddr here 31.24 if (d == dom0) { 31.25 if (address < dom0_start || address >= dom0_start + dom0_size) { 31.26 - printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip); 31.27 - printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip); 31.28 + //printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip); 31.29 + //printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip); 31.30 tdpfoo(); 31.31 } 31.32 }
32.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 32.2 +++ b/xen/arch/ia64/tools/README.RunVT Mon May 23 15:29:59 2005 +0000 32.3 @@ -0,0 +1,59 @@ 32.4 +INSTRUCTIONS FOR Running IPF/Xen on a VT-enabled Tiger4 platform 32.5 + 32.6 +Note: Domain0 must be an unmodified Linux kernel 32.7 + 32.8 +1) Perform the operations in README.xenia64 to get a flattened Xen IPF source tree 32.9 + 32.10 +2) Build an unmodified Linux 2.6 kernel 32.11 + a) tar xvfz linux-2.6.11.tar.gz 32.12 + b) cp arch/ia64/configs/tiger_defconfig .config 32.13 + c) Build Linux. 32.14 + 1) yes "" | make oldconfig 32.15 + 2) make 32.16 + 32.17 +3) Build the IPF VT-enabled Xen image 32.18 + edit xen/arch/ia64/Rules.mk to set 32.19 + CONFIG_VTI ?= y to enable the VT-enabled build 32.20 +4) Set up ELILO.CONF 32.21 + image=xen 32.22 + label=xen 32.23 + initrd=vmlinux2.6.11 // unmodified Linux kernel image 32.24 + read-only 32.25 + append="nomca root=/dev/sda3" 32.26 + 32.27 +STATUS as of 4/28/05 - Features implemented for Domain0 32.28 + 32.29 +0. Runs an unmodified Linux kernel as Domain0 32.30 + Validated with Linux 2.6.11 running Xwindow and a NIC on a UP logical processor 32.31 + 32.32 +1. Takes advantage of the VT-enabled processor 32.33 + a. The processor intercepts guest privileged instructions and delivers Opcode/Cause to the hypervisor 32.34 + b. One VPD (Virtual Processor Descriptor) per virtual processor 32.35 + c. Domains run in a different virtual address space from the hypervisor. Domains have one less VA bit than the hypervisor, which runs at 0xF00000... addresses protected by the processor from Domains. 32.36 + 32.37 +2. vTLB and guest_VHPT 32.38 + a. The vTLB extends machine TLB entries through a hypervisor-internal data structure 32.39 + The vTLB caches the Domain's installed TRs and TCs, and then installs TCs on behalf of the Domain instead. 32.40 + The vTLB implements collision chains 32.41 + b. The processor walks the hypervisor-internal VHPT, not the domain VHPT. On a TLB miss, the vTLB is consulted first to put the hypervisor-cached entry into the VHPT without injecting a TLB miss into the domain. 32.42 + 32.43 +3. Region ID fix-partitioning 32.44 + a. Currently hard-partitions the 24-bit RID space into 16 partitions using the top 4 bits. 32.45 + b. The hypervisor uses the very last partition's RIDs, i.e., 0xFxxxxx RIDs 32.46 + c. Effectively supports Domain0 and 14 other DomainN 32.47 + 32.48 +4. The hypervisor is mapped with 2 sets of RIDs at runtime: its own RIDs and the active Domain's RIDs 32.49 + a. Domain RIDs are used by the processor to access the guest_VHPT while the Domain runs 32.50 + b. Hypervisor RIDs are used while the hypervisor is running 32.51 + c. This implies some region register transitions on entering/exiting the hypervisor 32.52 + 32.53 +5. Linux-styled pt_regs with minor modifications for VT and instruction emulation 32.54 + a. Part of the Domain's registers are saved/restored from the VPD 32.55 + b. pt_regs is extended to include r4~r7 and the Domain's iipa & isr for possible instruction emulation, so there is no need to save a complete switch_stack on IVT entry 32.56 + 32.57 +6. Linux-styled per-virtual-processor memory/RSE stacks, the same as for a non-VT Domain0 32.58 + 32.59 +7. Handles split I/D-cache designs 32.60 + Newer IPF processors have split I/D caches. The design takes this into account when Xen copies Domain0 to its target address for execution 32.61 + 32.62 +
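A compilable sketch of the RID fix-partitioning arithmetic described in item 3 of the README above; the helper name and the per-domain partition assignment are illustrative assumptions, not code from this changeset:

/* Illustrative sketch (not from the patch) of RID fix-partitioning:
 * the 24-bit RID space is hard-partitioned into 16 partitions by the
 * top 4 bits, with partition 0xF reserved for the hypervisor. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define RID_BITS        24
#define PARTITION_BITS  4
#define PARTITION_SHIFT (RID_BITS - PARTITION_BITS)   /* 20 */
#define XEN_PARTITION   0xFu                          /* 0xFxxxxx RIDs */

/* Map a domain-local RID into its partition's machine RID range. */
static uint64_t domain_rid(unsigned partition, uint64_t vrid)
{
    return ((uint64_t) partition << PARTITION_SHIFT) |
           (vrid & ((1UL << PARTITION_SHIFT) - 1));
}

int main(void)
{
    /* Domain0 gets partition 0; partitions 1-14 serve other domains. */
    printf("dom0 rid 0x123   -> 0x%06" PRIx64 "\n", domain_rid(0, 0x123));
    printf("dom3 rid 0x123   -> 0x%06" PRIx64 "\n", domain_rid(3, 0x123));
    printf("xen  rid 0x00001 -> 0x%06" PRIx64 "\n",
           domain_rid(XEN_PARTITION, 1));
    return 0;
}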
33.1 --- a/xen/arch/ia64/tools/mkbuildtree Mon May 23 15:22:15 2005 +0000 33.2 +++ b/xen/arch/ia64/tools/mkbuildtree Mon May 23 15:29:59 2005 +0000 33.3 @@ -46,13 +46,13 @@ mkdir include/asm-generic 33.4 mkdir include/asm-ia64/linux 33.5 mkdir include/asm-ia64/linux/byteorder 33.6 # use "gcc -Iinclude/asm-ia64" to find these linux includes 33.7 -ln -s $XEN/include/xen $XEN/include/linux 33.8 -ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 33.9 +#ln -s $XEN/include/xen $XEN/include/linux 33.10 +#ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 33.11 33.12 # prepare for building asm-offsets (circular dependency) 33.13 -echo '#define IA64_TASK_SIZE 0' > include/asm-ia64/asm-offsets.h 33.14 -sleep 2 33.15 -touch arch/ia64/asm-offsets.c 33.16 +#echo '#define IA64_TASK_SIZE 0' > include/asm-ia64/asm-offsets.h 33.17 +#sleep 2 33.18 +#touch arch/ia64/asm-offsets.c 33.19 33.20 # patches to xen/common files 33.21 #xen_patch common/domain.c domain.c 33.22 @@ -107,7 +107,7 @@ cp_patch arch/ia64/mm/tlb.c arch/ia64/tl 33.23 #cp_patch arch/ia64/hp/sim/hpsim_irq.c arch/ia64/hpsim_irq.c hpsim_irq.c 33.24 33.25 softlink arch/ia64/kernel/efi_stub.S arch/ia64/efi_stub.S 33.26 -softlink arch/ia64/kernel/entry.h arch/ia64/entry.h 33.27 +cp_patch arch/ia64/kernel/entry.h arch/ia64/entry.h entry.h 33.28 softlink arch/ia64/kernel/ia64_ksyms.c arch/ia64/ia64_ksyms.c 33.29 softlink arch/ia64/kernel/irq_lsapic.c arch/ia64/irq_lsapic.c 33.30 softlink arch/ia64/kernel/machvec.c arch/ia64/machvec.c 33.31 @@ -141,8 +141,6 @@ softlink arch/ia64/lib/strlen.S arch/ia6 33.32 softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S 33.33 softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S 33.34 softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S 33.35 -#softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c 33.36 -cp_patch arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c swiotlb.c 33.37 softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S 33.38 33.39 softlink lib/cmdline.c arch/ia64/cmdline.c 33.40 @@ -172,8 +170,8 @@ cp_patch arch/ia64/hp/sim/hpsim_ssc.h in 33.41 33.42 #cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h 33.43 softlink include/asm-ia64/current.h include/asm-ia64/current.h 33.44 -#cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h 33.45 -softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h 33.46 +cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h 33.47 +#softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h 33.48 #cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h 33.49 softlink include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h 33.50 #cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h 33.51 @@ -217,7 +215,7 @@ softlink include/asm-ia64/errno.h includ 33.52 softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h 33.53 softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h 33.54 #softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h 33.55 -softlink include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h 33.56 +cp_patch include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h ia64regs.h 33.57 softlink include/asm-ia64/intrinsics.h include/asm-ia64/intrinsics.h 33.58 softlink include/asm-ia64/ioctl.h include/asm-ia64/ioctl.h 33.59 softlink include/asm-ia64/linkage.h include/asm-ia64/linkage.h 33.60 @@ -229,7 +227,7 @@ softlink include/asm-ia64/mca.h include/ 33.61 softlink 
include/asm-ia64/meminit.h include/asm-ia64/meminit.h 33.62 softlink include/asm-ia64/mman.h include/asm-ia64/mman.h 33.63 softlink include/asm-ia64/numa.h include/asm-ia64/numa.h 33.64 -softlink include/asm-ia64/pal.h include/asm-ia64/pal.h 33.65 +cp_patch include/asm-ia64/pal.h include/asm-ia64/pal.h pal.h 33.66 softlink include/asm-ia64/param.h include/asm-ia64/param.h 33.67 softlink include/asm-ia64/patch.h include/asm-ia64/patch.h 33.68 softlink include/asm-ia64/pci.h include/asm-ia64/pci.h 33.69 @@ -237,7 +235,7 @@ softlink include/asm-ia64/percpu.h inclu 33.70 #softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h 33.71 cp_patch include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h pgalloc.h 33.72 softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h 33.73 -softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h 33.74 +cp_patch include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h ptrace.h 33.75 softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h 33.76 softlink include/asm-ia64/rse.h include/asm-ia64/rse.h 33.77 softlink include/asm-ia64/rwsem.h include/asm-ia64/rwsem.h
34.1 --- a/xen/arch/ia64/vcpu.c Mon May 23 15:22:15 2005 +0000 34.2 +++ b/xen/arch/ia64/vcpu.c Mon May 23 15:29:59 2005 +0000 34.3 @@ -14,6 +14,9 @@ 34.4 #include <asm/tlb.h> 34.5 #include <asm/processor.h> 34.6 #include <asm/delay.h> 34.7 +#ifdef CONFIG_VTI 34.8 +#include <asm/vmx_vcpu.h> 34.9 +#endif // CONFIG_VTI 34.10 34.11 typedef union { 34.12 struct ia64_psr ia64_psr; 34.13 @@ -523,12 +526,19 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN 34.14 printf("vcpu_pend_interrupt: bad vector\n"); 34.15 return; 34.16 } 34.17 +#ifdef CONFIG_VTI 34.18 + if ( VMX_DOMAIN(vcpu) ) { 34.19 + set_bit(vector,VPD_CR(vcpu,irr)); 34.20 + } else 34.21 +#endif // CONFIG_VTI 34.22 + { 34.23 if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; 34.24 if (test_bit(vector,PSCBX(vcpu,irr))) { 34.25 //printf("vcpu_pend_interrupt: overrun\n"); 34.26 } 34.27 set_bit(vector,PSCBX(vcpu,irr)); 34.28 PSCB(vcpu,pending_interruption) = 1; 34.29 + } 34.30 } 34.31 34.32 void early_tick(VCPU *vcpu) 34.33 @@ -619,7 +629,8 @@ extern unsigned long privop_trace; 34.34 //privop_trace=1; 34.35 //TODO: Implement this 34.36 printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n"); 34.37 - *pval = 0; 34.38 + //*pval = 0; 34.39 + *pval = ia64_getreg(_IA64_REG_CR_LID); 34.40 return IA64_NO_FAULT; 34.41 } 34.42
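The CONFIG_VTI branch added to vcpu_pend_interrupt() above records the vector in the VPD IRR rather than the PSCB. The IRR is four 64-bit words covering vectors 0..255, indexed as irr[vector>>6], bit (vector&63) — the same arithmetic vmx_vcpu_pend_interrupt() uses in vlsapic.c below. A standalone model (the struct is a stand-in, not the real VPD):

/* Standalone model of the 256-bit IRR indexing used by the VMX path:
 * vector>>6 selects one of four 64-bit words, vector&63 the bit within. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct irr {
    uint64_t word[4];   /* vectors 0..255 */
};

static void irr_pend(struct irr *irr, unsigned vector)
{
    if (vector > 255) {
        printf("bad vector %u\n", vector);
        return;
    }
    irr->word[vector >> 6] |= (uint64_t)1 << (vector & 63);
}

static int irr_test(const struct irr *irr, unsigned vector)
{
    return (irr->word[vector >> 6] >> (vector & 63)) & 1;
}

int main(void)
{
    struct irr irr = { { 0 } };
    irr_pend(&irr, 0xd1);   /* vector 209 -> word 3, bit 17 */
    printf("pending=%d word3=%#" PRIx64 "\n",
           irr_test(&irr, 0xd1), irr.word[3]);
    return 0;
}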
35.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 35.2 +++ b/xen/arch/ia64/vlsapic.c Mon May 23 15:29:59 2005 +0000 35.3 @@ -0,0 +1,504 @@ 35.4 + 35.5 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 35.6 +/* 35.7 + * vlsapic.c: virtual lsapic model including ITC timer. 35.8 + * Copyright (c) 2005, Intel Corporation. 35.9 + * 35.10 + * This program is free software; you can redistribute it and/or modify it 35.11 + * under the terms and conditions of the GNU General Public License, 35.12 + * version 2, as published by the Free Software Foundation. 35.13 + * 35.14 + * This program is distributed in the hope it will be useful, but WITHOUT 35.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 35.16 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 35.17 + * more details. 35.18 + * 35.19 + * You should have received a copy of the GNU General Public License along with 35.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 35.21 + * Place - Suite 330, Boston, MA 02111-1307 USA. 35.22 + * 35.23 + * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) 35.24 + */ 35.25 + 35.26 +#include <linux/sched.h> 35.27 +#include <public/arch-ia64.h> 35.28 +#include <asm/ia64_int.h> 35.29 +#include <asm/vcpu.h> 35.30 +#include <asm/regionreg.h> 35.31 +#include <asm/tlb.h> 35.32 +#include <asm/processor.h> 35.33 +#include <asm/delay.h> 35.34 +#include <asm/vmx_vcpu.h> 35.35 +#include <asm/vmx_vcpu.h> 35.36 +#include <asm/regs.h> 35.37 +#include <asm/gcc_intrin.h> 35.38 +#include <asm/vmx_mm_def.h> 35.39 +#include <asm/vmx.h> 35.40 +#include <asm/hw_irq.h> 35.41 +#include <asm/vmx_pal_vsa.h> 35.42 +#include <asm/kregs.h> 35.43 + 35.44 +//u64 fire_itc; 35.45 +//u64 fire_itc2; 35.46 +//u64 fire_itm; 35.47 +//u64 fire_itm2; 35.48 +/* 35.49 + * Update the checked last_itc. 35.50 + */ 35.51 +static void update_last_itc(vtime_t *vtm, uint64_t cur_itc) 35.52 +{ 35.53 + vtm->last_itc = cur_itc; 35.54 +} 35.55 + 35.56 +/* 35.57 + * ITC value as seen by the guest (host+offset+drift). 35.58 + */ 35.59 +static uint64_t now_itc(vtime_t *vtm) 35.60 +{ 35.61 + uint64_t guest_itc=vtm->vtm_offset+ia64_get_itc(); 35.62 + 35.63 + if ( vtm->vtm_local_drift ) { 35.64 +// guest_itc -= vtm->vtm_local_drift; 35.65 + } 35.66 + if ( (long)(guest_itc - vtm->last_itc) > 0 ) { 35.67 + return guest_itc; 35.68 + 35.69 + } 35.70 + else { 35.71 + /* guest ITC went backward after an LP switch */ 35.72 + return vtm->last_itc; 35.73 + } 35.74 +} 35.75 + 35.76 +/* 35.77 + * Interval time components reset.
35.78 + */ 35.79 +static void vtm_reset(VCPU *vcpu) 35.80 +{ 35.81 + uint64_t cur_itc; 35.82 + vtime_t *vtm; 35.83 + 35.84 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.85 + vtm->vtm_offset = 0; 35.86 + vtm->vtm_local_drift = 0; 35.87 + VPD_CR(vcpu, itm) = 0; 35.88 + VPD_CR(vcpu, itv) = 0x10000; 35.89 + cur_itc = ia64_get_itc(); 35.90 + vtm->last_itc = vtm->vtm_offset + cur_itc; 35.91 +} 35.92 + 35.93 +/* callback function when vtm_timer expires */ 35.94 +static void vtm_timer_fn(unsigned long data) 35.95 +{ 35.96 + vtime_t *vtm; 35.97 + VCPU *vcpu = (VCPU*)data; 35.98 + u64 cur_itc,vitm; 35.99 + 35.100 + UINT64 vec; 35.101 + 35.102 + vec = VPD_CR(vcpu, itv) & 0xff; 35.103 + vmx_vcpu_pend_interrupt(vcpu, vec); 35.104 + 35.105 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.106 + cur_itc = now_itc(vtm); 35.107 + vitm =VPD_CR(vcpu, itm); 35.108 + //fire_itc2 = cur_itc; 35.109 + //fire_itm2 = vitm; 35.110 + update_last_itc(vtm,cur_itc); // pseudo read to update vITC 35.111 + vtm->timer_hooked = 0; 35.112 +} 35.113 + 35.114 +void vtm_init(VCPU *vcpu) 35.115 +{ 35.116 + vtime_t *vtm; 35.117 + uint64_t itc_freq; 35.118 + 35.119 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.120 + 35.121 + itc_freq = local_cpu_data->itc_freq; 35.122 + vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000; 35.123 + vtm->cfg_min_grun=itc_freq*MIN_GUEST_RUNNING_TIME/1000; 35.124 + /* set up the actimer */ 35.125 + init_ac_timer(&(vtm->vtm_timer)); 35.126 + vtm->timer_hooked = 0; 35.127 + vtm->vtm_timer.cpu = 0; /* Init value for SMP case */ 35.128 + vtm->vtm_timer.data = (unsigned long)vcpu; 35.129 + vtm->vtm_timer.function = vtm_timer_fn; 35.130 + vtm_reset(vcpu); 35.131 +} 35.132 + 35.133 +/* 35.134 + * Action when guest read ITC. 35.135 + */ 35.136 +uint64_t vtm_get_itc(VCPU *vcpu) 35.137 +{ 35.138 + uint64_t guest_itc, spsr; 35.139 + vtime_t *vtm; 35.140 + 35.141 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.142 + // FIXME: should use local_irq_disable & local_irq_enable ?? 35.143 + local_irq_save(spsr); 35.144 + guest_itc = now_itc(vtm); 35.145 + update_last_itc(vtm, guest_itc); 35.146 + 35.147 + local_irq_restore(spsr); 35.148 + return guest_itc; 35.149 +} 35.150 + 35.151 +void vtm_set_itc(VCPU *vcpu, uint64_t new_itc) 35.152 +{ 35.153 + uint64_t spsr; 35.154 + vtime_t *vtm; 35.155 + 35.156 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.157 + local_irq_save(spsr); 35.158 + vtm->vtm_offset = new_itc - ia64_get_itc(); 35.159 + vtm->last_itc = new_itc; 35.160 + vtm_interruption_update(vcpu, vtm); 35.161 + local_irq_restore(spsr); 35.162 +} 35.163 + 35.164 +void vtm_set_itv(VCPU *vcpu) 35.165 +{ 35.166 + uint64_t spsr,itv; 35.167 + vtime_t *vtm; 35.168 + 35.169 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.170 + local_irq_save(spsr); 35.171 + itv = VPD_CR(vcpu, itv); 35.172 + if ( ITV_IRQ_MASK(itv) && vtm->timer_hooked ) { 35.173 + rem_ac_timer(&(vtm->vtm_timer)); 35.174 + vtm->timer_hooked = 0; 35.175 + } 35.176 + vtm_interruption_update(vcpu, vtm); 35.177 + local_irq_restore(spsr); 35.178 +} 35.179 + 35.180 + 35.181 +/* 35.182 + * Update interrupt or hook the vtm ac_timer for fire 35.183 + * At this point vtm_timer should be removed if itv is masked. 
35.184 + */ 35.185 +/* Interrupt must be disabled at this point */ 35.186 + 35.187 +extern u64 tick_to_ns(u64 tick); 35.188 +#define TIMER_SLOP (50*1000) /* ns */ /* copy from ac_timer.c */ 35.189 +void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm) 35.190 +{ 35.191 + uint64_t cur_itc,vitm,vitv; 35.192 + uint64_t expires; 35.193 + long diff_now, diff_last; 35.194 + uint64_t spsr; 35.195 + 35.196 + vitv = VPD_CR(vcpu, itv); 35.197 + if ( ITV_IRQ_MASK(vitv) ) { 35.198 + return; 35.199 + } 35.200 + 35.201 + vitm =VPD_CR(vcpu, itm); 35.202 + local_irq_save(spsr); 35.203 + cur_itc =now_itc(vtm); 35.204 + diff_last = vtm->last_itc - vitm; 35.205 + diff_now = cur_itc - vitm; 35.206 + update_last_itc (vtm,cur_itc); 35.207 + 35.208 + if ( diff_last >= 0 ) { 35.209 + // interrupt already fired. 35.210 + if ( vtm->timer_hooked ) { 35.211 + rem_ac_timer(&(vtm->vtm_timer)); 35.212 + vtm->timer_hooked = 0; 35.213 + } 35.214 + } 35.215 + else if ( diff_now >= 0 ) { 35.216 + // ITV is fired. 35.217 + vmx_vcpu_pend_interrupt(vcpu, vitv&0xff); 35.218 + } 35.219 + /* Both last_itc & cur_itc < itm, wait for fire condition */ 35.220 + else if ( vtm->timer_hooked ) { 35.221 + expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP; 35.222 + mod_ac_timer (&(vtm->vtm_timer), expires); 35.223 + printf("mod vtm_timer\n"); 35.224 +//fire_itc = cur_itc; 35.225 +//fire_itm = vitm; 35.226 + } 35.227 + else { 35.228 + vtm->vtm_timer.expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP; 35.229 + vtm->vtm_timer.cpu = vcpu->processor; 35.230 + add_ac_timer(&(vtm->vtm_timer)); 35.231 + vtm->timer_hooked = 1; 35.232 +//fire_itc = cur_itc; 35.233 +//fire_itm = vitm; 35.234 + } 35.235 + local_irq_restore(spsr); 35.236 +} 35.237 + 35.238 +/* 35.239 + * Action for vtm when the domain is scheduled out. 35.240 + * Remove the ac_timer for vtm. 35.241 + */ 35.242 +void vtm_domain_out(VCPU *vcpu) 35.243 +{ 35.244 + vtime_t *vtm; 35.245 + uint64_t spsr; 35.246 + 35.247 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.248 + local_irq_save(spsr); 35.249 + if ( vtm->timer_hooked ) { 35.250 + rem_ac_timer(&(vtm->vtm_timer)); 35.251 + vtm->timer_hooked = 0; 35.252 + } 35.253 + local_irq_restore(spsr); 35.254 +} 35.255 + 35.256 +/* 35.257 + * Action for vtm when the domain is scheduled in. 35.258 + * Fire vtm IRQ or add the ac_timer for vtm. 35.259 + */ 35.260 +void vtm_domain_in(VCPU *vcpu) 35.261 +{ 35.262 + vtime_t *vtm; 35.263 + 35.264 + vtm=&(vcpu->arch.arch_vmx.vtm); 35.265 + vtm_interruption_update(vcpu, vtm); 35.266 +} 35.267 + 35.268 + 35.269 + 35.270 +/* 35.271 + * Next for vLSapic 35.272 + */ 35.273 + 35.274 +#define NMI_VECTOR 2 35.275 +#define ExtINT_VECTOR 0 35.276 + 35.277 +#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i]) 35.278 +/* 35.279 + * LID-CR64: Keep in vpd. 35.280 + * IVR-CR65: (RO) see guest_read_ivr(). 35.281 + * TPR-CR66: Keep in vpd, acceleration enabled. 35.282 + * EOI-CR67: see guest_write_eoi(). 35.283 + * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[] 35.284 + * can move to vpd for optimization. 35.285 + * ITV: in time virtualization. 35.286 + * PMV: Keep in vpd initialized as 0x10000. 35.287 + * CMCV: Keep in vpd initialized as 0x10000. 35.288 + * LRR0-1: Keep in vpd, initialized as 0x10000. 
35.289 + * 35.290 + */ 35.291 + 35.292 +void vlsapic_reset(VCPU *vcpu) 35.293 +{ 35.294 + int i; 35.295 + VPD_CR(vcpu, lid) = 0; 35.296 + VPD_CR(vcpu, ivr) = 0; 35.297 + VPD_CR(vcpu,tpr) = 0x10000; 35.298 + VPD_CR(vcpu, eoi) = 0; 35.299 + VPD_CR(vcpu, irr[0]) = 0; 35.300 + VPD_CR(vcpu, irr[1]) = 0; 35.301 + VPD_CR(vcpu, irr[2]) = 0; 35.302 + VPD_CR(vcpu, irr[3]) = 0; 35.303 + VPD_CR(vcpu, pmv) = 0x10000; 35.304 + VPD_CR(vcpu, cmcv) = 0x10000; 35.305 + VPD_CR(vcpu, lrr0) = 0x10000; // default reset value? 35.306 + VPD_CR(vcpu, lrr1) = 0x10000; // default reset value? 35.307 + for ( i=0; i<4; i++) { 35.308 + VLSAPIC_INSVC(vcpu,i) = 0; 35.309 + } 35.310 +} 35.311 + 35.312 +/* 35.313 + * Find highest signaled bits in 4 words (long). 35.314 + * 35.315 + * return 0-255: highest bits. 35.316 + * -1 : Not found. 35.317 + */ 35.318 +static __inline__ int highest_bits(uint64_t *dat) 35.319 +{ 35.320 + uint64_t bits, bitnum=-1; 35.321 + int i; 35.322 + 35.323 + /* loop for all 256 bits */ 35.324 + for ( i=3; i >= 0 ; i -- ) { 35.325 + bits = dat[i]; 35.326 + if ( bits ) { 35.327 + bitnum = ia64_fls(bits); 35.328 + return i*64+bitnum; 35.329 + } 35.330 + } 35.331 + return -1; 35.332 +} 35.333 + 35.334 +/* 35.335 + * Return 0-255 for pending irq. 35.336 + * -1 when no pending. 35.337 + */ 35.338 +static int highest_pending_irq(VCPU *vcpu) 35.339 +{ 35.340 + if ( VPD_CR(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR; 35.341 + if ( VPD_CR(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR; 35.342 + return highest_bits(&VPD_CR(vcpu, irr[0])); 35.343 +} 35.344 + 35.345 +static int highest_inservice_irq(VCPU *vcpu) 35.346 +{ 35.347 + if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR; 35.348 + if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR; 35.349 + return highest_bits(&(VLSAPIC_INSVC(vcpu, 0))); 35.350 +} 35.351 + 35.352 +/* 35.353 + * The pending irq is higher than the inservice one. 
35.354 + * 35.355 + */ 35.356 +static int is_higher_irq(int pending, int inservice) 35.357 +{ 35.358 + return ( (pending >> 4) > (inservice>>4) || 35.359 + ((pending != -1) && (inservice == -1)) ); 35.360 +} 35.361 + 35.362 +static int is_higher_class(int pending, int mic) 35.363 +{ 35.364 + return ( (pending >> 4) > mic ); 35.365 +} 35.366 + 35.367 +static int is_invalid_irq(int vec) 35.368 +{ 35.369 + return (vec == 1 || ((vec <= 14 && vec >= 3))); 35.370 +} 35.371 + 35.372 +/* See Table 5-8 in SDM vol2 for the definition */ 35.373 +static int 35.374 +irq_masked(VCPU *vcpu, int h_pending, int h_inservice) 35.375 +{ 35.376 + uint64_t vtpr; 35.377 + 35.378 + vtpr = VPD_CR(vcpu, tpr); 35.379 + 35.380 + if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR ) 35.381 + // Non Maskable Interrupt 35.382 + return 0; 35.383 + 35.384 + if ( h_pending == ExtINT_VECTOR && h_inservice >= 16) 35.385 + return (vtpr>>16)&1; // vtpr.mmi 35.386 + 35.387 + if ( !(vtpr&(1UL<<16)) && 35.388 + is_higher_irq(h_pending, h_inservice) && 35.389 + is_higher_class(h_pending, (vtpr>>4)&0xf) ) 35.390 + return 0; 35.391 + 35.392 + return 1; 35.393 +} 35.394 + 35.395 +void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector) 35.396 +{ 35.397 + uint64_t spsr; 35.398 + 35.399 + if (vector & ~0xff) { 35.400 + printf("vmx_vcpu_pend_interrupt: bad vector\n"); 35.401 + return; 35.402 + } 35.403 + local_irq_save(spsr); 35.404 + VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63); 35.405 + local_irq_restore(spsr); 35.406 +} 35.407 + 35.408 +/* 35.409 + * If the new pending interrupt is enabled and not masked, we directly inject 35.410 + * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when 35.411 + * the interrupt becomes unmasked, it gets injected. 35.412 + * RETURN: 35.413 + * TRUE: Interrupt is injected. 35.414 + * FALSE: Not injected but may be in VHPI when vac.a_int=1 35.415 + * 35.416 + * Optimization: We defer setting the VHPI until the EOI time, if a higher 35.417 + * priority interrupt is in-service. The idea is to reduce the 35.418 + * number of unnecessary calls to inject_vhpi. 
35.419 + */ 35.420 +int vmx_check_pending_irq(VCPU *vcpu) 35.421 +{ 35.422 + uint64_t spsr; 35.423 + int h_pending, h_inservice; 35.424 + int injected=0; 35.425 + uint64_t isr; 35.426 + IA64_PSR vpsr; 35.427 + 35.428 + local_irq_save(spsr); 35.429 + h_pending = highest_pending_irq(vcpu); 35.430 + if ( h_pending == -1 ) goto chk_irq_exit; 35.431 + h_inservice = highest_inservice_irq(vcpu); 35.432 + 35.433 + vpsr.val = vmx_vcpu_get_psr(vcpu); 35.434 + if ( vpsr.i && 35.435 + !irq_masked(vcpu, h_pending, h_inservice) ) { 35.436 + //inject_guest_irq(v); 35.437 + isr = vpsr.val & IA64_PSR_RI; 35.438 + if ( !vpsr.ic ) 35.439 + panic("Interrupt when IC=0\n"); 35.440 + vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ 35.441 + injected = 1; 35.442 + } 35.443 + else if ( VMX_VPD(vcpu,vac).a_int && 35.444 + is_higher_irq(h_pending,h_inservice) ) { 35.445 + vmx_inject_vhpi(vcpu,h_pending); 35.446 + } 35.447 + 35.448 +chk_irq_exit: 35.449 + local_irq_restore(spsr); 35.450 + return injected; 35.451 +} 35.452 + 35.453 +void guest_write_eoi(VCPU *vcpu) 35.454 +{ 35.455 + int vec; 35.456 + uint64_t spsr; 35.457 + 35.458 + vec = highest_inservice_irq(vcpu); 35.459 + if ( vec < 0 ) panic("Wrong vector to EOI\n"); 35.460 + local_irq_save(spsr); 35.461 + VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63)); 35.462 + local_irq_restore(spsr); 35.463 + VPD_CR(vcpu, eoi)=0; // overwrite the data 35.464 +} 35.465 + 35.466 +uint64_t guest_read_vivr(VCPU *vcpu) 35.467 +{ 35.468 + int vec, next, h_inservice; 35.469 + uint64_t spsr; 35.470 + 35.471 + local_irq_save(spsr); 35.472 + vec = highest_pending_irq(vcpu); 35.473 + h_inservice = highest_inservice_irq(vcpu); 35.474 + if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) { 35.475 + local_irq_restore(spsr); 35.476 + return IA64_SPURIOUS_INT_VECTOR; 35.477 + } 35.478 + 35.479 + VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63)); 35.480 + VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63)); 35.481 + 35.482 + h_inservice = highest_inservice_irq(vcpu); 35.483 + next = highest_pending_irq(vcpu); 35.484 + if ( VMX_VPD(vcpu,vac).a_int && 35.485 + (is_higher_irq(next, h_inservice) || (next == -1)) ) 35.486 + vmx_inject_vhpi(vcpu, next); 35.487 + local_irq_restore(spsr); 35.488 + return (uint64_t)vec; 35.489 +} 35.490 + 35.491 +void vmx_inject_vhpi(VCPU *vcpu, u8 vec) 35.492 +{ 35.493 + VMX_VPD(vcpu,vhpi) = vec / 16; 35.494 + 35.495 + 35.496 + // non-maskable 35.497 + if ( vec == NMI_VECTOR ) // NMI 35.498 + VMX_VPD(vcpu,vhpi) = 32; 35.499 + else if (vec == ExtINT_VECTOR) //ExtINT 35.500 + VMX_VPD(vcpu,vhpi) = 16; 35.501 + else if (vec == -1) 35.502 + VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */ 35.503 + 35.504 + ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT, 35.505 + (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0); 35.506 +} 35.507 +
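A minimal model of the guest ITC arithmetic implemented by now_itc(), update_last_itc() and vtm_set_itc() in vlsapic.c above, with the drift term ignored for simplicity: guest time is the host ITC plus a per-VCPU offset, and reads are clamped so the guest never observes time going backward across a logical-processor switch.

/* Minimal model (simplified; drift ignored) of vlsapic.c's guest ITC:
 * guest_itc = host_itc + offset, clamped to be monotonic. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct vtime {
    uint64_t offset;    /* guest_itc = host_itc + offset */
    uint64_t last_itc;  /* highest value already exposed to the guest */
};

static uint64_t vtm_now(struct vtime *vtm, uint64_t host_itc)
{
    uint64_t guest_itc = host_itc + vtm->offset;

    if ((int64_t)(guest_itc - vtm->last_itc) > 0)
        vtm->last_itc = guest_itc;  /* normal forward progress */
    return vtm->last_itc;           /* else clamp: never backward */
}

static void vtm_set(struct vtime *vtm, uint64_t host_itc, uint64_t new_itc)
{
    vtm->offset   = new_itc - host_itc;  /* guest wrote its ITC */
    vtm->last_itc = new_itc;
}

int main(void)
{
    struct vtime vtm = { 0, 0 };
    vtm_set(&vtm, 1000, 5000);  /* guest sets its ITC to 5000 */
    printf("%" PRIu64 "\n", vtm_now(&vtm, 1100));  /* 5100 */
    printf("%" PRIu64 "\n", vtm_now(&vtm, 1050));  /* clamped to 5100 */
    return 0;
}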
36.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 36.2 +++ b/xen/arch/ia64/vmmu.c Mon May 23 15:29:59 2005 +0000 36.3 @@ -0,0 +1,801 @@ 36.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 36.5 +/* 36.6 + * vmmu.c: virtual memory management unit components. 36.7 + * Copyright (c) 2005, Intel Corporation. 36.8 + * 36.9 + * This program is free software; you can redistribute it and/or modify it 36.10 + * under the terms and conditions of the GNU General Public License, 36.11 + * version 2, as published by the Free Software Foundation. 36.12 + * 36.13 + * This program is distributed in the hope it will be useful, but WITHOUT 36.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 36.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 36.16 + * more details. 36.17 + * 36.18 + * You should have received a copy of the GNU General Public License along with 36.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 36.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 36.21 + * 36.22 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 36.23 + * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) 36.24 + */ 36.25 +#include <linux/sched.h> 36.26 +#include <asm/tlb.h> 36.27 +#include <asm/gcc_intrin.h> 36.28 +#include <asm/vcpu.h> 36.29 +#include <xen/interrupt.h> 36.30 +#include <asm/vmx_vcpu.h> 36.31 +#include <asm/vmx_mm_def.h> 36.32 +#include <asm/vmx.h> 36.33 +#include <asm/hw_irq.h> 36.34 +#include <asm/vmx_pal_vsa.h> 36.35 +#include <asm/kregs.h> 36.36 + 36.37 +/* 36.38 + * Architecture ppn is in 4KB unit while XEN 36.39 + * page may be different(1<<PAGE_SHIFT). 36.40 + */ 36.41 +static inline u64 arch_ppn_to_xen_ppn(u64 appn) 36.42 +{ 36.43 + return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT; 36.44 +} 36.45 + 36.46 +static inline u64 xen_ppn_to_arch_ppn(u64 xppn) 36.47 +{ 36.48 + return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT; 36.49 +} 36.50 + 36.51 + 36.52 +/* 36.53 + * Get the machine page frame number in 16KB unit 36.54 + * Input: 36.55 + * d: 36.56 + */ 36.57 +u64 get_mfn(domid_t domid, u64 gpfn, u64 pages) 36.58 +{ 36.59 + struct domain *d; 36.60 + u64 i, xen_gppn, xen_mppn, mpfn; 36.61 + 36.62 + if ( domid == DOMID_SELF ) { 36.63 + d = current->domain; 36.64 + } 36.65 + else { 36.66 + d = find_domain_by_id(domid); 36.67 + } 36.68 + xen_gppn = arch_ppn_to_xen_ppn(gpfn); 36.69 + xen_mppn = __gpfn_to_mfn(d, xen_gppn); 36.70 +/* 36.71 + for (i=0; i<pages; i++) { 36.72 + if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) { 36.73 + return INVALID_MFN; 36.74 + } 36.75 + } 36.76 +*/ 36.77 + mpfn= xen_ppn_to_arch_ppn(xen_mppn); 36.78 + mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn); 36.79 + return mpfn; 36.80 + 36.81 +} 36.82 + 36.83 +/* 36.84 + * The VRN bits of va stand for which rr to get. 36.85 + */ 36.86 +rr_t vmmu_get_rr(VCPU *vcpu, u64 va) 36.87 +{ 36.88 + rr_t vrr; 36.89 + vmx_vcpu_get_rr(vcpu, va, &vrr.value); 36.90 + return vrr; 36.91 +} 36.92 + 36.93 + 36.94 +void recycle_message(thash_cb_t *hcb, u64 para) 36.95 +{ 36.96 + printk("hcb=%p recycled with %lx\n",hcb,para); 36.97 +} 36.98 + 36.99 + 36.100 +/* 36.101 + * Purge all guest TCs in logical processor. 36.102 + * Instead of purging all LP TCs, we should only purge 36.103 + * TCs that belong to this guest. 
36.104 + */ 36.105 +void 36.106 +purge_machine_tc_by_domid(domid_t domid) 36.107 +{ 36.108 +#ifndef PURGE_GUEST_TC_ONLY 36.109 + // purge all TCs 36.110 + struct ia64_pal_retval result; 36.111 + u64 addr; 36.112 + u32 count1,count2; 36.113 + u32 stride1,stride2; 36.114 + u32 i,j; 36.115 + u64 psr; 36.116 + 36.117 + 36.118 + result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0); 36.119 + if ( result.status != 0 ) { 36.120 + panic ("PAL_PTCE_INFO failed\n"); 36.121 + } 36.122 + addr = result.v0; 36.123 + count1 = HIGH_32BITS(result.v1); 36.124 + count2 = LOW_32BITS (result.v1); 36.125 + stride1 = HIGH_32BITS(result.v2); 36.126 + stride2 = LOW_32BITS (result.v2); 36.127 + 36.128 + local_irq_save(psr); 36.129 + for (i=0; i<count1; i++) { 36.130 + for (j=0; j<count2; j++) { 36.131 + ia64_ptce(addr); 36.132 + addr += stride2; 36.133 + } 36.134 + addr += stride1; 36.135 + } 36.136 + local_irq_restore(psr); 36.137 +#else 36.138 + // purge all TCs belong to this guest. 36.139 +#endif 36.140 +} 36.141 + 36.142 +static thash_cb_t *init_domain_vhpt(struct exec_domain *d) 36.143 +{ 36.144 + struct pfn_info *page; 36.145 + void *vbase,*vcur; 36.146 + vhpt_special *vs; 36.147 + thash_cb_t *vhpt; 36.148 + PTA pta_value; 36.149 + 36.150 + page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER); 36.151 + if ( page == NULL ) { 36.152 + panic("No enough contiguous memory for init_domain_mm\n"); 36.153 + } 36.154 + vbase = page_to_virt(page); 36.155 + printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase); 36.156 + memset(vbase, 0, VCPU_TLB_SIZE); 36.157 + vcur = (void*)((u64)vbase + VCPU_TLB_SIZE); 36.158 + vhpt = --((thash_cb_t*)vcur); 36.159 + vhpt->ht = THASH_VHPT; 36.160 + vhpt->vcpu = d; 36.161 + vhpt->hash_func = machine_thash; 36.162 + vs = --((vhpt_special *)vcur); 36.163 + 36.164 + /* Setup guest pta */ 36.165 + pta_value.val = 0; 36.166 + pta_value.ve = 1; 36.167 + pta_value.vf = 1; 36.168 + pta_value.size = VCPU_TLB_SHIFT - 1; /* 2M */ 36.169 + pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT; 36.170 + d->arch.arch_vmx.mpta = pta_value.val; 36.171 + 36.172 + vhpt->vs = vs; 36.173 + vhpt->vs->get_mfn = get_mfn; 36.174 + vhpt->vs->tag_func = machine_ttag; 36.175 + vhpt->hash = vbase; 36.176 + vhpt->hash_sz = VCPU_TLB_SIZE/2; 36.177 + vhpt->cch_buf = (u64)vbase + vhpt->hash_sz; 36.178 + vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf; 36.179 + vhpt->recycle_notifier = recycle_message; 36.180 + thash_init(vhpt,VCPU_TLB_SHIFT-1); 36.181 + return vhpt; 36.182 +} 36.183 + 36.184 + 36.185 +thash_cb_t *init_domain_tlb(struct exec_domain *d) 36.186 +{ 36.187 + struct pfn_info *page; 36.188 + void *vbase,*vcur; 36.189 + tlb_special_t *ts; 36.190 + thash_cb_t *tlb; 36.191 + 36.192 + page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER); 36.193 + if ( page == NULL ) { 36.194 + panic("No enough contiguous memory for init_domain_mm\n"); 36.195 + } 36.196 + vbase = page_to_virt(page); 36.197 + printk("Allocate domain tlb at 0x%lx\n", (u64)vbase); 36.198 + memset(vbase, 0, VCPU_TLB_SIZE); 36.199 + vcur = (void*)((u64)vbase + VCPU_TLB_SIZE); 36.200 + tlb = --((thash_cb_t*)vcur); 36.201 + tlb->ht = THASH_TLB; 36.202 + tlb->vcpu = d; 36.203 + ts = --((tlb_special_t *)vcur); 36.204 + tlb->ts = ts; 36.205 + tlb->ts->vhpt = init_domain_vhpt(d); 36.206 + tlb->hash_func = machine_thash; 36.207 + tlb->hash = vbase; 36.208 + tlb->hash_sz = VCPU_TLB_SIZE/2; 36.209 + tlb->cch_buf = (u64)vbase + tlb->hash_sz; 36.210 + tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf; 36.211 + tlb->recycle_notifier = recycle_message; 36.212 + 
thash_init(tlb,VCPU_TLB_SHIFT-1); 36.213 + return tlb; 36.214 +} 36.215 + 36.216 +/* Allocate physical to machine mapping table for domN 36.217 + * FIXME: Later this interface may be removed, if that table is provided 36.218 + * by control panel. Dom0 has gpfn identical to mfn, which doesn't need 36.219 + * this interface at all. 36.220 + */ 36.221 +void 36.222 +alloc_pmt(struct domain *d) 36.223 +{ 36.224 + struct pfn_info *page; 36.225 + 36.226 + /* Only called once */ 36.227 + ASSERT(d->arch.pmt); 36.228 + 36.229 + page = alloc_domheap_pages(NULL, get_order(d->max_pages)); 36.230 + ASSERT(page); 36.231 + 36.232 + d->arch.pmt = page_to_virt(page); 36.233 + memset(d->arch.pmt, 0x55, d->max_pages * 8); 36.234 +} 36.235 + 36.236 +/* 36.237 + * Insert guest TLB to machine TLB. 36.238 + * data: In TLB format 36.239 + */ 36.240 +void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb) 36.241 +{ 36.242 + u64 saved_itir, saved_ifa, saved_rr; 36.243 + u64 pages; 36.244 + thash_data_t mtlb; 36.245 + rr_t vrr; 36.246 + unsigned int cl = tlb->cl; 36.247 + 36.248 + mtlb.ifa = tlb->vadr; 36.249 + mtlb.itir = tlb->itir & ~ITIR_RV_MASK; 36.250 + vrr = vmmu_get_rr(d,mtlb.ifa); 36.251 + //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value); 36.252 + pages = PSIZE(vrr.ps) >> PAGE_SHIFT; 36.253 + mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK; 36.254 + mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, pages); 36.255 + if (mtlb.ppn == INVALID_MFN) 36.256 + panic("Machine tlb insert with invalid mfn number.\n"); 36.257 + 36.258 + __asm __volatile("rsm psr.ic|psr.i;; srlz.i" ); 36.259 + 36.260 + saved_itir = ia64_getreg(_IA64_REG_CR_ITIR); 36.261 + saved_ifa = ia64_getreg(_IA64_REG_CR_IFA); 36.262 + saved_rr = ia64_get_rr(mtlb.ifa); 36.263 + 36.264 + ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir); 36.265 + ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa); 36.266 + /* Only access memory stack which is mapped by TR, 36.267 + * after rr is switched. 36.268 + */ 36.269 + ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value)); 36.270 + ia64_srlz_d(); 36.271 + if ( cl == ISIDE_TLB ) { 36.272 + ia64_itci(mtlb.page_flags); 36.273 + ia64_srlz_i(); 36.274 + } 36.275 + else { 36.276 + ia64_itcd(mtlb.page_flags); 36.277 + ia64_srlz_d(); 36.278 + } 36.279 + ia64_set_rr(mtlb.ifa,saved_rr); 36.280 + ia64_srlz_d(); 36.281 + ia64_setreg(_IA64_REG_CR_IFA, saved_ifa); 36.282 + ia64_setreg(_IA64_REG_CR_ITIR, saved_itir); 36.283 + __asm __volatile("ssm psr.ic|psr.i;; srlz.i" ); 36.284 +} 36.285 + 36.286 +u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps) 36.287 +{ 36.288 + u64 saved_pta, saved_rr0; 36.289 + u64 hash_addr, tag; 36.290 + unsigned long psr; 36.291 + struct exec_domain *ed = current; 36.292 + rr_t vrr; 36.293 + 36.294 + 36.295 + saved_pta = ia64_getreg(_IA64_REG_CR_PTA); 36.296 + saved_rr0 = ia64_get_rr(0); 36.297 + vrr.value = saved_rr0; 36.298 + vrr.rid = rid; 36.299 + vrr.ps = ps; 36.300 + 36.301 + va = (va << 3) >> 3; // set VRN to 0. 
36.302 + // TODO: Set to enforce lazy mode 36.303 + local_irq_save(psr); 36.304 + ia64_setreg(_IA64_REG_CR_PTA, pta.val); 36.305 + ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value)); 36.306 + ia64_srlz_d(); 36.307 + 36.308 + hash_addr = ia64_thash(va); 36.309 + ia64_setreg(_IA64_REG_CR_PTA, saved_pta); 36.310 + 36.311 + ia64_set_rr(0, saved_rr0); 36.312 + ia64_srlz_d(); 36.313 + local_irq_restore(psr); 36.314 + return hash_addr; 36.315 +} 36.316 + 36.317 +u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps) 36.318 +{ 36.319 + u64 saved_pta, saved_rr0; 36.320 + u64 hash_addr, tag; 36.321 + u64 psr; 36.322 + struct exec_domain *ed = current; 36.323 + rr_t vrr; 36.324 + 36.325 + // TODO: Set to enforce lazy mode 36.326 + saved_pta = ia64_getreg(_IA64_REG_CR_PTA); 36.327 + saved_rr0 = ia64_get_rr(0); 36.328 + vrr.value = saved_rr0; 36.329 + vrr.rid = rid; 36.330 + vrr.ps = ps; 36.331 + 36.332 + va = (va << 3) >> 3; // set VRN to 0. 36.333 + local_irq_save(psr); 36.334 + ia64_setreg(_IA64_REG_CR_PTA, pta.val); 36.335 + ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value)); 36.336 + ia64_srlz_d(); 36.337 + 36.338 + tag = ia64_ttag(va); 36.339 + ia64_setreg(_IA64_REG_CR_PTA, saved_pta); 36.340 + 36.341 + ia64_set_rr(0, saved_rr0); 36.342 + ia64_srlz_d(); 36.343 + local_irq_restore(psr); 36.344 + return tag; 36.345 +} 36.346 + 36.347 +/* 36.348 + * Purge machine tlb. 36.349 + * INPUT 36.350 + * rr: guest rr. 36.351 + * va: only bits 0:60 is valid 36.352 + * size: bits format (1<<size) for the address range to purge. 36.353 + * 36.354 + */ 36.355 +void machine_tlb_purge(u64 rid, u64 va, u64 ps) 36.356 +{ 36.357 + u64 saved_rr0; 36.358 + u64 psr; 36.359 + rr_t vrr; 36.360 + 36.361 + va = (va << 3) >> 3; // set VRN to 0. 36.362 + saved_rr0 = ia64_get_rr(0); 36.363 + vrr.value = saved_rr0; 36.364 + vrr.rid = rid; 36.365 + vrr.ps = ps; 36.366 + local_irq_save(psr); 36.367 + ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.value) ); 36.368 + ia64_srlz_d(); 36.369 + ia64_ptcl(va, ps << 2); 36.370 + ia64_set_rr( 0, saved_rr0 ); 36.371 + ia64_srlz_d(); 36.372 + local_irq_restore(psr); 36.373 +} 36.374 + 36.375 + 36.376 +int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref) 36.377 +{ 36.378 + ia64_rr vrr; 36.379 + PTA vpta; 36.380 + IA64_PSR vpsr; 36.381 + 36.382 + vpsr.val = vmx_vcpu_get_psr(vcpu); 36.383 + vrr = vmx_vcpu_rr(vcpu, vadr); 36.384 + vmx_vcpu_get_pta(vcpu,&vpta.val); 36.385 + 36.386 + if ( vrr.ve & vpta.ve ) { 36.387 + switch ( ref ) { 36.388 + case DATA_REF: 36.389 + case NA_REF: 36.390 + return vpsr.dt; 36.391 + case INST_REF: 36.392 + return vpsr.dt && vpsr.it && vpsr.ic; 36.393 + case RSE_REF: 36.394 + return vpsr.dt && vpsr.rt; 36.395 + 36.396 + } 36.397 + } 36.398 + return 0; 36.399 +} 36.400 + 36.401 + 36.402 +int unimplemented_gva(VCPU *vcpu,u64 vadr) 36.403 +{ 36.404 + int bit=vcpu->domain->arch.imp_va_msb; 36.405 + u64 ladr =(vadr<<3)>>(3+bit); 36.406 + if(!ladr||ladr==(1U<<(61-bit))-1){ 36.407 + return 0; 36.408 + }else{ 36.409 + return 1; 36.410 + } 36.411 +} 36.412 + 36.413 + 36.414 +/* 36.415 + * Prefetch guest bundle code. 36.416 + * INPUT: 36.417 + * code: buffer pointer to hold the read data. 36.418 + * num: number of dword (8byts) to read. 
36.419 + */ 36.420 +int 36.421 +fetch_code(VCPU *vcpu, u64 gip, u64 *code) 36.422 +{ 36.423 + u64 gpip; // guest physical IP 36.424 + u64 mpa; 36.425 + thash_data_t *tlb; 36.426 + rr_t vrr; 36.427 + u64 mfn; 36.428 + 36.429 + if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode 36.430 + gpip = gip; 36.431 + } 36.432 + else { 36.433 + vmx_vcpu_get_rr(vcpu, gip, &vrr.value); 36.434 + tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 36.435 + vrr.rid, gip, ISIDE_TLB ); 36.436 + if ( tlb == NULL ) panic("No entry found in ITLB\n"); 36.437 + gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) ); 36.438 + } 36.439 + mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT); 36.440 + if ( mfn == INVALID_MFN ) return 0; 36.441 + 36.442 + mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT); 36.443 + *code = *(u64*)__va(mpa); 36.444 + return 1; 36.445 +} 36.446 + 36.447 +IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa) 36.448 +{ 36.449 + 36.450 + thash_data_t data, *ovl; 36.451 + thash_cb_t *hcb; 36.452 + search_section_t sections; 36.453 + rr_t vrr; 36.454 + 36.455 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.456 + data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 36.457 + data.itir=itir; 36.458 + data.vadr=PAGEALIGN(ifa,data.ps); 36.459 + data.section=THASH_TLB_TC; 36.460 + data.cl=ISIDE_TLB; 36.461 + vmx_vcpu_get_rr(vcpu, ifa, &vrr); 36.462 + data.rid = vrr.rid; 36.463 + 36.464 + sections.v = THASH_SECTION_TR; 36.465 + 36.466 + ovl = thash_find_overlap(hcb, &data, sections); 36.467 + while (ovl) { 36.468 + // generate MCA. 36.469 + panic("Tlb conflict!!"); 36.470 + return; 36.471 + } 36.472 + sections.v = THASH_SECTION_TC; 36.473 + thash_purge_entries(hcb, &data, sections); 36.474 + thash_insert(hcb, &data, ifa); 36.475 + return IA64_NO_FAULT; 36.476 +} 36.477 + 36.478 + 36.479 + 36.480 + 36.481 +IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa) 36.482 +{ 36.483 + 36.484 + thash_data_t data, *ovl; 36.485 + thash_cb_t *hcb; 36.486 + search_section_t sections; 36.487 + rr_t vrr; 36.488 + 36.489 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.490 + data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 36.491 + data.itir=itir; 36.492 + data.vadr=PAGEALIGN(ifa,data.ps); 36.493 + data.section=THASH_TLB_TC; 36.494 + data.cl=DSIDE_TLB; 36.495 + vmx_vcpu_get_rr(vcpu, ifa, &vrr); 36.496 + data.rid = vrr.rid; 36.497 + sections.v = THASH_SECTION_TR; 36.498 + 36.499 + ovl = thash_find_overlap(hcb, &data, sections); 36.500 + if (ovl) { 36.501 + // generate MCA. 36.502 + panic("Tlb conflict!!"); 36.503 + return; 36.504 + } 36.505 + sections.v = THASH_SECTION_TC; 36.506 + thash_purge_entries(hcb, &data, sections); 36.507 + thash_insert(hcb, &data, ifa); 36.508 + return IA64_NO_FAULT; 36.509 +} 36.510 + 36.511 +IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va) 36.512 +{ 36.513 + 36.514 + thash_data_t data, *ovl; 36.515 + thash_cb_t *hcb; 36.516 + search_section_t sections; 36.517 + rr_t vrr; 36.518 + 36.519 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.520 + data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 36.521 + data.itir=0; 36.522 + data.ps = ps; 36.523 + data.vadr=PAGEALIGN(va,ps); 36.524 + data.section=THASH_TLB_FM; 36.525 + data.cl=DSIDE_TLB; 36.526 + vmx_vcpu_get_rr(vcpu, va, &vrr); 36.527 + data.rid = vrr.rid; 36.528 + sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM; 36.529 + 36.530 + ovl = thash_find_overlap(hcb, &data, sections); 36.531 + if (ovl) { 36.532 + // generate MCA. 
36.533 + panic("Foreignmap Tlb conflict!!"); 36.534 + return; 36.535 + } 36.536 + thash_insert(hcb, &data, va); 36.537 + return IA64_NO_FAULT; 36.538 +} 36.539 + 36.540 + 36.541 +IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx) 36.542 +{ 36.543 + 36.544 + thash_data_t data, *ovl; 36.545 + thash_cb_t *hcb; 36.546 + search_section_t sections; 36.547 + rr_t vrr; 36.548 + 36.549 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.550 + data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 36.551 + data.itir=itir; 36.552 + data.vadr=PAGEALIGN(ifa,data.ps); 36.553 + data.section=THASH_TLB_TR; 36.554 + data.cl=ISIDE_TLB; 36.555 + vmx_vcpu_get_rr(vcpu, ifa, &vrr); 36.556 + data.rid = vrr.rid; 36.557 + sections.v = THASH_SECTION_TR; 36.558 + 36.559 + ovl = thash_find_overlap(hcb, &data, sections); 36.560 + if (ovl) { 36.561 + // generate MCA. 36.562 + panic("Tlb conflict!!"); 36.563 + return; 36.564 + } 36.565 + sections.v=THASH_SECTION_TC; 36.566 + thash_purge_entries(hcb, &data, sections); 36.567 + thash_tr_insert(hcb, &data, ifa, idx); 36.568 + return IA64_NO_FAULT; 36.569 +} 36.570 + 36.571 +IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx) 36.572 +{ 36.573 + 36.574 + thash_data_t data, *ovl; 36.575 + thash_cb_t *hcb; 36.576 + search_section_t sections; 36.577 + rr_t vrr; 36.578 + 36.579 + 36.580 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.581 + data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 36.582 + data.itir=itir; 36.583 + data.vadr=PAGEALIGN(ifa,data.ps); 36.584 + data.section=THASH_TLB_TR; 36.585 + data.cl=DSIDE_TLB; 36.586 + vmx_vcpu_get_rr(vcpu, ifa, &vrr); 36.587 + data.rid = vrr.rid; 36.588 + sections.v = THASH_SECTION_TR; 36.589 + 36.590 + ovl = thash_find_overlap(hcb, &data, sections); 36.591 + while (ovl) { 36.592 + // generate MCA. 
36.593 + panic("Tlb conflict!!"); 36.594 + return; 36.595 + } 36.596 + sections.v=THASH_SECTION_TC; 36.597 + thash_purge_entries(hcb, &data, sections); 36.598 + thash_tr_insert(hcb, &data, ifa, idx); 36.599 + return IA64_NO_FAULT; 36.600 +} 36.601 + 36.602 + 36.603 + 36.604 +IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps) 36.605 +{ 36.606 + thash_cb_t *hcb; 36.607 + ia64_rr rr; 36.608 + search_section_t sections; 36.609 + 36.610 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.611 + rr=vmx_vcpu_rr(vcpu,vadr); 36.612 + sections.v = THASH_SECTION_TR | THASH_SECTION_TC; 36.613 + thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB); 36.614 + return IA64_NO_FAULT; 36.615 +} 36.616 + 36.617 +IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps) 36.618 +{ 36.619 + thash_cb_t *hcb; 36.620 + ia64_rr rr; 36.621 + search_section_t sections; 36.622 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.623 + rr=vmx_vcpu_rr(vcpu,vadr); 36.624 + sections.v = THASH_SECTION_TR | THASH_SECTION_TC; 36.625 + thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB); 36.626 + return IA64_NO_FAULT; 36.627 +} 36.628 + 36.629 +IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps) 36.630 +{ 36.631 + thash_cb_t *hcb; 36.632 + ia64_rr vrr; 36.633 + search_section_t sections; 36.634 + thash_data_t data, *ovl; 36.635 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.636 + vrr=vmx_vcpu_rr(vcpu,vadr); 36.637 + sections.v = THASH_SECTION_TC; 36.638 + vadr = PAGEALIGN(vadr, ps); 36.639 + 36.640 + thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB); 36.641 + thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,ISIDE_TLB); 36.642 + return IA64_NO_FAULT; 36.643 +} 36.644 + 36.645 + 36.646 +IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr) 36.647 +{ 36.648 + thash_cb_t *hcb; 36.649 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.650 + thash_purge_all(hcb); 36.651 + return IA64_NO_FAULT; 36.652 +} 36.653 + 36.654 +IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps) 36.655 +{ 36.656 + vmx_vcpu_ptc_l(vcpu, vadr, ps); 36.657 + return IA64_ILLOP_FAULT; 36.658 +} 36.659 + 36.660 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps) 36.661 +{ 36.662 + vmx_vcpu_ptc_l(vcpu, vadr, ps); 36.663 + return IA64_NO_FAULT; 36.664 +} 36.665 + 36.666 + 36.667 +IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval) 36.668 +{ 36.669 + PTA vpta; 36.670 + ia64_rr vrr; 36.671 + u64 vhpt_offset,tmp; 36.672 + vmx_vcpu_get_pta(vcpu, &vpta.val); 36.673 + vrr=vmx_vcpu_rr(vcpu, vadr); 36.674 + if(vpta.vf){ 36.675 + panic("THASH,Don't support long format VHPT"); 36.676 + *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0); 36.677 + }else{ 36.678 + vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1); 36.679 + *pval = (vadr&VRN_MASK)| 36.680 + (vpta.val<<3>>(vpta.size+3)<<(vpta.size))| 36.681 + vhpt_offset; 36.682 + } 36.683 + return IA64_NO_FAULT; 36.684 +} 36.685 + 36.686 + 36.687 +IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval) 36.688 +{ 36.689 + ia64_rr vrr; 36.690 + PTA vpta; 36.691 + vmx_vcpu_get_pta(vcpu, &vpta.val); 36.692 + vrr=vmx_vcpu_rr(vcpu, vadr); 36.693 + if(vpta.vf){ 36.694 + panic("THASH,Don't support long format VHPT"); 36.695 + *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0); 36.696 + }else{ 36.697 + *pval = 1; 36.698 + } 36.699 + return IA64_NO_FAULT; 36.700 +} 36.701 + 36.702 + 36.703 + 36.704 +IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr) 36.705 +{ 36.706 + thash_data_t *data; 36.707 + thash_cb_t *hcb; 36.708 + ia64_rr vrr; 36.709 + ISR visr,pt_isr; 36.710 + 
REGS *regs; 36.711 + u64 vhpt_adr; 36.712 + IA64_PSR vpsr; 36.713 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.714 + vrr=vmx_vcpu_rr(vcpu,vadr); 36.715 + regs=vcpu_regs(vcpu); 36.716 + pt_isr.val=regs->cr_isr; 36.717 + visr.val=0; 36.718 + visr.ei=pt_isr.ei; 36.719 + visr.ir=pt_isr.ir; 36.720 + vpsr.val = vmx_vcpu_get_psr(vcpu); 36.721 + if(vpsr.ic==0){ 36.722 + visr.ni=1; 36.723 + } 36.724 + visr.na=1; 36.725 + data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB); 36.726 + if(data){ 36.727 + if(data->p==0){ 36.728 + visr.na=1; 36.729 + vmx_vcpu_set_isr(vcpu,visr.val); 36.730 + page_not_present(vcpu, vadr); 36.731 + return IA64_FAULT; 36.732 + }else if(data->ma == VA_MATTR_NATPAGE){ 36.733 + visr.na = 1; 36.734 + vmx_vcpu_set_isr(vcpu, visr.val); 36.735 + dnat_page_consumption(vcpu, vadr); 36.736 + return IA64_FAULT; 36.737 + }else{ 36.738 + *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1)); 36.739 + return IA64_NO_FAULT; 36.740 + } 36.741 + }else{ 36.742 + if(!vhpt_enabled(vcpu, vadr, NA_REF)){ 36.743 + if(vpsr.ic){ 36.744 + vmx_vcpu_set_isr(vcpu, visr.val); 36.745 + alt_dtlb(vcpu, vadr); 36.746 + return IA64_FAULT; 36.747 + } 36.748 + else{ 36.749 + nested_dtlb(vcpu); 36.750 + return IA64_FAULT; 36.751 + } 36.752 + } 36.753 + else{ 36.754 + vmx_vcpu_thash(vcpu, vadr, &vhpt_adr); 36.755 + vrr=vmx_vcpu_rr(vcpu,vhpt_adr); 36.756 + data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB); 36.757 + if(data){ 36.758 + if(vpsr.ic){ 36.759 + vmx_vcpu_set_isr(vcpu, visr.val); 36.760 + dtlb_fault(vcpu, vadr); 36.761 + return IA64_FAULT; 36.762 + } 36.763 + else{ 36.764 + nested_dtlb(vcpu); 36.765 + return IA64_FAULT; 36.766 + } 36.767 + } 36.768 + else{ 36.769 + if(vpsr.ic){ 36.770 + vmx_vcpu_set_isr(vcpu, visr.val); 36.771 + dvhpt_fault(vcpu, vadr); 36.772 + return IA64_FAULT; 36.773 + } 36.774 + else{ 36.775 + nested_dtlb(vcpu); 36.776 + return IA64_FAULT; 36.777 + } 36.778 + } 36.779 + } 36.780 + } 36.781 +} 36.782 + 36.783 +IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key) 36.784 +{ 36.785 + thash_data_t *data; 36.786 + thash_cb_t *hcb; 36.787 + ia64_rr rr; 36.788 + PTA vpta; 36.789 + vmx_vcpu_get_pta(vcpu, &vpta.val); 36.790 + if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){ 36.791 + *key=1; 36.792 + return IA64_NO_FAULT; 36.793 + } 36.794 + hcb = vmx_vcpu_get_vtlb(vcpu); 36.795 + rr=vmx_vcpu_rr(vcpu,vadr); 36.796 + data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB); 36.797 + if(!data||!data->p){ 36.798 + *key=1; 36.799 + }else{ 36.800 + *key=data->key; 36.801 + } 36.802 + return IA64_NO_FAULT; 36.803 +} 36.804 +
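
For reference, the short-format VHPT hash computed by vmx_vcpu_thash() above reduces to a small amount of bit arithmetic: the page number of the faulting address, scaled to 8-byte VHPT entries and masked to the table size, is combined with the size-aligned PTA base and the address's VRN bits. A standalone C sketch of the same computation (identifiers here are illustrative, not the hypervisor's):

    #include <stdint.h>

    #define VRN_MASK_SKETCH (0x7UL << 61)   /* bits 63:61 of a virtual address */

    /* Mirrors the vpta.vf==0 branch of vmx_vcpu_thash():
     *   offset = ((vadr >> ps) << 3) & (2^size - 1)
     *   base   = (pta << 3 >> (size+3)) << size   -- PTA base, size-aligned
     *   hash   = VRN | base | offset
     */
    static uint64_t thash_short_sketch(uint64_t vadr, uint64_t pta_val,
                                       unsigned pta_size, unsigned rr_ps)
    {
        uint64_t offset = ((vadr >> rr_ps) << 3) & ((1UL << pta_size) - 1);
        uint64_t base   = (pta_val << 3 >> (pta_size + 3)) << pta_size;
        return (vadr & VRN_MASK_SKETCH) | base | offset;
    }
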
37.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 37.2 +++ b/xen/arch/ia64/vmx_entry.S Mon May 23 15:29:59 2005 +0000 37.3 @@ -0,0 +1,611 @@ 37.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 37.5 +/* 37.6 + * vmx_entry.S: 37.7 + * Copyright (c) 2005, Intel Corporation. 37.8 + * 37.9 + * This program is free software; you can redistribute it and/or modify it 37.10 + * under the terms and conditions of the GNU General Public License, 37.11 + * version 2, as published by the Free Software Foundation. 37.12 + * 37.13 + * This program is distributed in the hope it will be useful, but WITHOUT 37.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 37.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 37.16 + * more details. 37.17 + * 37.18 + * You should have received a copy of the GNU General Public License along with 37.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 37.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 37.21 + * 37.22 + * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) 37.23 + * Kun Tian (Kevin Tian) (kevin.tian@intel.com) 37.24 + */ 37.25 + 37.26 +#ifndef VCPU_TLB_SHIFT 37.27 +#define VCPU_TLB_SHIFT 22 37.28 +#endif 37.29 +#include <linux/config.h> 37.30 +#include <asm/asmmacro.h> 37.31 +#include <asm/cache.h> 37.32 +#include <asm/kregs.h> 37.33 +#include <asm/offsets.h> 37.34 +#include <asm/pgtable.h> 37.35 +#include <asm/percpu.h> 37.36 +#include <asm/processor.h> 37.37 +#include <asm/thread_info.h> 37.38 +#include <asm/unistd.h> 37.39 + 37.40 +#include "vmx_minstate.h" 37.41 + 37.42 +/* 37.43 + * prev_task <- vmx_ia64_switch_to(struct task_struct *next) 37.44 + * With Ingo's new scheduler, interrupts are disabled when this routine gets 37.45 + * called. The code starting at .map relies on this. The rest of the code 37.46 + * doesn't care about the interrupt masking status. 37.47 + * 37.48 + * Since we allocate the domain stack in the xenheap, there is no need to map the new 37.49 + * domain's stack, as all of the xenheap is already mapped by a TR. The other task 37.50 + * for vmx_ia64_switch_to is to switch to bank 0 and update the current pointer. 37.51 + */ 37.52 +GLOBAL_ENTRY(vmx_ia64_switch_to) 37.53 + .prologue 37.54 + alloc r16=ar.pfs,1,0,0,0 37.55 + DO_SAVE_SWITCH_STACK 37.56 + .body 37.57 + 37.58 + bsw.0 // Switch to bank 0, because bank 0 r21 holds the current pointer 37.59 + ;; 37.60 + adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 37.61 + movl r25=init_task 37.62 + adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0 37.63 + ;; 37.64 + st8 [r22]=sp // save kernel stack pointer of old task 37.65 + ;; 37.66 + /* 37.67 + * A TR already maps this task's page, so we can skip doing it again.
37.68 + */ 37.69 + ld8 sp=[r26] // load kernel stack pointer of new task 37.70 + mov r21=in0 // update "current" application register 37.71 + mov r8=r13 // return pointer to previously running task 37.72 + mov r13=in0 // set "current" pointer 37.73 + ;; 37.74 + bsw.1 37.75 + ;; 37.76 + DO_LOAD_SWITCH_STACK 37.77 + 37.78 +#ifdef CONFIG_SMP 37.79 + sync.i // ensure "fc"s done by this CPU are visible on other CPUs 37.80 +#endif 37.81 + br.ret.sptk.many rp // boogie on out in new context 37.82 +END(vmx_ia64_switch_to) 37.83 + 37.84 +GLOBAL_ENTRY(ia64_leave_nested) 37.85 + rsm psr.i 37.86 + ;; 37.87 + adds r21=PT(PR)+16,r12 37.88 + ;; 37.89 + 37.90 + lfetch [r21],PT(CR_IPSR)-PT(PR) 37.91 + adds r2=PT(B6)+16,r12 37.92 + adds r3=PT(R16)+16,r12 37.93 + ;; 37.94 + lfetch [r21] 37.95 + ld8 r28=[r2],8 // load b6 37.96 + adds r29=PT(R24)+16,r12 37.97 + 37.98 + ld8.fill r16=[r3] 37.99 + adds r3=PT(AR_CSD)-PT(R16),r3 37.100 + adds r30=PT(AR_CCV)+16,r12 37.101 + ;; 37.102 + ld8.fill r24=[r29] 37.103 + ld8 r15=[r30] // load ar.ccv 37.104 + ;; 37.105 + ld8 r29=[r2],16 // load b7 37.106 + ld8 r30=[r3],16 // load ar.csd 37.107 + ;; 37.108 + ld8 r31=[r2],16 // load ar.ssd 37.109 + ld8.fill r8=[r3],16 37.110 + ;; 37.111 + ld8.fill r9=[r2],16 37.112 + ld8.fill r10=[r3],PT(R17)-PT(R10) 37.113 + ;; 37.114 + ld8.fill r11=[r2],PT(R18)-PT(R11) 37.115 + ld8.fill r17=[r3],16 37.116 + ;; 37.117 + ld8.fill r18=[r2],16 37.118 + ld8.fill r19=[r3],16 37.119 + ;; 37.120 + ld8.fill r20=[r2],16 37.121 + ld8.fill r21=[r3],16 37.122 + mov ar.csd=r30 37.123 + mov ar.ssd=r31 37.124 + ;; 37.125 + rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection 37.126 + invala // invalidate ALAT 37.127 + ;; 37.128 + ld8.fill r22=[r2],24 37.129 + ld8.fill r23=[r3],24 37.130 + mov b6=r28 37.131 + ;; 37.132 + ld8.fill r25=[r2],16 37.133 + ld8.fill r26=[r3],16 37.134 + mov b7=r29 37.135 + ;; 37.136 + ld8.fill r27=[r2],16 37.137 + ld8.fill r28=[r3],16 37.138 + ;; 37.139 + ld8.fill r29=[r2],16 37.140 + ld8.fill r30=[r3],24 37.141 + ;; 37.142 + ld8.fill r31=[r2],PT(F9)-PT(R31) 37.143 + adds r3=PT(F10)-PT(F6),r3 37.144 + ;; 37.145 + ldf.fill f9=[r2],PT(F6)-PT(F9) 37.146 + ldf.fill f10=[r3],PT(F8)-PT(F10) 37.147 + ;; 37.148 + ldf.fill f6=[r2],PT(F7)-PT(F6) 37.149 + ;; 37.150 + ldf.fill f7=[r2],PT(F11)-PT(F7) 37.151 + ldf.fill f8=[r3],32 37.152 + ;; 37.153 + srlz.i // ensure interruption collection is off 37.154 + mov ar.ccv=r15 37.155 + ;; 37.156 + bsw.0 // switch back to bank 0 (no stop bit required beforehand...) 
37.157 + ;; 37.158 + ldf.fill f11=[r2] 37.159 +// mov r18=r13 37.160 +// mov r21=r13 37.161 + adds r16=PT(CR_IPSR)+16,r12 37.162 + adds r17=PT(CR_IIP)+16,r12 37.163 + ;; 37.164 + ld8 r29=[r16],16 // load cr.ipsr 37.165 + ld8 r28=[r17],16 // load cr.iip 37.166 + ;; 37.167 + ld8 r30=[r16],16 // load cr.ifs 37.168 + ld8 r25=[r17],16 // load ar.unat 37.169 + ;; 37.170 + ld8 r26=[r16],16 // load ar.pfs 37.171 + ld8 r27=[r17],16 // load ar.rsc 37.172 + cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs 37.173 + ;; 37.174 + ld8 r24=[r16],16 // load ar.rnat (may be garbage) 37.175 + ld8 r23=[r17],16 // load ar.bspstore (may be garbage) 37.176 + ;; 37.177 + ld8 r31=[r16],16 // load predicates 37.178 + ld8 r22=[r17],16 // load b0 37.179 + ;; 37.180 + ld8 r19=[r16],16 // load ar.rsc value for "loadrs" 37.181 + ld8.fill r1=[r17],16 // load r1 37.182 + ;; 37.183 + ld8.fill r12=[r16],16 37.184 + ld8.fill r13=[r17],16 37.185 + ;; 37.186 + ld8 r20=[r16],16 // ar.fpsr 37.187 + ld8.fill r15=[r17],16 37.188 + ;; 37.189 + ld8.fill r14=[r16],16 37.190 + ld8.fill r2=[r17] 37.191 + ;; 37.192 + ld8.fill r3=[r16] 37.193 + ;; 37.194 + mov r16=ar.bsp // get existing backing store pointer 37.195 + ;; 37.196 + mov b0=r22 37.197 + mov ar.pfs=r26 37.198 + mov cr.ifs=r30 37.199 + mov cr.ipsr=r29 37.200 + mov ar.fpsr=r20 37.201 + mov cr.iip=r28 37.202 + ;; 37.203 + mov ar.rsc=r27 37.204 + mov ar.unat=r25 37.205 + mov pr=r31,-1 37.206 + rfi 37.207 +END(ia64_leave_nested) 37.208 + 37.209 + 37.210 + 37.211 +GLOBAL_ENTRY(ia64_leave_hypervisor) 37.212 + PT_REGS_UNWIND_INFO(0) 37.213 + /* 37.214 + * work.need_resched etc. mustn't get changed by this CPU before it returns to 37.215 + * 37.216 + * user- or fsys-mode, hence we disable interrupts early on: 37.217 + */ 37.218 + rsm psr.i 37.219 + ;; 37.220 + alloc loc0=ar.pfs,0,1,1,0 37.221 + adds out0=16,r12 37.222 + ;; 37.223 + br.call.sptk.many b0=vmx_deliver_pending_interrupt 37.224 + mov ar.pfs=loc0 37.225 + adds r8=IA64_VPD_BASE_OFFSET,r13 37.226 + ;; 37.227 + ld8 r8=[r8] 37.228 + ;; 37.229 + adds r9=VPD(VPSR),r8 37.230 + ;; 37.231 + ld8 r9=[r9] 37.232 + ;; 37.233 + tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT 37.234 + ;; 37.235 +(pBN0) add r7=VPD(VBNAT),r8; 37.236 +(pBN1) add r7=VPD(VNAT),r8; 37.237 + ;; 37.238 + ld8 r7=[r7] 37.239 + ;; 37.240 + mov ar.unat=r7 37.241 +(pBN0) add r4=VPD(VBGR),r8; 37.242 +(pBN1) add r4=VPD(VGR),r8; 37.243 +(pBN0) add r5=VPD(VBGR)+0x8,r8; 37.244 +(pBN1) add r5=VPD(VGR)+0x8,r8; 37.245 + ;; 37.246 + ld8.fill r16=[r4],16 37.247 + ld8.fill r17=[r5],16 37.248 + ;; 37.249 + ld8.fill r18=[r4],16 37.250 + ld8.fill r19=[r5],16 37.251 + ;; 37.252 + ld8.fill r20=[r4],16 37.253 + ld8.fill r21=[r5],16 37.254 + ;; 37.255 + ld8.fill r22=[r4],16 37.256 + ld8.fill r23=[r5],16 37.257 + ;; 37.258 + ld8.fill r24=[r4],16 37.259 + ld8.fill r25=[r5],16 37.260 + ;; 37.261 + ld8.fill r26=[r4],16 37.262 + ld8.fill r27=[r5],16 37.263 + ;; 37.264 + ld8.fill r28=[r4],16 37.265 + ld8.fill r29=[r5],16 37.266 + ;; 37.267 + ld8.fill r30=[r4],16 37.268 + ld8.fill r31=[r5],16 37.269 + ;; 37.270 + bsw.0 37.271 + ;; 37.272 + mov r18=r8 //vpd 37.273 + mov r19=r9 //vpsr 37.274 + adds r20=PT(PR)+16,r12 37.275 + ;; 37.276 + lfetch [r20],PT(CR_IPSR)-PT(PR) 37.277 + adds r16=PT(B6)+16,r12 37.278 + adds r17=PT(B7)+16,r12 37.279 + ;; 37.280 + lfetch [r20] 37.281 + mov r21=r13 // get current 37.282 + ;; 37.283 + ld8 r30=[r16],16 // load b6 37.284 + ld8 r31=[r17],16 // load b7 37.285 + add r20=PT(EML_UNAT)+16,r12 37.286 + ;; 37.287 + ld8 r29=[r20] //load ar_unat 37.288 + mov
b6=r30 37.289 + mov b7=r31 37.290 + ld8 r30=[r16],16 //load ar_csd 37.291 + ld8 r31=[r17],16 //load ar_ssd 37.292 + ;; 37.293 + mov ar.unat=r29 37.294 + mov ar.csd=r30 37.295 + mov ar.ssd=r31 37.296 + ;; 37.297 + ld8.fill r8=[r16],16 //load r8 37.298 + ld8.fill r9=[r17],16 //load r9 37.299 + ;; 37.300 + ld8.fill r10=[r16],PT(R1)-PT(R10) //load r10 37.301 + ld8.fill r11=[r17],PT(R12)-PT(R11) //load r11 37.302 + ;; 37.303 + ld8.fill r1=[r16],16 //load r1 37.304 + ld8.fill r12=[r17],16 //load r12 37.305 + ;; 37.306 + ld8.fill r13=[r16],16 //load r13 37.307 + ld8 r30=[r17],16 //load ar_fpsr 37.308 + ;; 37.309 + ld8.fill r15=[r16],16 //load r15 37.310 + ld8.fill r14=[r17],16 //load r14 37.311 + mov ar.fpsr=r30 37.312 + ;; 37.313 + ld8.fill r2=[r16],16 //load r2 37.314 + ld8.fill r3=[r17],16 //load r3 37.315 + ;; 37.316 +/* 37.317 +(pEml) ld8.fill r4=[r16],16 //load r4 37.318 +(pEml) ld8.fill r5=[r17],16 //load r5 37.319 + ;; 37.320 +(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6 37.321 +(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7 37.322 + ;; 37.323 +(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16 37.324 +(pNonEml) adds r17=PT(F7)-PT(R5),r17 37.325 + ;; 37.326 +*/ 37.327 + ld8.fill r4=[r16],16 //load r4 37.328 + ld8.fill r5=[r17],16 //load r5 37.329 + ;; 37.330 + ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6 37.331 + ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7 37.332 + ;; 37.333 + 37.334 + ld8 r30=[r16],PT(F6)-PT(AR_CCV) 37.335 + rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection 37.336 + ;; 37.337 + srlz.i // ensure interruption collection is off 37.338 + ;; 37.339 + invala // invalidate ALAT 37.340 + ;; 37.341 + ldf.fill f6=[r16],32 37.342 + ldf.fill f7=[r17],32 37.343 + ;; 37.344 + ldf.fill f8=[r16],32 37.345 + ldf.fill f9=[r17],32 37.346 + ;; 37.347 + ldf.fill f10=[r16] 37.348 + ldf.fill f11=[r17] 37.349 + ;; 37.350 + mov ar.ccv=r30 37.351 + adds r16=PT(CR_IPSR)-PT(F10),r16 37.352 + adds r17=PT(CR_IIP)-PT(F11),r17 37.353 + ;; 37.354 + ld8 r31=[r16],16 // load cr.ipsr 37.355 + ld8 r30=[r17],16 // load cr.iip 37.356 + ;; 37.357 + ld8 r29=[r16],16 // load cr.ifs 37.358 + ld8 r28=[r17],16 // load ar.unat 37.359 + ;; 37.360 + ld8 r27=[r16],16 // load ar.pfs 37.361 + ld8 r26=[r17],16 // load ar.rsc 37.362 + ;; 37.363 + ld8 r25=[r16],16 // load ar.rnat (may be garbage) 37.364 + ld8 r24=[r17],16// load ar.bspstore (may be garbage) 37.365 + ;; 37.366 + ld8 r23=[r16],16 // load predicates 37.367 + ld8 r22=[r17],PT(RFI_PFS)-PT(B0) // load b0 37.368 + ;; 37.369 + ld8 r20=[r16],16 // load ar.rsc value for "loadrs" 37.370 + ;; 37.371 +//rbs_switch 37.372 + // loadrs has already been shifted 37.373 + alloc r16=ar.pfs,0,0,0,0 // drop current register frame 37.374 + ;; 37.375 + mov ar.rsc=r20 37.376 + ;; 37.377 + loadrs 37.378 + ;; 37.379 + mov ar.bspstore=r24 37.380 + ;; 37.381 + ld8 r24=[r17] //load rfi_pfs 37.382 + mov ar.unat=r28 37.383 + mov ar.rnat=r25 37.384 + mov ar.rsc=r26 37.385 + ;; 37.386 + mov cr.ipsr=r31 37.387 + mov cr.iip=r30 37.388 + mov cr.ifs=r29 37.389 + cmp.ne p6,p0=r24,r0 37.390 +(p6)br.sptk vmx_dorfirfi 37.391 + ;; 37.392 +vmx_dorfirfi_back: 37.393 + mov ar.pfs=r27 37.394 + 37.395 +//vsa_sync_write_start 37.396 + movl r20=__vsa_base 37.397 + ;; 37.398 + ld8 r20=[r20] // read entry point 37.399 + mov r25=r18 37.400 + ;; 37.401 + add r16=PAL_VPS_SYNC_WRITE,r20 37.402 + movl r24=switch_rr7 // calculate return address 37.403 + ;; 37.404 + mov b0=r16 37.405 + br.cond.sptk b0 // call the service 37.406 + ;; 37.407 +// switch rr7 and rr5 37.408 +switch_rr7: 
37.409 + adds r24=SWITCH_MRR5_OFFSET, r21 37.410 + adds r26=SWITCH_MRR6_OFFSET, r21 37.411 + adds r16=SWITCH_MRR7_OFFSET ,r21 37.412 + movl r25=(5<<61) 37.413 + movl r27=(6<<61) 37.414 + movl r17=(7<<61) 37.415 + ;; 37.416 + ld8 r24=[r24] 37.417 + ld8 r26=[r26] 37.418 + ld8 r16=[r16] 37.419 + ;; 37.420 + mov rr[r25]=r24 37.421 + mov rr[r27]=r26 37.422 + mov rr[r17]=r16 37.423 + ;; 37.424 + srlz.i 37.425 + ;; 37.426 + add r24=SWITCH_MPTA_OFFSET, r21 37.427 + ;; 37.428 + ld8 r24=[r24] 37.429 + ;; 37.430 + mov cr.pta=r24 37.431 + ;; 37.432 + srlz.i 37.433 + ;; 37.434 +// fall through 37.435 +GLOBAL_ENTRY(ia64_vmm_entry) 37.436 +/* 37.437 + * must be at bank 0 37.438 + * parameter: 37.439 + * r18:vpd 37.440 + * r19:vpsr 37.441 + * r20:__vsa_base 37.442 + * r22:b0 37.443 + * r23:predicate 37.444 + */ 37.445 + mov r24=r22 37.446 + mov r25=r18 37.447 + tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic 37.448 + ;; 37.449 + (p1) add r29=PAL_VPS_RESUME_NORMAL,r20 37.450 + (p2) add r29=PAL_VPS_RESUME_HANDLER,r20 37.451 + ;; 37.452 + mov pr=r23,-2 37.453 + mov b0=r29 37.454 + ;; 37.455 + br.cond.sptk b0 // call pal service 37.456 +END(ia64_leave_hypervisor) 37.457 + 37.458 +//r24 rfi_pfs 37.459 +//r17 address of rfi_pfs 37.460 +GLOBAL_ENTRY(vmx_dorfirfi) 37.461 + mov r16=ar.ec 37.462 + movl r20 = vmx_dorfirfi_back 37.463 + ;; 37.464 +// clean rfi_pfs 37.465 + st8 [r17]=r0 37.466 + mov b0=r20 37.467 +// pfs.pec=ar.ec 37.468 + dep r24 = r16, r24, 52, 6 37.469 + ;; 37.470 + mov ar.pfs=r24 37.471 + ;; 37.472 + br.ret.sptk b0 37.473 + ;; 37.474 +END(vmx_dorfirfi) 37.475 + 37.476 + 37.477 +#define VMX_PURGE_RR7 0 37.478 +#define VMX_INSERT_RR7 1 37.479 +/* 37.480 + * in0: old rr7 37.481 + * in1: virtual address of xen image 37.482 + * in2: virtual address of vhpt table 37.483 + */ 37.484 +GLOBAL_ENTRY(vmx_purge_double_mapping) 37.485 + alloc loc1 = ar.pfs,5,9,0,0 37.486 + mov loc0 = rp 37.487 + movl r8 = 1f 37.488 + ;; 37.489 + movl loc4 = KERNEL_TR_PAGE_SHIFT 37.490 + movl loc5 = VCPU_TLB_SHIFT 37.491 + mov loc6 = psr 37.492 + movl loc7 = XEN_RR7_SWITCH_STUB 37.493 + mov loc8 = (1<<VMX_PURGE_RR7) 37.494 + ;; 37.495 + srlz.i 37.496 + ;; 37.497 + rsm psr.i | psr.ic 37.498 + ;; 37.499 + srlz.i 37.500 + ;; 37.501 + mov ar.rsc = 0 37.502 + mov b6 = loc7 37.503 + mov rp = r8 37.504 + ;; 37.505 + br.sptk b6 37.506 +1: 37.507 + mov ar.rsc = 3 37.508 + mov rp = loc0 37.509 + ;; 37.510 + mov psr.l = loc6 37.511 + ;; 37.512 + srlz.i 37.513 + ;; 37.514 + br.ret.sptk rp 37.515 +END(vmx_purge_double_mapping) 37.516 + 37.517 +/* 37.518 + * in0: new rr7 37.519 + * in1: virtual address of xen image 37.520 + * in2: virtual address of vhpt table 37.521 + * in3: pte entry of xen image 37.522 + * in4: pte entry of vhpt table 37.523 + */ 37.524 +GLOBAL_ENTRY(vmx_insert_double_mapping) 37.525 + alloc loc1 = ar.pfs,5,9,0,0 37.526 + mov loc0 = rp 37.527 + movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image 37.528 + ;; 37.529 + movl loc3 = IA64_TR_VHPT_IN_DOM // TR number for vhpt table 37.530 + movl r8 = 1f 37.531 + movl loc4 = KERNEL_TR_PAGE_SHIFT 37.532 + ;; 37.533 + movl loc5 = VCPU_TLB_SHIFT 37.534 + mov loc6 = psr 37.535 + movl loc7 = XEN_RR7_SWITCH_STUB 37.536 + ;; 37.537 + srlz.i 37.538 + ;; 37.539 + rsm psr.i | psr.ic 37.540 + mov loc8 = (1<<VMX_INSERT_RR7) 37.541 + ;; 37.542 + srlz.i 37.543 + ;; 37.544 + mov ar.rsc = 0 37.545 + mov b6 = loc7 37.546 + mov rp = r8 37.547 + ;; 37.548 + br.sptk b6 37.549 +1: 37.550 + mov ar.rsc = 3 37.551 + mov rp = loc0 37.552 + ;; 37.553 + mov psr.l = loc6 37.554 + ;; 37.555 + srlz.i 
37.556 + ;; 37.557 + br.ret.sptk rp 37.558 +END(vmx_insert_double_mapping) 37.559 + 37.560 + .align PAGE_SIZE 37.561 +/* 37.562 + * Stub to add double mapping for a new domain, which shouldn't 37.563 + * access any memory when active. Before reaching this point, 37.564 + * both psr.i/ic are cleared and the RSE is set in lazy mode. 37.565 + * 37.566 + * in0: new rr7 37.567 + * in1: virtual address of xen image 37.568 + * in2: virtual address of vhpt table 37.569 + * in3: pte entry of xen image 37.570 + * in4: pte entry of vhpt table 37.571 + * loc2: TR number for xen image 37.572 + * loc3: TR number for vhpt table 37.573 + * loc4: page size for xen image 37.574 + * loc5: page size of vhpt table 37.575 + * loc7: free to use 37.576 + * loc8: purge or insert 37.577 + * r8: will contain old rid value 37.578 + */ 37.579 +GLOBAL_ENTRY(vmx_switch_rr7) 37.580 + movl loc7 = (7<<61) 37.581 + dep.z loc4 = loc4, 2, 6 37.582 + dep.z loc5 = loc5, 2, 6 37.583 + ;; 37.584 + tbit.nz p6,p7=loc8, VMX_INSERT_RR7 37.585 + mov r8 = rr[loc7] 37.586 + ;; 37.587 + mov rr[loc7] = in0 37.588 +(p6)mov cr.ifa = in1 37.589 +(p6)mov cr.itir = loc4 37.590 + ;; 37.591 + srlz.i 37.592 + ;; 37.593 +(p6)itr.i itr[loc2] = in3 37.594 +(p7)ptr.i in1, loc4 37.595 + ;; 37.596 +(p6)itr.d dtr[loc2] = in3 37.597 +(p7)ptr.d in1, loc4 37.598 + ;; 37.599 + srlz.i 37.600 + ;; 37.601 +(p6)mov cr.ifa = in2 37.602 +(p6)mov cr.itir = loc5 37.603 + ;; 37.604 +(p6)itr.d dtr[loc3] = in4 37.605 +(p7)ptr.d in2, loc5 37.606 + ;; 37.607 + srlz.i 37.608 + ;; 37.609 + mov rr[loc7] = r8 37.610 + ;; 37.611 + srlz.i 37.612 + br.sptk rp 37.613 +END(vmx_switch_rr7) 37.614 + .align PAGE_SIZE
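
switch_rr7 and vmx_switch_rr7 above rely on two ia64 encodings that are easy to miss: `mov rr[rX]=v` selects a region register by the VRN held in bits 63:61 of rX (hence the (5<<61), (6<<61), (7<<61) constants), and `dep.z loc4=loc4,2,6` builds a cr.itir value by depositing the 6-bit page-size field at bits 7:2. A hedged C rendering of those two encodings (names are illustrative):

    #include <stdint.h>

    /* Region-register "address": region number in bits 63:61,
     * matching movl r25=(5<<61) etc. in switch_rr7. */
    static inline uint64_t rr_index_sketch(unsigned region)
    {
        return (uint64_t)region << 61;
    }

    /* itir with only the ps field set, matching dep.z loc4=loc4,2,6
     * in vmx_switch_rr7 (page-size shift count at bits 7:2). */
    static inline uint64_t itir_ps_sketch(uint64_t page_shift)
    {
        return (page_shift & 0x3f) << 2;
    }
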
38.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 38.2 +++ b/xen/arch/ia64/vmx_init.c Mon May 23 15:29:59 2005 +0000 38.3 @@ -0,0 +1,296 @@ 38.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 38.5 +/* 38.6 + * vmx_init.c: initialization work for vt specific domain 38.7 + * Copyright (c) 2005, Intel Corporation. 38.8 + * Kun Tian (Kevin Tian) <kevin.tian@intel.com> 38.9 + * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> 38.10 + * Fred Yang <fred.yang@intel.com> 38.11 + * 38.12 + * This program is free software; you can redistribute it and/or modify it 38.13 + * under the terms and conditions of the GNU General Public License, 38.14 + * version 2, as published by the Free Software Foundation. 38.15 + * 38.16 + * This program is distributed in the hope it will be useful, but WITHOUT 38.17 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 38.18 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 38.19 + * more details. 38.20 + * 38.21 + * You should have received a copy of the GNU General Public License along with 38.22 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 38.23 + * Place - Suite 330, Boston, MA 02111-1307 USA. 38.24 + * 38.25 + */ 38.26 + 38.27 +/* 38.28 + * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>: 38.29 + * Simplified design in the first step: 38.30 + * - One virtual environment 38.31 + * - Domain is bound to one LP 38.32 + * Later to support guest SMP: 38.33 + * - Need interface to handle VP scheduled to different LP 38.34 + */ 38.35 +#include <xen/config.h> 38.36 +#include <xen/types.h> 38.37 +#include <xen/sched.h> 38.38 +#include <asm/pal.h> 38.39 +#include <asm/page.h> 38.40 +#include <asm/processor.h> 38.41 +#include <asm/vmx_vcpu.h> 38.42 +#include <xen/lib.h> 38.43 +#include <asm/vmmu.h> 38.44 +#include <public/arch-ia64.h> 38.45 +#include <asm/vmx_phy_mode.h> 38.46 +#include <asm/vmx.h> 38.47 + 38.48 +/* Global flag to identify whether Intel vmx feature is on */ 38.49 +u32 vmx_enabled = 0; 38.50 +static u32 vm_order; 38.51 +static u64 buffer_size; 38.52 +static u64 vp_env_info; 38.53 +static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */ 38.54 +u64 __vsa_base = 0; /* Run-time service base of VMX */ 38.55 + 38.56 +/* Check whether vt feature is enabled or not. */ 38.57 +void 38.58 +identify_vmx_feature(void) 38.59 +{ 38.60 + pal_status_t ret; 38.61 + u64 avail = 1, status = 1, control = 1; 38.62 + 38.63 + vmx_enabled = 0; 38.64 + /* Check VT-i feature */ 38.65 + ret = ia64_pal_proc_get_features(&avail, &status, &control); 38.66 + if (ret != PAL_STATUS_SUCCESS) { 38.67 + printk("Get proc features failed.\n"); 38.68 + goto no_vti; 38.69 + } 38.70 + 38.71 + /* FIXME: do we need to check the status field, to see whether 38.72 + * PSR.vm is actually enabled? If yes, another call to 38.73 + * ia64_pal_proc_set_features may be required then. 38.74 + */ 38.75 + printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n", 38.76 + avail, status, control, avail & PAL_PROC_VM_BIT); 38.77 + if (!(avail & PAL_PROC_VM_BIT)) { 38.78 + printk("No VT feature supported.\n"); 38.79 + goto no_vti; 38.80 + } 38.81 + 38.82 + ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info); 38.83 + if (ret != PAL_STATUS_SUCCESS) { 38.84 + printk("Get vp environment info failed.\n"); 38.85 + goto no_vti; 38.86 + } 38.87 + 38.88 + /* Does xen have the ability to decode itself?
*/ 38.89 + if (!(vp_env_info & VP_OPCODE)) 38.90 + printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info); 38.91 + vm_order = get_order(buffer_size); 38.92 + printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order); 38.93 + 38.94 + vmx_enabled = 1; 38.95 +no_vti: 38.96 + return; 38.97 +} 38.98 + 38.99 +/* 38.100 + * Initialize the virtual environment on the current LP. 38.101 + * vsa_base indicates whether this is the first LP to be initialized 38.102 + * for the current domain. 38.103 + */ 38.104 +void 38.105 +vmx_init_env(void) 38.106 +{ 38.107 + u64 status, tmp_base; 38.108 + 38.109 + if (!vm_buffer) { 38.110 + vm_buffer = alloc_xenheap_pages(vm_order); 38.111 + ASSERT(vm_buffer); 38.112 + printk("vm_buffer: 0x%lx\n", vm_buffer); 38.113 + } 38.114 + 38.115 + status=ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE, 38.116 + __pa(vm_buffer), 38.117 + vm_buffer, 38.118 + &tmp_base); 38.119 + 38.120 + if (status != PAL_STATUS_SUCCESS) { 38.121 + printk("ia64_pal_vp_init_env failed.\n"); 38.122 + return; 38.123 + } 38.124 + 38.125 + if (!__vsa_base) 38.126 + __vsa_base = tmp_base; 38.127 + else 38.128 + ASSERT(tmp_base != __vsa_base); 38.129 + 38.130 + /* Init stub for rr7 switch */ 38.131 + vmx_init_double_mapping_stub(); 38.132 +} 38.133 + 38.134 +typedef union { 38.135 + u64 value; 38.136 + struct { 38.137 + u64 number : 8; 38.138 + u64 revision : 8; 38.139 + u64 model : 8; 38.140 + u64 family : 8; 38.141 + u64 archrev : 8; 38.142 + u64 rv : 24; 38.143 + }; 38.144 +} cpuid3_t; 38.145 + 38.146 +/* Allocate vpd from xenheap */ 38.147 +static vpd_t *alloc_vpd(void) 38.148 +{ 38.149 + int i; 38.150 + cpuid3_t cpuid3; 38.151 + vpd_t *vpd; 38.152 + 38.153 + vpd = alloc_xenheap_pages(get_order(VPD_SIZE)); 38.154 + if (!vpd) { 38.155 + printk("VPD allocation failed.\n"); 38.156 + return NULL; 38.157 + } 38.158 + 38.159 + printk("vpd base: 0x%lx, vpd size: %ld\n", (u64)vpd, sizeof(vpd_t)); 38.160 + memset(vpd, 0, VPD_SIZE); 38.161 + /* CPUID init */ 38.162 + for (i = 0; i < 5; i++) 38.163 + vpd->vcpuid[i] = ia64_get_cpuid(i); 38.164 + 38.165 + /* Limit the CPUID number to 5 */ 38.166 + cpuid3.value = vpd->vcpuid[3]; 38.167 + cpuid3.number = 4; /* 5 - 1 */ 38.168 + vpd->vcpuid[3] = cpuid3.value; 38.169 + 38.170 + vpd->vdc.d_vmsw = 1; 38.171 + return vpd; 38.172 +} 38.173 + 38.174 + 38.175 + 38.176 +/* 38.177 + * Create a VP on an initialized VMX environment. 38.178 + */ 38.179 +static void 38.180 +vmx_create_vp(struct exec_domain *ed) 38.181 +{ 38.182 + u64 ret; 38.183 + vpd_t *vpd = ed->arch.arch_vmx.vpd; 38.184 + u64 ivt_base; 38.185 + extern char vmx_ia64_ivt; 38.186 + /* vmx_ia64_ivt is a function pointer, so we need this translation */ 38.187 + ivt_base = (u64) &vmx_ia64_ivt; 38.188 + printk("ivt_base: 0x%lx\n", ivt_base); 38.189 + ret = ia64_pal_vp_create(vpd, ivt_base, 0); 38.190 + if (ret != PAL_STATUS_SUCCESS) 38.191 + panic("ia64_pal_vp_create failed.
\n"); 38.192 +} 38.193 + 38.194 +void vmx_init_double_mapping_stub(void) 38.195 +{ 38.196 + u64 base, psr; 38.197 + extern void vmx_switch_rr7(void); 38.198 + 38.199 + base = (u64) &vmx_switch_rr7; 38.200 + base = *((u64*)base); 38.201 + 38.202 + psr = ia64_clear_ic(); 38.203 + ia64_itr(0x1, IA64_TR_RR7_SWITCH_STUB, XEN_RR7_SWITCH_STUB, 38.204 + pte_val(pfn_pte(__pa(base) >> PAGE_SHIFT, PAGE_KERNEL)), 38.205 + RR7_SWITCH_SHIFT); 38.206 + ia64_set_psr(psr); 38.207 + ia64_srlz_i(); 38.208 + printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base))); 38.209 +} 38.210 + 38.211 +/* Other non-context related tasks can be done in context switch */ 38.212 +void 38.213 +vmx_save_state(struct exec_domain *ed) 38.214 +{ 38.215 + u64 status, psr; 38.216 + u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt; 38.217 + 38.218 + /* FIXME: about setting of pal_proc_vector... time consuming */ 38.219 + status = ia64_pal_vp_save(ed->arch.arch_vmx.vpd, 0); 38.220 + if (status != PAL_STATUS_SUCCESS) 38.221 + panic("Save vp status failed\n"); 38.222 + 38.223 + /* FIXME: Do we really need purge double mapping for old ed? 38.224 + * Since rid is completely different between prev and next, 38.225 + * it's not overlap and thus no MCA possible... */ 38.226 + dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7])); 38.227 + vmx_purge_double_mapping(dom_rr7, KERNEL_START, 38.228 + (u64)ed->arch.vtlb->ts->vhpt->hash); 38.229 + 38.230 +} 38.231 + 38.232 +/* Even guest is in physical mode, we still need such double mapping */ 38.233 +void 38.234 +vmx_load_state(struct exec_domain *ed) 38.235 +{ 38.236 + u64 status, psr; 38.237 + u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt; 38.238 + u64 pte_xen, pte_vhpt; 38.239 + 38.240 + status = ia64_pal_vp_restore(ed->arch.arch_vmx.vpd, 0); 38.241 + if (status != PAL_STATUS_SUCCESS) 38.242 + panic("Restore vp status failed\n"); 38.243 + 38.244 + dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7])); 38.245 + pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL)); 38.246 + pte_vhpt = pte_val(pfn_pte((__pa(ed->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL)); 38.247 + vmx_insert_double_mapping(dom_rr7, KERNEL_START, 38.248 + (u64)ed->arch.vtlb->ts->vhpt->hash, 38.249 + pte_xen, pte_vhpt); 38.250 + 38.251 + /* Guest vTLB is not required to be switched explicitly, since 38.252 + * anchored in exec_domain */ 38.253 +} 38.254 + 38.255 +/* Purge old double mapping and insert new one, due to rr7 change */ 38.256 +void 38.257 +vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7) 38.258 +{ 38.259 + u64 pte_xen, pte_vhpt, vhpt_base; 38.260 + 38.261 + vhpt_base = (u64)ed->arch.vtlb->ts->vhpt->hash; 38.262 + vmx_purge_double_mapping(oldrr7, KERNEL_START, 38.263 + vhpt_base); 38.264 + 38.265 + pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL)); 38.266 + pte_vhpt = pte_val(pfn_pte((__pa(vhpt_base) >> PAGE_SHIFT), PAGE_KERNEL)); 38.267 + vmx_insert_double_mapping(newrr7, KERNEL_START, 38.268 + vhpt_base, 38.269 + pte_xen, pte_vhpt); 38.270 +} 38.271 + 38.272 +/* 38.273 + * Initialize VMX envirenment for guest. Only the 1st vp/exec_domain 38.274 + * is registered here. 
38.275 + */ 38.276 +void 38.277 +vmx_final_setup_domain(struct domain *d) 38.278 +{ 38.279 + struct exec_domain *ed = d->exec_domain[0]; 38.280 + vpd_t *vpd; 38.281 + 38.282 + /* Allocate resources for exec_domain 0 */ 38.283 + //memset(&ed->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct)); 38.284 + 38.285 + vpd = alloc_vpd(); 38.286 + ASSERT(vpd); 38.287 + 38.288 + ed->arch.arch_vmx.vpd = vpd; 38.289 + vpd->virt_env_vaddr = vm_buffer; 38.290 + 38.291 + /* ed->arch.schedule_tail = arch_vmx_do_launch; */ 38.292 + vmx_create_vp(ed); 38.293 + 38.294 + /* Set this ed to be vmx */ 38.295 + ed->arch.arch_vmx.flags = 1; 38.296 + 38.297 + /* Other vmx specific initialization work */ 38.298 +} 38.299 +
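
The cpuid3_t union in vmx_init.c makes the CPUID clamping in alloc_vpd() explicit: CPUID[3].number holds the index of the last implemented CPUID register, so writing 4 limits the guest's view to registers 0 through 4. A self-contained sketch of the same manipulation (standard C types substituted for the hypervisor's):

    #include <stdint.h>

    typedef union {
        uint64_t value;
        struct {
            uint64_t number   : 8;
            uint64_t revision : 8;
            uint64_t model    : 8;
            uint64_t family   : 8;
            uint64_t archrev  : 8;
            uint64_t rv       : 24;
        };
    } cpuid3_sketch_t;

    /* Clamp the reported CPUID count to five registers (0..4),
     * as alloc_vpd() does with vpd->vcpuid[3]. */
    static uint64_t clamp_cpuid3(uint64_t raw)
    {
        cpuid3_sketch_t c;
        c.value  = raw;
        c.number = 4;    /* 5 - 1 */
        return c.value;
    }
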
39.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 39.2 +++ b/xen/arch/ia64/vmx_interrupt.c Mon May 23 15:29:59 2005 +0000 39.3 @@ -0,0 +1,388 @@ 39.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 39.5 +/* 39.6 + * vmx_interrupt.c: handle interruption injection. 39.7 + * Copyright (c) 2005, Intel Corporation. 39.8 + * 39.9 + * This program is free software; you can redistribute it and/or modify it 39.10 + * under the terms and conditions of the GNU General Public License, 39.11 + * version 2, as published by the Free Software Foundation. 39.12 + * 39.13 + * This program is distributed in the hope it will be useful, but WITHOUT 39.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 39.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 39.16 + * more details. 39.17 + * 39.18 + * You should have received a copy of the GNU General Public License along with 39.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 39.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 39.21 + * 39.22 + * Shaofan Li (Susie Li) <susie.li@intel.com> 39.23 + * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> 39.24 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 39.25 + */ 39.26 + 39.27 + 39.28 +#include <xen/types.h> 39.29 +#include <asm/vmx_vcpu.h> 39.30 +#include <asm/vmx_mm_def.h> 39.31 +#include <asm/vmx_pal_vsa.h> 39.32 +/* SDM vol2 5.5 - IVA based interruption handling */ 39.33 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034 39.34 +void 39.35 +collect_interruption(VCPU *vcpu) 39.36 +{ 39.37 + u64 ipsr; 39.38 + u64 vdcr; 39.39 + u64 vifs; 39.40 + IA64_PSR vpsr; 39.41 + REGS * regs = vcpu_regs(vcpu); 39.42 + vpsr.val = vmx_vcpu_get_psr(vcpu); 39.43 + 39.44 + if(vpsr.ic){ 39.45 + extern void vmx_dorfirfi(void); 39.46 + if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi) 39.47 + panic("COLLECT interruption for vmx_dorfirfi\n"); 39.48 + 39.49 + /* Sync mpsr id/da/dd/ss/ed bits to vipsr, 39.50 + * since after the guest does rfi we still want these bits set in 39.51 + * mpsr 39.52 + */ 39.53 + 39.54 + ipsr = regs->cr_ipsr; 39.55 + vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA 39.56 + | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED)); 39.57 + vmx_vcpu_set_ipsr(vcpu, vpsr.val); 39.58 + 39.59 + /* Currently, for a trap, we do not advance IIP to the next 39.60 + * instruction,
because we assume the caller has already 39.61 + * set up IIP correctly 39.62 + */ 39.63 + 39.64 + vmx_vcpu_set_iip(vcpu , regs->cr_iip); 39.65 + 39.66 + /* set vifs.v to zero */ 39.67 + vifs = VPD_CR(vcpu,ifs); 39.68 + vifs &= ~IA64_IFS_V; 39.69 + vmx_vcpu_set_ifs(vcpu, vifs); 39.70 + 39.71 + vmx_vcpu_set_iipa(vcpu, regs->cr_iipa); 39.72 + } 39.73 + 39.74 + vdcr = VPD_CR(vcpu,dcr); 39.75 + 39.76 + /* Set guest psr 39.77 + * up/mfl/mfh/pk/dt/rt/mc/it keep unchanged 39.78 + * be: set to the value of dcr.be 39.79 + * pp: set to the value of dcr.pp 39.80 + */ 39.81 + vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION; 39.82 + vpsr.val |= ( vdcr & IA64_DCR_BE); 39.83 + 39.84 + /* VDCR pp bit position is different from VPSR pp bit */ 39.85 + if ( vdcr & IA64_DCR_PP ) { 39.86 + vpsr.val |= IA64_PSR_PP; 39.87 + } else { 39.88 + vpsr.val &= ~IA64_PSR_PP; 39.89 + } 39.90 + 39.91 + vmx_vcpu_set_psr(vcpu, vpsr.val); 39.92 + 39.93 +} 39.94 +void 39.95 +inject_guest_interruption(VCPU *vcpu, u64 vec) 39.96 +{ 39.97 + u64 viva; 39.98 + REGS *regs; 39.99 + regs=vcpu_regs(vcpu); 39.100 + 39.101 + collect_interruption(vcpu); 39.102 + 39.103 + vmx_vcpu_get_iva(vcpu,&viva); 39.104 + regs->cr_iip = viva + vec; 39.105 +} 39.106 + 39.107 + 39.108 +/* 39.109 + * Set vIFA & vITIR & vIHA, when vPSR.ic = 1 39.110 + * Parameter: 39.111 + * set_ifa: if true, set vIFA 39.112 + * set_itir: if true, set vITIR 39.113 + * set_iha: if true, set vIHA 39.114 + */ 39.115 +void 39.116 +set_ifa_itir_iha (VCPU *vcpu, u64 vadr, 39.117 + int set_ifa, int set_itir, int set_iha) 39.118 +{ 39.119 + IA64_PSR vpsr; 39.120 + u64 value; 39.121 + vpsr.val = vmx_vcpu_get_psr(vcpu); 39.122 + /* Vol2, Table 8-1 */ 39.123 + if ( vpsr.ic ) { 39.124 + if ( set_ifa){ 39.125 + vmx_vcpu_set_ifa(vcpu, vadr); 39.126 + } 39.127 + if ( set_itir) { 39.128 + value = vmx_vcpu_get_itir_on_fault(vcpu, vadr); 39.129 + vmx_vcpu_set_itir(vcpu, value); 39.130 + } 39.131 + 39.132 + if ( set_iha) { 39.133 + vmx_vcpu_thash(vcpu, vadr, &value); 39.134 + vmx_vcpu_set_iha(vcpu, value); 39.135 + } 39.136 + } 39.137 + 39.138 + 39.139 +} 39.140 + 39.141 +/* 39.142 + * Data TLB Fault 39.143 + * @ Data TLB vector 39.144 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.145 + */ 39.146 +void 39.147 +dtlb_fault (VCPU *vcpu, u64 vadr) 39.148 +{ 39.149 + /* If vPSR.ic, IFA, ITIR, IHA */ 39.150 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 1); 39.151 + inject_guest_interruption(vcpu,IA64_DATA_TLB_VECTOR); 39.152 +} 39.153 + 39.154 +/* 39.155 + * Instruction TLB Fault 39.156 + * @ Instruction TLB vector 39.157 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.158 + */ 39.159 +void 39.160 +itlb_fault (VCPU *vcpu, u64 vadr) 39.161 +{ 39.162 + /* If vPSR.ic, IFA, ITIR, IHA */ 39.163 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 1); 39.164 + inject_guest_interruption(vcpu,IA64_INST_TLB_VECTOR); 39.165 +} 39.166 + 39.167 + 39.168 + 39.169 +/* 39.170 + * Data Nested TLB Fault 39.171 + * @ Data Nested TLB Vector 39.172 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.173 + */ 39.174 +void 39.175 +nested_dtlb (VCPU *vcpu) 39.176 +{ 39.177 + inject_guest_interruption(vcpu,IA64_DATA_NESTED_TLB_VECTOR); 39.178 +} 39.179 + 39.180 +/* 39.181 + * Alternate Data TLB Fault 39.182 + * @ Alternate Data TLB vector 39.183 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.184 + */ 39.185 +void 39.186 +alt_dtlb (VCPU *vcpu, u64 vadr) 39.187 +{ 39.188 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 0); 39.189 + inject_guest_interruption(vcpu,IA64_ALT_DATA_TLB_VECTOR); 39.190 +} 39.191 + 39.192 + 39.193 +/* 39.194 + * Alternate Instruction TLB Fault 39.195 + * @ Alternate Instruction TLB vector
39.196 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.197 + */ 39.198 +void 39.199 +alt_itlb (VCPU *vcpu, u64 vadr) 39.200 +{ 39.201 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 0); 39.202 + inject_guest_interruption(vcpu,IA64_ALT_INST_TLB_VECTOR); 39.203 +} 39.204 + 39.205 +/* Deal with: 39.206 + * VHPT Translation Vector 39.207 + */ 39.208 +static void 39.209 +_vhpt_fault(VCPU *vcpu, u64 vadr) 39.210 +{ 39.211 + /* If vPSR.ic, IFA, ITIR, IHA */ 39.212 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 1); 39.213 + inject_guest_interruption(vcpu,IA64_VHPT_TRANS_VECTOR); 39.214 + 39.215 + 39.216 +} 39.217 + 39.218 +/* 39.219 + * VHPT Instruction Fault 39.220 + * @ VHPT Translation vector 39.221 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.222 + */ 39.223 +void 39.224 +ivhpt_fault (VCPU *vcpu, u64 vadr) 39.225 +{ 39.226 + _vhpt_fault(vcpu, vadr); 39.227 +} 39.228 + 39.229 + 39.230 +/* 39.231 + * VHPT Data Fault 39.232 + * @ VHPT Translation vector 39.233 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.234 + */ 39.235 +void 39.236 +dvhpt_fault (VCPU *vcpu, u64 vadr) 39.237 +{ 39.238 + _vhpt_fault(vcpu, vadr); 39.239 +} 39.240 + 39.241 + 39.242 + 39.243 +/* 39.244 + * Deal with: 39.245 + * General Exception vector 39.246 + */ 39.247 +void 39.248 +_general_exception (VCPU *vcpu) 39.249 +{ 39.250 + inject_guest_interruption(vcpu,IA64_GENEX_VECTOR); 39.251 +} 39.252 + 39.253 + 39.254 +/* 39.255 + * Illegal Operation Fault 39.256 + * @ General Exception Vector 39.257 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.258 + */ 39.259 +void 39.260 +illegal_op (VCPU *vcpu) 39.261 +{ 39.262 + _general_exception(vcpu); 39.263 +} 39.264 + 39.265 +/* 39.266 + * Illegal Dependency Fault 39.267 + * @ General Exception Vector 39.268 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.269 + */ 39.270 +void 39.271 +illegal_dep (VCPU *vcpu) 39.272 +{ 39.273 + _general_exception(vcpu); 39.274 +} 39.275 + 39.276 +/* 39.277 + * Reserved Register/Field Fault 39.278 + * @ General Exception Vector 39.279 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.280 + */ 39.281 +void 39.282 +rsv_reg_field (VCPU *vcpu) 39.283 +{ 39.284 + _general_exception(vcpu); 39.285 +} 39.286 +/* 39.287 + * Privileged Operation Fault 39.288 + * @ General Exception Vector 39.289 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.290 + */ 39.291 + 39.292 +void 39.293 +privilege_op (VCPU *vcpu) 39.294 +{ 39.295 + _general_exception(vcpu); 39.296 +} 39.297 + 39.298 +/* 39.299 + * Unimplemented Data Address Fault 39.300 + * @ General Exception Vector 39.301 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.302 + */ 39.303 +void 39.304 +unimpl_daddr (VCPU *vcpu) 39.305 +{ 39.306 + _general_exception(vcpu); 39.307 +} 39.308 + 39.309 +/* 39.310 + * Privileged Register Fault 39.311 + * @ General Exception Vector 39.312 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.313 + */ 39.314 +void 39.315 +privilege_reg (VCPU *vcpu) 39.316 +{ 39.317 + _general_exception(vcpu); 39.318 +} 39.319 + 39.320 +/* Deal with 39.321 + * Nat consumption vector 39.322 + * Parameter: 39.323 + * vaddr: Optional, if t == REGISTER 39.324 + */ 39.325 +static void 39.326 +_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t) 39.327 +{ 39.328 + /* If vPSR.ic && t == DATA/INST, IFA */ 39.329 + if ( t == DATA || t == INSTRUCTION ) { 39.330 + /* IFA */ 39.331 + set_ifa_itir_iha (vcpu, vadr, 1, 0, 0); 39.332 + } 39.333 + 39.334 + inject_guest_interruption(vcpu,IA64_NAT_CONSUMPTION_VECTOR); 39.335 +} 39.336 + 39.337 +/* 39.338 + * IR Data Nat Page Consumption Fault 39.339 + * @ Nat Consumption Vector 39.340 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.341 + */ 39.342 +static
void 39.343 +ir_nat_page_consumption (VCPU *vcpu, u64 vadr) 39.344 +{ 39.345 + _nat_consumption_fault(vcpu, vadr, DATA); 39.346 +} 39.347 + 39.348 +/* 39.349 + * Instruction Nat Page Consumption Fault 39.350 + * @ Nat Consumption Vector 39.351 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.352 + */ 39.353 +void 39.354 +inat_page_consumption (VCPU *vcpu, u64 vadr) 39.355 +{ 39.356 + _nat_consumption_fault(vcpu, vadr, INSTRUCTION); 39.357 +} 39.358 + 39.359 +/* 39.360 + * Register Nat Consumption Fault 39.361 + * @ Nat Consumption Vector 39.362 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.363 + */ 39.364 +void 39.365 +rnat_consumption (VCPU *vcpu) 39.366 +{ 39.367 + _nat_consumption_fault(vcpu, 0, REGISTER); 39.368 +} 39.369 + 39.370 +/* 39.371 + * Data Nat Page Consumption Fault 39.372 + * @ Nat Consumption Vector 39.373 + * Refer to SDM Vol2 Table 5-6 & 8-1 39.374 + */ 39.375 +void 39.376 +dnat_page_consumption (VCPU *vcpu, uint64_t vadr) 39.377 +{ 39.378 + _nat_consumption_fault(vcpu, vadr, DATA); 39.379 +} 39.380 + 39.381 +/* Deal with 39.382 + * Page not present vector 39.383 + */ 39.384 +void 39.385 +page_not_present(VCPU *vcpu, u64 vadr) 39.386 +{ 39.387 + /* If vPSR.ic, IFA, ITIR */ 39.388 + set_ifa_itir_iha (vcpu, vadr, 1, 1, 0); 39.389 + inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); 39.390 +} 39.391 +
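
Every injector in vmx_interrupt.c follows the same two-step shape: latch whichever of IFA/ITIR/IHA the vector architecturally provides (SDM Vol2 Table 8-1), then redirect the guest to IVA plus the vector offset via inject_guest_interruption(). A hypothetical additional fault helper, written to the same pattern (the vector constant below is a placeholder, not a real vector from this changeset):

    /* Sketch only: IA64_EXAMPLE_VECTOR is illustrative. */
    void
    example_fault(VCPU *vcpu, u64 vadr)
    {
        /* latch IFA and ITIR; this hypothetical vector has no IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_EXAMPLE_VECTOR);
    }
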
40.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 40.2 +++ b/xen/arch/ia64/vmx_ivt.S Mon May 23 15:29:59 2005 +0000 40.3 @@ -0,0 +1,978 @@ 40.4 +/* 40.5 + * arch/ia64/kernel/vmx_ivt.S 40.6 + * 40.7 + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co 40.8 + * Stephane Eranian <eranian@hpl.hp.com> 40.9 + * David Mosberger <davidm@hpl.hp.com> 40.10 + * Copyright (C) 2000, 2002-2003 Intel Co 40.11 + * Asit Mallick <asit.k.mallick@intel.com> 40.12 + * Suresh Siddha <suresh.b.siddha@intel.com> 40.13 + * Kenneth Chen <kenneth.w.chen@intel.com> 40.14 + * Fenghua Yu <fenghua.yu@intel.com> 40.15 + * 40.16 + * 40.17 + * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP 40.18 + * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. 40.19 + * 40.20 + * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) 40.21 + * Supporting Intel virtualization architecture 40.22 + * 40.23 + */ 40.24 + 40.25 +/* 40.26 + * This file defines the interruption vector table used by the CPU. 40.27 + * It does not include one entry per possible cause of interruption. 40.28 + * 40.29 + * The first 20 entries of the table contain 64 bundles each while the 40.30 + * remaining 48 entries contain only 16 bundles each. 40.31 + * 40.32 + * The 64 bundles are used to allow inlining the whole handler for critical 40.33 + * interruptions like TLB misses. 40.34 + * 40.35 + * For each entry, the comment is as follows: 40.36 + * 40.37 + * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 40.38 + * entry offset ----/ / / / / 40.39 + * entry number ---------/ / / / 40.40 + * size of the entry -------------/ / / 40.41 + * vector name -------------------------------------/ / 40.42 + * interruptions triggering this vector ----------------------/ 40.43 + * 40.44 + * The table is 32KB in size and must be aligned on 32KB boundary. 40.45 + * (The CPU ignores the 15 lower bits of the address) 40.46 + * 40.47 + * Table is based upon EAS2.6 (Oct 1999) 40.48 + */ 40.49 + 40.50 +#include <linux/config.h> 40.51 + 40.52 +#include <asm/asmmacro.h> 40.53 +#include <asm/break.h> 40.54 +#include <asm/ia32.h> 40.55 +#include <asm/kregs.h> 40.56 +#include <asm/offsets.h> 40.57 +#include <asm/pgtable.h> 40.58 +#include <asm/processor.h> 40.59 +#include <asm/ptrace.h> 40.60 +#include <asm/system.h> 40.61 +#include <asm/thread_info.h> 40.62 +#include <asm/unistd.h> 40.63 +#include <asm/vhpt.h> 40.64 + 40.65 + 40.66 +#if 0 40.67 + /* 40.68 + * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't 40.69 + * needed for something else before enabling this... 
40.70 + */ 40.71 +# define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16 40.72 +#else 40.73 +# define VMX_DBG_FAULT(i) 40.74 +#endif 40.75 + 40.76 +#include "vmx_minstate.h" 40.77 + 40.78 + 40.79 + 40.80 +#define VMX_FAULT(n) \ 40.81 +vmx_fault_##n:; \ 40.82 + br.sptk vmx_fault_##n; \ 40.83 + ;; \ 40.84 + 40.85 + 40.86 +#define VMX_REFLECT(n) \ 40.87 + mov r31=pr; \ 40.88 + mov r19=n; /* prepare to save predicates */ \ 40.89 + mov r29=cr.ipsr; \ 40.90 + ;; \ 40.91 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ 40.92 +(p7) br.sptk.many vmx_dispatch_reflection; \ 40.93 + VMX_FAULT(n); \ 40.94 + 40.95 + 40.96 +GLOBAL_ENTRY(vmx_panic) 40.97 + br.sptk.many vmx_panic 40.98 + ;; 40.99 +END(vmx_panic) 40.100 + 40.101 + 40.102 + 40.103 + 40.104 + 40.105 + .section .text.ivt,"ax" 40.106 + 40.107 + .align 32768 // align on 32KB boundary 40.108 + .global vmx_ia64_ivt 40.109 +vmx_ia64_ivt: 40.110 +///////////////////////////////////////////////////////////////////////////////////////// 40.111 +// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) 40.112 +ENTRY(vmx_vhpt_miss) 40.113 + VMX_FAULT(0) 40.114 +END(vmx_vhpt_miss) 40.115 + 40.116 + .org vmx_ia64_ivt+0x400 40.117 +///////////////////////////////////////////////////////////////////////////////////////// 40.118 +// 0x0400 Entry 1 (size 64 bundles) ITLB (21) 40.119 +ENTRY(vmx_itlb_miss) 40.120 + mov r31 = pr 40.121 + mov r29=cr.ipsr; 40.122 + ;; 40.123 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; 40.124 +(p6) br.sptk vmx_fault_1 40.125 + mov r16 = cr.ifa 40.126 + ;; 40.127 + thash r17 = r16 40.128 + ttag r20 = r16 40.129 + ;; 40.130 +vmx_itlb_loop: 40.131 + cmp.eq p6,p0 = r0, r17 40.132 +(p6) br vmx_itlb_out 40.133 + ;; 40.134 + adds r22 = VLE_TITAG_OFFSET, r17 40.135 + adds r23 = VLE_CCHAIN_OFFSET, r17 40.136 + ;; 40.137 + ld8 r24 = [r22] 40.138 + ld8 r25 = [r23] 40.139 + ;; 40.140 + lfetch [r25] 40.141 + cmp.eq p6,p7 = r20, r24 40.142 + ;; 40.143 +(p7) mov r17 = r25; 40.144 +(p7) br.sptk vmx_itlb_loop 40.145 + ;; 40.146 + adds r23 = VLE_PGFLAGS_OFFSET, r17 40.147 + adds r24 = VLE_ITIR_OFFSET, r17 40.148 + ;; 40.149 + ld8 r26 = [r23] 40.150 + ld8 r25 = [r24] 40.151 + ;; 40.152 + mov cr.itir = r25 40.153 + ;; 40.154 + itc.i r26 40.155 + ;; 40.156 + srlz.i 40.157 + ;; 40.158 + mov r23=r31 40.159 + mov r22=b0 40.160 + adds r16=IA64_VPD_BASE_OFFSET,r21 40.161 + ;; 40.162 + ld8 r18=[r16] 40.163 + ;; 40.164 + adds r19=VPD(VPSR),r18 40.165 + movl r20=__vsa_base 40.166 + ;; 40.167 + ld8 r19=[r19] 40.168 + ld8 r20=[r20] 40.169 + ;; 40.170 + br.sptk ia64_vmm_entry 40.171 + ;; 40.172 +vmx_itlb_out: 40.173 + mov r19 = 1 40.174 + br.sptk vmx_dispatch_tlb_miss 40.175 + VMX_FAULT(1); 40.176 +END(vmx_itlb_miss) 40.177 + 40.178 + .org vmx_ia64_ivt+0x0800 40.179 +///////////////////////////////////////////////////////////////////////////////////////// 40.180 +// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) 40.181 +ENTRY(vmx_dtlb_miss) 40.182 + mov r31 = pr 40.183 + mov r29=cr.ipsr; 40.184 + ;; 40.185 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; 40.186 +(p6)br.sptk vmx_fault_2 40.187 + mov r16 = cr.ifa 40.188 + ;; 40.189 + thash r17 = r16 40.190 + ttag r20 = r16 40.191 + ;; 40.192 +vmx_dtlb_loop: 40.193 + cmp.eq p6,p0 = r0, r17 40.194 +(p6)br vmx_dtlb_out 40.195 + ;; 40.196 + adds r22 = VLE_TITAG_OFFSET, r17 40.197 + adds r23 = VLE_CCHAIN_OFFSET, r17 40.198 + ;; 40.199 + ld8 r24 = [r22] 40.200 + ld8 r25 = [r23] 40.201 + ;; 40.202 + lfetch [r25] 40.203 + cmp.eq p6,p7 = r20, r24 40.204 + ;; 40.205 +(p7)mov r17 = r25; 40.206 +(p7)br.sptk vmx_dtlb_loop
40.207 + ;; 40.208 + adds r23 = VLE_PGFLAGS_OFFSET, r17 40.209 + adds r24 = VLE_ITIR_OFFSET, r17 40.210 + ;; 40.211 + ld8 r26 = [r23] 40.212 + ld8 r25 = [r24] 40.213 + ;; 40.214 + mov cr.itir = r25 40.215 + ;; 40.216 + itc.d r26 40.217 + ;; 40.218 + srlz.d; 40.219 + ;; 40.220 + mov r23=r31 40.221 + mov r22=b0 40.222 + adds r16=IA64_VPD_BASE_OFFSET,r21 40.223 + ;; 40.224 + ld8 r18=[r16] 40.225 + ;; 40.226 + adds r19=VPD(VPSR),r18 40.227 + movl r20=__vsa_base 40.228 + ;; 40.229 + ld8 r19=[r19] 40.230 + ld8 r20=[r20] 40.231 + ;; 40.232 + br.sptk ia64_vmm_entry 40.233 + ;; 40.234 +vmx_dtlb_out: 40.235 + mov r19 = 2 40.236 + br.sptk vmx_dispatch_tlb_miss 40.237 + VMX_FAULT(2); 40.238 +END(vmx_dtlb_miss) 40.239 + 40.240 + .org vmx_ia64_ivt+0x0c00 40.241 +///////////////////////////////////////////////////////////////////////////////////////// 40.242 +// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) 40.243 +ENTRY(vmx_alt_itlb_miss) 40.244 + mov r31 = pr 40.245 + mov r29=cr.ipsr; 40.246 + ;; 40.247 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; 40.248 +(p7)br.sptk vmx_fault_3 40.249 + mov r16=cr.ifa // get address that caused the TLB miss 40.250 + movl r17=PAGE_KERNEL 40.251 + mov r24=cr.ipsr 40.252 + movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 40.253 + ;; 40.254 + and r19=r19,r16 // clear ed, reserved bits, and PTE control bits 40.255 + shr.u r18=r16,57 // move address bit 61 to bit 4 40.256 + ;; 40.257 + andcm r18=0x10,r18 // bit 4=~address-bit(61) 40.258 + or r19=r17,r19 // insert PTE control bits into r19 40.259 + ;; 40.260 + or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 40.261 + ;; 40.262 + itc.i r19 // insert the TLB entry 40.263 + mov pr=r31,-1 40.264 + rfi 40.265 + VMX_FAULT(3); 40.266 +END(vmx_alt_itlb_miss) 40.267 + 40.268 + 40.269 + .org vmx_ia64_ivt+0x1000 40.270 +///////////////////////////////////////////////////////////////////////////////////////// 40.271 +// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) 40.272 +ENTRY(vmx_alt_dtlb_miss) 40.273 + mov r31=pr 40.274 + mov r29=cr.ipsr; 40.275 + ;; 40.276 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; 40.277 +(p7)br.sptk vmx_fault_4 40.278 + mov r16=cr.ifa // get address that caused the TLB miss 40.279 + movl r17=PAGE_KERNEL 40.280 + mov r20=cr.isr 40.281 + movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 40.282 + mov r24=cr.ipsr 40.283 + ;; 40.284 + and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field 40.285 + tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? 40.286 + shr.u r18=r16,57 // move address bit 61 to bit 4 40.287 + and r19=r19,r16 // clear ed, reserved bits, and PTE control bits 40.288 + tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? 
40.289 + ;; 40.290 + andcm r18=0x10,r18 // bit 4=~address-bit(61) 40.291 +(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field 40.292 + dep r24=-1,r24,IA64_PSR_ED_BIT,1 40.293 + or r19=r19,r17 // insert PTE control bits into r19 40.294 + ;; 40.295 + or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 40.296 +(p6) mov cr.ipsr=r24 40.297 + ;; 40.298 +(p7) itc.d r19 // insert the TLB entry 40.299 + mov pr=r31,-1 40.300 + rfi 40.301 + VMX_FAULT(4); 40.302 +END(vmx_alt_dtlb_miss) 40.303 + 40.304 + .org vmx_ia64_ivt+0x1400 40.305 +///////////////////////////////////////////////////////////////////////////////////////// 40.306 +// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) 40.307 +ENTRY(vmx_nested_dtlb_miss) 40.308 + VMX_FAULT(5) 40.309 +END(vmx_nested_dtlb_miss) 40.310 + 40.311 + .org vmx_ia64_ivt+0x1800 40.312 +///////////////////////////////////////////////////////////////////////////////////////// 40.313 +// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) 40.314 +ENTRY(vmx_ikey_miss) 40.315 + VMX_REFLECT(6) 40.316 +END(vmx_ikey_miss) 40.317 + 40.318 + .org vmx_ia64_ivt+0x1c00 40.319 +///////////////////////////////////////////////////////////////////////////////////////// 40.320 +// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 40.321 +ENTRY(vmx_dkey_miss) 40.322 + VMX_REFLECT(7) 40.323 +END(vmx_dkey_miss) 40.324 + 40.325 + .org vmx_ia64_ivt+0x2000 40.326 +///////////////////////////////////////////////////////////////////////////////////////// 40.327 +// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) 40.328 +ENTRY(vmx_dirty_bit) 40.329 + VMX_REFLECT(8) 40.330 +END(vmx_dirty_bit) 40.331 + 40.332 + .org vmx_ia64_ivt+0x2400 40.333 +///////////////////////////////////////////////////////////////////////////////////////// 40.334 +// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) 40.335 +ENTRY(vmx_iaccess_bit) 40.336 + VMX_REFLECT(9) 40.337 +END(vmx_iaccess_bit) 40.338 + 40.339 + .org vmx_ia64_ivt+0x2800 40.340 +///////////////////////////////////////////////////////////////////////////////////////// 40.341 +// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) 40.342 +ENTRY(vmx_daccess_bit) 40.343 + VMX_REFLECT(10) 40.344 +END(vmx_daccess_bit) 40.345 + 40.346 + .org vmx_ia64_ivt+0x2c00 40.347 +///////////////////////////////////////////////////////////////////////////////////////// 40.348 +// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) 40.349 +ENTRY(vmx_break_fault) 40.350 + mov r31=pr 40.351 + mov r19=11 40.352 + br.sptk.many vmx_dispatch_break_fault 40.353 +END(vmx_break_fault) 40.354 + 40.355 + .org vmx_ia64_ivt+0x3000 40.356 +///////////////////////////////////////////////////////////////////////////////////////// 40.357 +// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 40.358 +ENTRY(vmx_interrupt) 40.359 + mov r31=pr // prepare to save predicates 40.360 + mov r19=12 40.361 + mov r29=cr.ipsr 40.362 + ;; 40.363 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT 40.364 +(p7) br.sptk vmx_dispatch_interrupt 40.365 + ;; 40.366 + mov r27=ar.rsc /* M */ 40.367 + mov r20=r1 /* A */ 40.368 + mov r25=ar.unat /* M */ 40.369 + mov r26=ar.pfs /* I */ 40.370 + mov r28=cr.iip /* M */ 40.371 + cover /* B (or nothing) */ 40.372 + ;; 40.373 + mov r1=sp 40.374 + ;; 40.375 + invala /* M */ 40.376 + mov r30=cr.ifs 40.377 + ;; 40.378 + addl r1=-IA64_PT_REGS_SIZE,r1 40.379 + ;; 40.380 + adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ 40.381 + adds r16=PT(CR_IPSR),r1 40.382 + ;; 40.383 +
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES 40.384 + st8 [r16]=r29 /* save cr.ipsr */ 40.385 + ;; 40.386 + lfetch.fault.excl.nt1 [r17] 40.387 + mov r29=b0 40.388 + ;; 40.389 + adds r16=PT(R8),r1 /* initialize first base pointer */ 40.390 + adds r17=PT(R9),r1 /* initialize second base pointer */ 40.391 + mov r18=r0 /* make sure r18 isn't NaT */ 40.392 + ;; 40.393 +.mem.offset 0,0; st8.spill [r16]=r8,16 40.394 +.mem.offset 8,0; st8.spill [r17]=r9,16 40.395 + ;; 40.396 +.mem.offset 0,0; st8.spill [r16]=r10,24 40.397 +.mem.offset 8,0; st8.spill [r17]=r11,24 40.398 + ;; 40.399 + st8 [r16]=r28,16 /* save cr.iip */ 40.400 + st8 [r17]=r30,16 /* save cr.ifs */ 40.401 + mov r8=ar.fpsr /* M */ 40.402 + mov r9=ar.csd 40.403 + mov r10=ar.ssd 40.404 + movl r11=FPSR_DEFAULT /* L-unit */ 40.405 + ;; 40.406 + st8 [r16]=r25,16 /* save ar.unat */ 40.407 + st8 [r17]=r26,16 /* save ar.pfs */ 40.408 + shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ 40.409 + ;; 40.410 + st8 [r16]=r27,16 /* save ar.rsc */ 40.411 + adds r17=16,r17 /* skip over ar_rnat field */ 40.412 + ;; /* avoid RAW on r16 & r17 */ 40.413 + st8 [r17]=r31,16 /* save predicates */ 40.414 + adds r16=16,r16 /* skip over ar_bspstore field */ 40.415 + ;; 40.416 + st8 [r16]=r29,16 /* save b0 */ 40.417 + st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ 40.418 + ;; 40.419 +.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ 40.420 +.mem.offset 8,0; st8.spill [r17]=r12,16 40.421 + adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */ 40.422 + ;; 40.423 +.mem.offset 0,0; st8.spill [r16]=r13,16 40.424 +.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ 40.425 + mov r13=r21 /* establish `current' */ 40.426 + ;; 40.427 +.mem.offset 0,0; st8.spill [r16]=r15,16 40.428 +.mem.offset 8,0; st8.spill [r17]=r14,16 40.429 + dep r14=-1,r0,60,4 40.430 + ;; 40.431 +.mem.offset 0,0; st8.spill [r16]=r2,16 40.432 +.mem.offset 8,0; st8.spill [r17]=r3,16 40.433 + adds r2=IA64_PT_REGS_R16_OFFSET,r1 40.434 + ;; 40.435 + mov r8=ar.ccv 40.436 + movl r1=__gp /* establish kernel global pointer */ 40.437 + ;; 40.438 + bsw.1 40.439 + ;; 40.440 + alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group 40.441 + mov out0=cr.ivr // pass cr.ivr as first arg 40.442 + add out1=16,sp // pass pointer to pt_regs as second arg 40.443 + 40.444 + ssm psr.ic 40.445 + ;; 40.446 + srlz.i 40.447 + ;; 40.448 + ssm psr.i 40.449 + adds r3=8,r2 // set up second base pointer for SAVE_REST 40.450 + srlz.i // ensure everybody knows psr.ic is back on 40.451 + ;; 40.452 +.mem.offset 0,0; st8.spill [r2]=r16,16 40.453 +.mem.offset 8,0; st8.spill [r3]=r17,16 40.454 + ;; 40.455 +.mem.offset 0,0; st8.spill [r2]=r18,16 40.456 +.mem.offset 8,0; st8.spill [r3]=r19,16 40.457 + ;; 40.458 +.mem.offset 0,0; st8.spill [r2]=r20,16 40.459 +.mem.offset 8,0; st8.spill [r3]=r21,16 40.460 + mov r18=b6 40.461 + ;; 40.462 +.mem.offset 0,0; st8.spill [r2]=r22,16 40.463 +.mem.offset 8,0; st8.spill [r3]=r23,16 40.464 + mov r19=b7 40.465 + ;; 40.466 +.mem.offset 0,0; st8.spill [r2]=r24,16 40.467 +.mem.offset 8,0; st8.spill [r3]=r25,16 40.468 + ;; 40.469 +.mem.offset 0,0; st8.spill [r2]=r26,16 40.470 +.mem.offset 8,0; st8.spill [r3]=r27,16 40.471 + ;; 40.472 +.mem.offset 0,0; st8.spill [r2]=r28,16 40.473 +.mem.offset 8,0; st8.spill [r3]=r29,16 40.474 + ;; 40.475 +.mem.offset 0,0; st8.spill [r2]=r30,16 40.476 +.mem.offset 8,0; st8.spill [r3]=r31,32 40.477 + ;; 40.478 + mov ar.fpsr=r11 /* M-unit */ 40.479 + st8 [r2]=r8,8 /* ar.ccv */ 40.480 + adds r24=PT(B6)-PT(F7),r3
40.481 + ;; 40.482 + stf.spill [r2]=f6,32 40.483 + stf.spill [r3]=f7,32 40.484 + ;; 40.485 + stf.spill [r2]=f8,32 40.486 + stf.spill [r3]=f9,32 40.487 + ;; 40.488 + stf.spill [r2]=f10 40.489 + stf.spill [r3]=f11 40.490 + adds r25=PT(B7)-PT(F11),r3 40.491 + ;; 40.492 + st8 [r24]=r18,16 /* b6 */ 40.493 + st8 [r25]=r19,16 /* b7 */ 40.494 + ;; 40.495 + st8 [r24]=r9 /* ar.csd */ 40.496 + st8 [r25]=r10 /* ar.ssd */ 40.497 + ;; 40.498 + srlz.d // make sure we see the effect of cr.ivr 40.499 + movl r14=ia64_leave_nested 40.500 + ;; 40.501 + mov rp=r14 40.502 + br.call.sptk.many b6=vmx_ia64_handle_irq 40.503 + ;; 40.504 +END(vmx_interrupt) 40.505 + 40.506 + .org vmx_ia64_ivt+0x3400 40.507 +///////////////////////////////////////////////////////////////////////////////////////// 40.508 +// 0x3400 Entry 13 (size 64 bundles) Reserved 40.509 + VMX_DBG_FAULT(13) 40.510 + VMX_FAULT(13) 40.511 + 40.512 + 40.513 + .org vmx_ia64_ivt+0x3800 40.514 +///////////////////////////////////////////////////////////////////////////////////////// 40.515 +// 0x3800 Entry 14 (size 64 bundles) Reserved 40.516 + VMX_DBG_FAULT(14) 40.517 + VMX_FAULT(14) 40.518 + 40.519 + 40.520 + .org vmx_ia64_ivt+0x3c00 40.521 +///////////////////////////////////////////////////////////////////////////////////////// 40.522 +// 0x3c00 Entry 15 (size 64 bundles) Reserved 40.523 + VMX_DBG_FAULT(15) 40.524 + VMX_FAULT(15) 40.525 + 40.526 + 40.527 + .org vmx_ia64_ivt+0x4000 40.528 +///////////////////////////////////////////////////////////////////////////////////////// 40.529 +// 0x4000 Entry 16 (size 64 bundles) Reserved 40.530 + VMX_DBG_FAULT(16) 40.531 + VMX_FAULT(16) 40.532 + 40.533 + .org vmx_ia64_ivt+0x4400 40.534 +///////////////////////////////////////////////////////////////////////////////////////// 40.535 +// 0x4400 Entry 17 (size 64 bundles) Reserved 40.536 + VMX_DBG_FAULT(17) 40.537 + VMX_FAULT(17) 40.538 + 40.539 + .org vmx_ia64_ivt+0x4800 40.540 +///////////////////////////////////////////////////////////////////////////////////////// 40.541 +// 0x4800 Entry 18 (size 64 bundles) Reserved 40.542 + VMX_DBG_FAULT(18) 40.543 + VMX_FAULT(18) 40.544 + 40.545 + .org vmx_ia64_ivt+0x4c00 40.546 +///////////////////////////////////////////////////////////////////////////////////////// 40.547 +// 0x4c00 Entry 19 (size 64 bundles) Reserved 40.548 + VMX_DBG_FAULT(19) 40.549 + VMX_FAULT(19) 40.550 + 40.551 +///////////////////////////////////////////////////////////////////////////////////////// 40.552 +// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 40.553 +ENTRY(vmx_iaccess_rights) 40.554 + VMX_REFLECT(22) 40.555 +END(vmx_iaccess_rights) 40.556 + 40.557 + .org vmx_ia64_ivt+0x5300 40.558 +///////////////////////////////////////////////////////////////////////////////////////// 40.559 +// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 40.560 +ENTRY(vmx_daccess_rights) 40.561 + VMX_REFLECT(23) 40.562 +END(vmx_daccess_rights) 40.563 + 40.564 + .org vmx_ia64_ivt+0x5400 40.565 +///////////////////////////////////////////////////////////////////////////////////////// 40.566 +// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 40.567 +ENTRY(vmx_general_exception) 40.568 + VMX_FAULT(24) 40.569 +// VMX_REFLECT(24) 40.570 +END(vmx_general_exception) 40.571 + 40.572 + .org vmx_ia64_ivt+0x5500 40.573 +///////////////////////////////////////////////////////////////////////////////////////// 40.574 +// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) 40.575 +ENTRY(vmx_disabled_fp_reg) 
40.576 + VMX_REFLECT(25) 40.577 +END(vmx_disabled_fp_reg) 40.578 + 40.579 + .org vmx_ia64_ivt+0x5600 40.580 +///////////////////////////////////////////////////////////////////////////////////////// 40.581 +// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) 40.582 +ENTRY(vmx_nat_consumption) 40.583 + VMX_REFLECT(26) 40.584 +END(vmx_nat_consumption) 40.585 + 40.586 + .org vmx_ia64_ivt+0x5700 40.587 +///////////////////////////////////////////////////////////////////////////////////////// 40.588 +// 0x5700 Entry 27 (size 16 bundles) Speculation (40) 40.589 +ENTRY(vmx_speculation_vector) 40.590 + VMX_REFLECT(27) 40.591 +END(vmx_speculation_vector) 40.592 + 40.593 + .org vmx_ia64_ivt+0x5800 40.594 +///////////////////////////////////////////////////////////////////////////////////////// 40.595 +// 0x5800 Entry 28 (size 16 bundles) Reserved 40.596 + VMX_DBG_FAULT(28) 40.597 + VMX_FAULT(28) 40.598 + 40.599 + .org vmx_ia64_ivt+0x5900 40.600 +///////////////////////////////////////////////////////////////////////////////////////// 40.601 +// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) 40.602 +ENTRY(vmx_debug_vector) 40.603 + VMX_DBG_FAULT(29) 40.604 + VMX_FAULT(29) 40.605 +END(vmx_debug_vector) 40.606 + 40.607 + .org vmx_ia64_ivt+0x5a00 40.608 +///////////////////////////////////////////////////////////////////////////////////////// 40.609 +// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) 40.610 +ENTRY(vmx_unaligned_access) 40.611 + VMX_REFLECT(30) 40.612 +END(vmx_unaligned_access) 40.613 + 40.614 + .org vmx_ia64_ivt+0x5b00 40.615 +///////////////////////////////////////////////////////////////////////////////////////// 40.616 +// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) 40.617 +ENTRY(vmx_unsupported_data_reference) 40.618 + VMX_REFLECT(31) 40.619 +END(vmx_unsupported_data_reference) 40.620 + 40.621 + .org vmx_ia64_ivt+0x5c00 40.622 +///////////////////////////////////////////////////////////////////////////////////////// 40.623 +// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) 40.624 +ENTRY(vmx_floating_point_fault) 40.625 + VMX_REFLECT(32) 40.626 +END(vmx_floating_point_fault) 40.627 + 40.628 + .org vmx_ia64_ivt+0x5d00 40.629 +///////////////////////////////////////////////////////////////////////////////////////// 40.630 +// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) 40.631 +ENTRY(vmx_floating_point_trap) 40.632 + VMX_REFLECT(33) 40.633 +END(vmx_floating_point_trap) 40.634 + 40.635 + .org vmx_ia64_ivt+0x5e00 40.636 +///////////////////////////////////////////////////////////////////////////////////////// 40.637 +// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) 40.638 +ENTRY(vmx_lower_privilege_trap) 40.639 + VMX_REFLECT(34) 40.640 +END(vmx_lower_privilege_trap) 40.641 + 40.642 + .org vmx_ia64_ivt+0x5f00 40.643 +///////////////////////////////////////////////////////////////////////////////////////// 40.644 +// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) 40.645 +ENTRY(vmx_taken_branch_trap) 40.646 + VMX_REFLECT(35) 40.647 +END(vmx_taken_branch_trap) 40.648 + 40.649 + .org vmx_ia64_ivt+0x6000 40.650 +///////////////////////////////////////////////////////////////////////////////////////// 40.651 +// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) 40.652 +ENTRY(vmx_single_step_trap) 40.653 + VMX_REFLECT(36) 40.654 +END(vmx_single_step_trap) 40.655 + 40.656 + .org vmx_ia64_ivt+0x6100 40.657 
+///////////////////////////////////////////////////////////////////////////////////////// 40.658 +// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault 40.659 +ENTRY(vmx_virtualization_fault) 40.660 + VMX_DBG_FAULT(37) 40.661 + mov r31=pr 40.662 + mov r19=37 40.663 + br.sptk vmx_dispatch_virtualization_fault 40.664 +END(vmx_virtualization_fault) 40.665 + 40.666 + .org vmx_ia64_ivt+0x6200 40.667 +///////////////////////////////////////////////////////////////////////////////////////// 40.668 +// 0x6200 Entry 38 (size 16 bundles) Reserved 40.669 + VMX_DBG_FAULT(38) 40.670 + VMX_FAULT(38) 40.671 + 40.672 + .org vmx_ia64_ivt+0x6300 40.673 +///////////////////////////////////////////////////////////////////////////////////////// 40.674 +// 0x6300 Entry 39 (size 16 bundles) Reserved 40.675 + VMX_DBG_FAULT(39) 40.676 + VMX_FAULT(39) 40.677 + 40.678 + .org vmx_ia64_ivt+0x6400 40.679 +///////////////////////////////////////////////////////////////////////////////////////// 40.680 +// 0x6400 Entry 40 (size 16 bundles) Reserved 40.681 + VMX_DBG_FAULT(40) 40.682 + VMX_FAULT(40) 40.683 + 40.684 + .org vmx_ia64_ivt+0x6500 40.685 +///////////////////////////////////////////////////////////////////////////////////////// 40.686 +// 0x6500 Entry 41 (size 16 bundles) Reserved 40.687 + VMX_DBG_FAULT(41) 40.688 + VMX_FAULT(41) 40.689 + 40.690 + .org vmx_ia64_ivt+0x6600 40.691 +///////////////////////////////////////////////////////////////////////////////////////// 40.692 +// 0x6600 Entry 42 (size 16 bundles) Reserved 40.693 + VMX_DBG_FAULT(42) 40.694 + VMX_FAULT(42) 40.695 + 40.696 + .org vmx_ia64_ivt+0x6700 40.697 +///////////////////////////////////////////////////////////////////////////////////////// 40.698 +// 0x6700 Entry 43 (size 16 bundles) Reserved 40.699 + VMX_DBG_FAULT(43) 40.700 + VMX_FAULT(43) 40.701 + 40.702 + .org vmx_ia64_ivt+0x6800 40.703 +///////////////////////////////////////////////////////////////////////////////////////// 40.704 +// 0x6800 Entry 44 (size 16 bundles) Reserved 40.705 + VMX_DBG_FAULT(44) 40.706 + VMX_FAULT(44) 40.707 + 40.708 + .org vmx_ia64_ivt+0x6900 40.709 +///////////////////////////////////////////////////////////////////////////////////////// 40.710 +// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) 40.711 +ENTRY(vmx_ia32_exception) 40.712 + VMX_DBG_FAULT(45) 40.713 + VMX_FAULT(45) 40.714 +END(vmx_ia32_exception) 40.715 + 40.716 + .org vmx_ia64_ivt+0x6a00 40.717 +///////////////////////////////////////////////////////////////////////////////////////// 40.718 +// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) 40.719 +ENTRY(vmx_ia32_intercept) 40.720 + VMX_DBG_FAULT(46) 40.721 + VMX_FAULT(46) 40.722 +END(vmx_ia32_intercept) 40.723 + 40.724 + .org vmx_ia64_ivt+0x6b00 40.725 +///////////////////////////////////////////////////////////////////////////////////////// 40.726 +// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) 40.727 +ENTRY(vmx_ia32_interrupt) 40.728 + VMX_DBG_FAULT(47) 40.729 + VMX_FAULT(47) 40.730 +END(vmx_ia32_interrupt) 40.731 + 40.732 + .org vmx_ia64_ivt+0x6c00 40.733 +///////////////////////////////////////////////////////////////////////////////////////// 40.734 +// 0x6c00 Entry 48 (size 16 bundles) Reserved 40.735 + VMX_DBG_FAULT(48) 40.736 + VMX_FAULT(48) 40.737 + 40.738 + .org vmx_ia64_ivt+0x6d00 40.739 +///////////////////////////////////////////////////////////////////////////////////////// 40.740 +// 0x6d00 Entry 49 (size 16 bundles) Reserved 40.741 +
VMX_DBG_FAULT(49) 40.742 + VMX_FAULT(49) 40.743 + 40.744 + .org vmx_ia64_ivt+0x6e00 40.745 +///////////////////////////////////////////////////////////////////////////////////////// 40.746 +// 0x6e00 Entry 50 (size 16 bundles) Reserved 40.747 + VMX_DBG_FAULT(50) 40.748 + VMX_FAULT(50) 40.749 + 40.750 + .org vmx_ia64_ivt+0x6f00 40.751 +///////////////////////////////////////////////////////////////////////////////////////// 40.752 +// 0x6f00 Entry 51 (size 16 bundles) Reserved 40.753 + VMX_DBG_FAULT(51) 40.754 + VMX_FAULT(51) 40.755 + 40.756 + .org vmx_ia64_ivt+0x7000 40.757 +///////////////////////////////////////////////////////////////////////////////////////// 40.758 +// 0x7000 Entry 52 (size 16 bundles) Reserved 40.759 + VMX_DBG_FAULT(52) 40.760 + VMX_FAULT(52) 40.761 + 40.762 + .org vmx_ia64_ivt+0x7100 40.763 +///////////////////////////////////////////////////////////////////////////////////////// 40.764 +// 0x7100 Entry 53 (size 16 bundles) Reserved 40.765 + VMX_DBG_FAULT(53) 40.766 + VMX_FAULT(53) 40.767 + 40.768 + .org vmx_ia64_ivt+0x7200 40.769 +///////////////////////////////////////////////////////////////////////////////////////// 40.770 +// 0x7200 Entry 54 (size 16 bundles) Reserved 40.771 + VMX_DBG_FAULT(54) 40.772 + VMX_FAULT(54) 40.773 + 40.774 + .org vmx_ia64_ivt+0x7300 40.775 +///////////////////////////////////////////////////////////////////////////////////////// 40.776 +// 0x7300 Entry 55 (size 16 bundles) Reserved 40.777 + VMX_DBG_FAULT(55) 40.778 + VMX_FAULT(55) 40.779 + 40.780 + .org vmx_ia64_ivt+0x7400 40.781 +///////////////////////////////////////////////////////////////////////////////////////// 40.782 +// 0x7400 Entry 56 (size 16 bundles) Reserved 40.783 + VMX_DBG_FAULT(56) 40.784 + VMX_FAULT(56) 40.785 + 40.786 + .org vmx_ia64_ivt+0x7500 40.787 +///////////////////////////////////////////////////////////////////////////////////////// 40.788 +// 0x7500 Entry 57 (size 16 bundles) Reserved 40.789 + VMX_DBG_FAULT(57) 40.790 + VMX_FAULT(57) 40.791 + 40.792 + .org vmx_ia64_ivt+0x7600 40.793 +///////////////////////////////////////////////////////////////////////////////////////// 40.794 +// 0x7600 Entry 58 (size 16 bundles) Reserved 40.795 + VMX_DBG_FAULT(58) 40.796 + VMX_FAULT(58) 40.797 + 40.798 + .org vmx_ia64_ivt+0x7700 40.799 +///////////////////////////////////////////////////////////////////////////////////////// 40.800 +// 0x7700 Entry 59 (size 16 bundles) Reserved 40.801 + VMX_DBG_FAULT(59) 40.802 + VMX_FAULT(59) 40.803 + 40.804 + .org vmx_ia64_ivt+0x7800 40.805 +///////////////////////////////////////////////////////////////////////////////////////// 40.806 +// 0x7800 Entry 60 (size 16 bundles) Reserved 40.807 + VMX_DBG_FAULT(60) 40.808 + VMX_FAULT(60) 40.809 + 40.810 + .org vmx_ia64_ivt+0x7900 40.811 +///////////////////////////////////////////////////////////////////////////////////////// 40.812 +// 0x7900 Entry 61 (size 16 bundles) Reserved 40.813 + VMX_DBG_FAULT(61) 40.814 + VMX_FAULT(61) 40.815 + 40.816 + .org vmx_ia64_ivt+0x7a00 40.817 +///////////////////////////////////////////////////////////////////////////////////////// 40.818 +// 0x7a00 Entry 62 (size 16 bundles) Reserved 40.819 + VMX_DBG_FAULT(62) 40.820 + VMX_FAULT(62) 40.821 + 40.822 + .org vmx_ia64_ivt+0x7b00 40.823 +///////////////////////////////////////////////////////////////////////////////////////// 40.824 +// 0x7b00 Entry 63 (size 16 bundles) Reserved 40.825 + VMX_DBG_FAULT(63) 40.826 + VMX_FAULT(63) 40.827 + 40.828 + .org vmx_ia64_ivt+0x7c00 40.829 
+///////////////////////////////////////////////////////////////////////////////////////// 40.830 +// 0x7c00 Entry 64 (size 16 bundles) Reserved 40.831 + VMX_DBG_FAULT(64) 40.832 + VMX_FAULT(64) 40.833 + 40.834 + .org vmx_ia64_ivt+0x7d00 40.835 +///////////////////////////////////////////////////////////////////////////////////////// 40.836 +// 0x7d00 Entry 65 (size 16 bundles) Reserved 40.837 + VMX_DBG_FAULT(65) 40.838 + VMX_FAULT(65) 40.839 + 40.840 + .org vmx_ia64_ivt+0x7e00 40.841 +///////////////////////////////////////////////////////////////////////////////////////// 40.842 +// 0x7e00 Entry 66 (size 16 bundles) Reserved 40.843 + VMX_DBG_FAULT(66) 40.844 + VMX_FAULT(66) 40.845 + 40.846 + .org vmx_ia64_ivt+0x7f00 40.847 +///////////////////////////////////////////////////////////////////////////////////////// 40.848 +// 0x7f00 Entry 67 (size 16 bundles) Reserved 40.849 + VMX_DBG_FAULT(67) 40.850 + VMX_FAULT(67) 40.851 + 40.852 + .org vmx_ia64_ivt+0x8000 40.853 + // There is no particular reason for this code to be here, other than that 40.854 + // there happens to be space here that would go unused otherwise. If this 40.855 + // fault ever gets "unreserved", simply move the following code to a more 40.856 + // suitable spot... 40.857 + 40.858 + 40.859 +ENTRY(vmx_dispatch_reflection) 40.860 + /* 40.861 + * Input: 40.862 + * psr.ic: off 40.863 + * r19: intr type (offset into ivt, see ia64_int.h) 40.864 + * r31: contains saved predicates (pr) 40.865 + */ 40.866 + VMX_SAVE_MIN_WITH_COVER_R19 40.867 + alloc r14=ar.pfs,0,0,4,0 40.868 + mov out0=cr.ifa 40.869 + mov out1=cr.isr 40.870 + mov out2=cr.iim 40.871 + mov out3=r15 40.872 + 40.873 + ssm psr.ic 40.874 + ;; 40.875 + srlz.i // guarantee that interruption collection is on 40.876 + ;; 40.877 + ssm psr.i // restore psr.i 40.878 + adds r3=16,r2 // set up second base pointer 40.879 + ;; 40.880 + VMX_SAVE_REST 40.881 + movl r14=ia64_leave_hypervisor 40.882 + ;; 40.883 + mov rp=r14 40.884 + br.call.sptk.many b6=vmx_reflect_interruption 40.885 +END(vmx_dispatch_reflection) 40.886 + 40.887 +ENTRY(vmx_dispatch_virtualization_fault) 40.888 + cmp.eq pEml,pNonEml=r0,r0 /* force pEml =1, save r4 ~ r7 */ 40.889 + ;; 40.890 + VMX_SAVE_MIN_WITH_COVER_R19 40.891 + ;; 40.892 + alloc r14=ar.pfs,0,0,3,0 // now it's safe (must be first in insn group!)
40.893 + mov out0=r13 //vcpu 40.894 + mov out1=r4 //cause 40.895 + mov out2=r5 //opcode 40.896 + ssm psr.ic 40.897 + ;; 40.898 + srlz.i // guarantee that interruption collection is on 40.899 + ;; 40.900 + ssm psr.i // restore psr.i 40.901 + adds r3=16,r2 // set up second base pointer 40.902 + ;; 40.903 + VMX_SAVE_REST 40.904 + movl r14=ia64_leave_hypervisor 40.905 + ;; 40.906 + mov rp=r14 40.907 + br.call.sptk.many b6=vmx_emulate 40.908 +END(vmx_dispatch_virtualization_fault) 40.909 + 40.910 + 40.911 + 40.912 +ENTRY(vmx_dispatch_tlb_miss) 40.913 + VMX_SAVE_MIN_WITH_COVER_R19 40.914 + alloc r14=ar.pfs,0,0,3,0 40.915 + mov out0=r13 40.916 + mov out1=r15 40.917 + mov out2=cr.ifa 40.918 + 40.919 + ssm psr.ic 40.920 + ;; 40.921 + srlz.i // guarantee that interruption collection is on 40.922 + ;; 40.923 + ssm psr.i // restore psr.i 40.924 + adds r3=16,r2 // set up second base pointer 40.925 + ;; 40.926 + VMX_SAVE_REST 40.927 + movl r14=ia64_leave_hypervisor 40.928 + ;; 40.929 + mov rp=r14 40.930 + br.call.sptk.many b6=vmx_hpw_miss 40.931 +END(vmx_dispatch_tlb_miss) 40.932 + 40.933 + 40.934 +ENTRY(vmx_dispatch_break_fault) 40.935 + cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */ 40.936 + ;; 40.937 + VMX_SAVE_MIN_WITH_COVER_R19 40.938 + ;; 40.939 + alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 40.940 + mov out0=cr.ifa 40.941 + adds out1=16,sp 40.942 + mov out2=cr.isr // FIXME: pity to make this slow access twice 40.943 + mov out3=cr.iim // FIXME: pity to make this slow access twice 40.944 + 40.945 + ssm psr.ic 40.946 + ;; 40.947 + srlz.i // guarantee that interruption collection is on 40.948 + ;; 40.949 + ssm psr.i // restore psr.i 40.950 + adds r3=16,r2 // set up second base pointer 40.951 + ;; 40.952 + VMX_SAVE_REST 40.953 + movl r14=ia64_leave_hypervisor 40.954 + ;; 40.955 + mov rp=r14 40.956 + br.call.sptk.many b6=vmx_ia64_handle_break 40.957 +END(vmx_dispatch_break_fault) 40.958 + 40.959 + 40.960 +ENTRY(vmx_dispatch_interrupt) 40.961 + cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */ 40.962 + ;; 40.963 + VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 40.964 + ;; 40.965 + alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group 40.966 + mov out0=cr.ivr // pass cr.ivr as first arg 40.967 + add out1=16,sp // pass pointer to pt_regs as second arg 40.968 + 40.969 + ssm psr.ic 40.970 + ;; 40.971 + srlz.i 40.972 + ;; 40.973 + ssm psr.i 40.974 + adds r3=16,r2 // set up second base pointer for SAVE_REST 40.975 + ;; 40.976 + VMX_SAVE_REST 40.977 + movl r14=ia64_leave_hypervisor 40.978 + ;; 40.979 + mov rp=r14 40.980 + br.call.sptk.many b6=vmx_ia64_handle_irq 40.981 +END(vmx_dispatch_interrupt)
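Each vmx_dispatch_* stub above follows the same shape: save the minimal interruption state, turn interruption collection (psr.ic) and then interrupts (psr.i) back on, spill the rest of pt_regs, preload the return pointer with ia64_leave_hypervisor, and branch to a C handler. A minimal sketch of that flow in C, purely illustrative; every helper name here is a hypothetical stand-in for the corresponding assembly macro or label:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-ins for VMX_SAVE_MIN_WITH_COVER_R19, ssm/srlz,
     * VMX_SAVE_REST and ia64_leave_hypervisor */
    static void save_min_with_cover_r19(void) { puts("save minimal state"); }
    static void collection_on(void)           { puts("ssm psr.ic ;; srlz.i"); }
    static void interrupts_on(void)           { puts("ssm psr.i"); }
    static void save_rest(void)               { puts("spill remaining pt_regs"); }
    static void leave_hypervisor(void)        { puts("ia64_leave_hypervisor"); }

    static void dispatch(void (*handler)(uint64_t, uint64_t),
                         uint64_t arg0, uint64_t arg1)
    {
        save_min_with_cover_r19();   /* minimal state, kernel stacks */
        collection_on();             /* psr.ic on before touching more state */
        interrupts_on();             /* psr.i restored */
        save_rest();                 /* second base pointer set up at r2+16 */
        handler(arg0, arg1);         /* e.g. vmx_ia64_handle_irq(cr.ivr, regs) */
        leave_hypervisor();          /* rp was preloaded, so the handler
                                        returns straight into the exit path */
    }

Note the one asymmetry: vmx_interrupt itself, when the interrupt did not arrive from guest (psr.vm) context, saves state inline and returns through ia64_leave_nested rather than ia64_leave_hypervisor.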
41.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 41.2 +++ b/xen/arch/ia64/vmx_minstate.h Mon May 23 15:29:59 2005 +0000 41.3 @@ -0,0 +1,329 @@ 41.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 41.5 +/* 41.6 + * vmx_minstate.h: 41.7 + * Copyright (c) 2005, Intel Corporation. 41.8 + * 41.9 + * This program is free software; you can redistribute it and/or modify it 41.10 + * under the terms and conditions of the GNU General Public License, 41.11 + * version 2, as published by the Free Software Foundation. 41.12 + * 41.13 + * This program is distributed in the hope it will be useful, but WITHOUT 41.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 41.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 41.16 + * more details. 41.17 + * 41.18 + * You should have received a copy of the GNU General Public License along with 41.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 41.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 41.21 + * 41.22 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 41.23 + */ 41.24 + 41.25 +#include <linux/config.h> 41.26 + 41.27 +#include <asm/asmmacro.h> 41.28 +#include <asm/fpu.h> 41.29 +#include <asm/mmu_context.h> 41.30 +#include <asm/offsets.h> 41.31 +#include <asm/pal.h> 41.32 +#include <asm/pgtable.h> 41.33 +#include <asm/processor.h> 41.34 +#include <asm/ptrace.h> 41.35 +#include <asm/system.h> 41.36 +#include <asm/vmx_pal_vsa.h> 41.37 +#include <asm/vmx_vpd.h> 41.38 +#include <asm/cache.h> 41.39 +#include "entry.h" 41.40 + 41.41 +#define VMX_MINSTATE_START_SAVE_MIN \ 41.42 + mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 41.43 + ;; \ 41.44 + mov.m r28=ar.rnat; \ 41.45 + addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 41.46 + ;; \ 41.47 + lfetch.fault.excl.nt1 [r22]; \ 41.48 + addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 41.49 + mov r23=ar.bspstore; /* save ar.bspstore */ \ 41.50 + ;; \ 41.51 + mov ar.bspstore=r22; /* switch to kernel RBS */ \ 41.52 + ;; \ 41.53 + mov r18=ar.bsp; \ 41.54 + mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ 41.55 + 41.56 + 41.57 + 41.58 +#define VMX_MINSTATE_END_SAVE_MIN \ 41.59 + bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ 41.60 + ;; 41.61 + 41.62 + 41.63 +#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL \ 41.64 + /* begin to call pal vps sync_read and cleanup psr.pl */ \ 41.65 + add r25=IA64_VPD_BASE_OFFSET, r21; \ 41.66 + movl r20=__vsa_base; \ 41.67 + ;; \ 41.68 + ld8 r25=[r25]; /* read vpd base */ \ 41.69 + ld8 r20=[r20]; /* read entry point */ \ 41.70 + ;; \ 41.71 + mov r6=r25; \ 41.72 + add r20=PAL_VPS_SYNC_READ,r20; \ 41.73 + ;; \ 41.74 +{ .mii; \ 41.75 + add r22=VPD(VPSR),r25; \ 41.76 + mov r24=ip; \ 41.77 + mov b0=r20; \ 41.78 + ;; \ 41.79 +}; \ 41.80 +{ .mmb; \ 41.81 + add r24 = 0x20, r24; \ 41.82 + mov r16 = cr.ipsr; /* Temp workaround since psr.ic is off */ \ 41.83 + br.cond.sptk b0; /* call the service */ \ 41.84 + ;; \ 41.85 +}; \ 41.86 + ld8 r7=[r22]; \ 41.87 + /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */ \ 41.88 + extr.u r30=r16, IA64_PSR_CPL0_BIT, 2; \ 41.89 + ;; \ 41.90 + dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2; \ 41.91 + ;; \ 41.92 + extr.u r30=r16, IA64_PSR_BE_BIT, 5; \ 41.93 + ;; \ 41.94 + dep r7=r30, r7, IA64_PSR_BE_BIT, 5; \ 41.95 + ;; \ 41.96 + extr.u r30=r16, IA64_PSR_RI_BIT, 2; \ 41.97 + ;; \ 41.98 + dep r7=r30, r7, IA64_PSR_RI_BIT, 2; \ 41.99 + ;; \ 41.100 
+ st8 [r22]=r7; \ 41.101 + ;; 41.102 + 41.103 + 41.104 + 41.105 +#define IA64_CURRENT_REG IA64_KR(CURRENT) /* r21 is reserved for current pointer */ 41.106 +//#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=IA64_CURRENT_REG 41.107 +#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=r21 41.108 + 41.109 +/* 41.110 + * VMX_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 41.111 + * the minimum state necessary that allows us to turn psr.ic back 41.112 + * on. 41.113 + * 41.114 + * Assumed state upon entry: 41.115 + * psr.ic: off 41.116 + * r31: contains saved predicates (pr) 41.117 + * 41.118 + * Upon exit, the state is as follows: 41.119 + * psr.ic: off 41.120 + * r2 = points to &pt_regs.r16 41.121 + * r8 = contents of ar.ccv 41.122 + * r9 = contents of ar.csd 41.123 + * r10 = contents of ar.ssd 41.124 + * r11 = FPSR_DEFAULT 41.125 + * r12 = kernel sp (kernel virtual address) 41.126 + * r13 = points to current task_struct (kernel virtual address) 41.127 + * p15 = TRUE if psr.i is set in cr.ipsr 41.128 + * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: 41.129 + * preserved 41.130 + * 41.131 + * Note that psr.ic is NOT turned on by this macro. This is so that 41.132 + * we can pass interruption state as arguments to a handler. 41.133 + */ 41.134 +#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ 41.135 +/* switch rr7 */ \ 41.136 + movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \ 41.137 + movl r17=(7<<61); \ 41.138 + movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \ 41.139 + movl r22=(6<<61); \ 41.140 + movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1); \ 41.141 + movl r23=(5<<61); \ 41.142 + ;; \ 41.143 + mov rr[r17]=r16; \ 41.144 + mov rr[r22]=r20; \ 41.145 + mov rr[r23]=r18; \ 41.146 + ;; \ 41.147 + srlz.i; \ 41.148 + ;; \ 41.149 + VMX_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ 41.150 + mov r27=ar.rsc; /* M */ \ 41.151 + mov r20=r1; /* A */ \ 41.152 + mov r26=ar.unat; /* M */ \ 41.153 + mov r29=cr.ipsr; /* M */ \ 41.154 + COVER; /* B;; (or nothing) */ \ 41.155 + ;; \ 41.156 + tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ 41.157 +(p6) br.sptk.few vmx_panic; \ 41.158 + mov r1=r16; \ 41.159 +/* mov r21=r16; */ \ 41.160 + /* switch from user to kernel RBS: */ \ 41.161 + ;; \ 41.162 + invala; /* M */ \ 41.163 + SAVE_IFS; \ 41.164 + ;; \ 41.165 + VMX_MINSTATE_START_SAVE_MIN \ 41.166 + adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 41.167 + adds r16=PT(CR_IPSR),r1; \ 41.168 + ;; \ 41.169 + lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ 41.170 + st8 [r16]=r29; /* save cr.ipsr */ \ 41.171 + ;; \ 41.172 + lfetch.fault.excl.nt1 [r17]; \ 41.173 + tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ 41.174 + mov r29=b0 \ 41.175 + ;; \ 41.176 + adds r16=PT(R8),r1; /* initialize first base pointer */ \ 41.177 + adds r17=PT(R9),r1; /* initialize second base pointer */ \ 41.178 + ;; \ 41.179 +.mem.offset 0,0; st8.spill [r16]=r8,16; \ 41.180 +.mem.offset 8,0; st8.spill [r17]=r9,16; \ 41.181 + ;; \ 41.182 +.mem.offset 0,0; st8.spill [r16]=r10,24; \ 41.183 +.mem.offset 8,0; st8.spill [r17]=r11,24; \ 41.184 + ;; \ 41.185 + mov r8=ar.pfs; /* I */ \ 41.186 + mov r9=cr.iip; /* M */ \ 41.187 + mov r10=ar.fpsr; /* M */ \ 41.188 + ;; \ 41.189 + st8 [r16]=r9,16; /* save cr.iip */ \ 41.190 + st8 [r17]=r30,16; /* save cr.ifs */ \ 41.191 + sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ 41.192 + ;; \ 41.193 + st8 [r16]=r26,16; /* save ar.unat */ \ 41.194 + st8 [r17]=r8,16; /* save ar.pfs 
*/ \ 41.195 + shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ 41.196 + ;; \ 41.197 + st8 [r16]=r27,16; /* save ar.rsc */ \ 41.198 + st8 [r17]=r28,16; /* save ar.rnat */ \ 41.199 + ;; /* avoid RAW on r16 & r17 */ \ 41.200 + st8 [r16]=r23,16; /* save ar.bspstore */ \ 41.201 + st8 [r17]=r31,16; /* save predicates */ \ 41.202 + ;; \ 41.203 + st8 [r16]=r29,16; /* save b0 */ \ 41.204 + st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ 41.205 + ;; \ 41.206 +.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ 41.207 +.mem.offset 8,0; st8.spill [r17]=r12,16; \ 41.208 + adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ 41.209 + ;; \ 41.210 +.mem.offset 0,0; st8.spill [r16]=r13,16; \ 41.211 +.mem.offset 8,0; st8.spill [r17]=r10,16; /* save ar.fpsr */ \ 41.212 + mov r13=r21; /* establish `current' */ \ 41.213 + ;; \ 41.214 +.mem.offset 0,0; st8.spill [r16]=r15,16; \ 41.215 +.mem.offset 8,0; st8.spill [r17]=r14,16; \ 41.216 + ;; \ 41.217 +.mem.offset 0,0; st8.spill [r16]=r2,16; \ 41.218 +.mem.offset 8,0; st8.spill [r17]=r3,16; \ 41.219 + adds r2=PT(F6),r1; \ 41.220 + ;; \ 41.221 + .mem.offset 0,0; st8.spill [r16]=r4,16; \ 41.222 + .mem.offset 8,0; st8.spill [r17]=r5,16; \ 41.223 + ;; \ 41.224 + .mem.offset 0,0; st8.spill [r16]=r6,16; \ 41.225 + .mem.offset 8,0; st8.spill [r17]=r7,16; \ 41.226 + mov r20=ar.ccv; \ 41.227 + ;; \ 41.228 + mov r18=cr.iipa; \ 41.229 + mov r4=cr.isr; \ 41.230 + mov r22=ar.unat; \ 41.231 + ;; \ 41.232 + st8 [r16]=r18,16; \ 41.233 + st8 [r17]=r4; \ 41.234 + ;; \ 41.235 + adds r16=PT(EML_UNAT),r1; \ 41.236 + adds r17=PT(AR_CCV),r1; \ 41.237 + ;; \ 41.238 + st8 [r16]=r22,8; \ 41.239 + st8 [r17]=r20; \ 41.240 + mov r4=r24; \ 41.241 + mov r5=r25; \ 41.242 + ;; \ 41.243 + st8 [r16]=r0; \ 41.244 + EXTRA; \ 41.245 + mov r9=ar.csd; \ 41.246 + mov r10=ar.ssd; \ 41.247 + movl r11=FPSR_DEFAULT; /* L-unit */ \ 41.248 + movl r1=__gp; /* establish kernel global pointer */ \ 41.249 + ;; \ 41.250 + PAL_VSA_SYNC_READ_CLEANUP_PSR_PL \ 41.251 + VMX_MINSTATE_END_SAVE_MIN 41.252 + 41.253 +/* 41.254 + * SAVE_REST saves the remainder of pt_regs (with psr.ic on). 41.255 + * 41.256 + * Assumed state upon entry: 41.257 + * psr.ic: on 41.258 + * r2: points to &pt_regs.f6 41.259 + * r3: points to &pt_regs.f7 41.260 + * r4, r5: scratch 41.261 + * r6: points to vpd 41.262 + * r7: vpsr 41.263 + * r9: contents of ar.csd 41.264 + * r10: contents of ar.ssd 41.265 + * r11: FPSR_DEFAULT 41.266 + * 41.267 + * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. 41.268 + */ 41.269 +#define VMX_SAVE_REST \ 41.270 + tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT; /* guest bank0 or bank1 ?
*/ \ 41.271 + ;; \ 41.272 +(pBN0) add r4=VPD(VBGR),r6; \ 41.273 +(pBN0) add r5=VPD(VBGR)+0x8,r6; \ 41.274 +(pBN0) add r7=VPD(VBNAT),r6; \ 41.275 + ;; \ 41.276 +(pBN1) add r5=VPD(VGR)+0x8,r6; \ 41.277 +(pBN1) add r4=VPD(VGR),r6; \ 41.278 +(pBN1) add r7=VPD(VNAT),r6; \ 41.279 + ;; \ 41.280 +.mem.offset 0,0; st8.spill [r4]=r16,16; \ 41.281 +.mem.offset 8,0; st8.spill [r5]=r17,16; \ 41.282 + ;; \ 41.283 +.mem.offset 0,0; st8.spill [r4]=r18,16; \ 41.284 +.mem.offset 8,0; st8.spill [r5]=r19,16; \ 41.285 + ;; \ 41.286 +.mem.offset 0,0; st8.spill [r4]=r20,16; \ 41.287 +.mem.offset 8,0; st8.spill [r5]=r21,16; \ 41.288 + mov r18=b6; \ 41.289 + ;; \ 41.290 +.mem.offset 0,0; st8.spill [r4]=r22,16; \ 41.291 +.mem.offset 8,0; st8.spill [r5]=r23,16; \ 41.292 + mov r19=b7; \ 41.293 + ;; \ 41.294 +.mem.offset 0,0; st8.spill [r4]=r24,16; \ 41.295 +.mem.offset 8,0; st8.spill [r5]=r25,16; \ 41.296 + ;; \ 41.297 +.mem.offset 0,0; st8.spill [r4]=r26,16; \ 41.298 +.mem.offset 8,0; st8.spill [r5]=r27,16; \ 41.299 + ;; \ 41.300 +.mem.offset 0,0; st8.spill [r4]=r28,16; \ 41.301 +.mem.offset 8,0; st8.spill [r5]=r29,16; \ 41.302 + ;; \ 41.303 +.mem.offset 0,0; st8.spill [r4]=r30,16; \ 41.304 +.mem.offset 8,0; st8.spill [r5]=r31,16; \ 41.305 + ;; \ 41.306 + mov r30=ar.unat; \ 41.307 + ;; \ 41.308 + st8 [r7]=r30; \ 41.309 + mov ar.fpsr=r11; /* M-unit */ \ 41.310 + ;; \ 41.311 + stf.spill [r2]=f6,32; \ 41.312 + stf.spill [r3]=f7,32; \ 41.313 + ;; \ 41.314 + stf.spill [r2]=f8,32; \ 41.315 + stf.spill [r3]=f9,32; \ 41.316 + ;; \ 41.317 + stf.spill [r2]=f10; \ 41.318 + stf.spill [r3]=f11; \ 41.319 + ;; \ 41.320 + adds r2=PT(B6)-PT(F10),r2; \ 41.321 + adds r3=PT(B7)-PT(F11),r3; \ 41.322 + ;; \ 41.323 + st8 [r2]=r18,16; /* b6 */ \ 41.324 + st8 [r3]=r19,16; /* b7 */ \ 41.325 + ;; \ 41.326 + st8 [r2]=r9; /* ar.csd */ \ 41.327 + st8 [r3]=r10; /* ar.ssd */ \ 41.328 + ;; 41.329 + 41.330 +#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,) 41.331 +#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19) 41.332 +#define VMX_SAVE_MIN VMX_DO_SAVE_MIN( , mov r30=r0, )
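One detail of VMX_DO_SAVE_MIN worth spelling out: after ar.bspstore is switched to the kernel RBS, `sub r18=r18,r22` leaves the number of dirty backing-store bytes in r18 (RSE.ndirty * 8), and `shl r18=r18,16` positions that count as the loadrs field of an ar.rsc image, which is what gets saved for "loadrs" above. A small C sketch of that arithmetic (the helper name is hypothetical):

    #include <stdint.h>

    /* bsp/bspstore are the values the macro reads into r18 and r22 */
    static inline uint64_t rsc_loadrs(uint64_t bsp, uint64_t bspstore)
    {
        uint64_t ndirty_bytes = bsp - bspstore;  /* RSE.ndirty * 8 */
        return ndirty_bytes << 16;               /* ar.rsc keeps loadrs at bits 16+ */
    }

Presumably the exit path can then hand this value straight to a loadrs to reload the dirty frames on the way back out.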
42.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 42.2 +++ b/xen/arch/ia64/vmx_phy_mode.c Mon May 23 15:29:59 2005 +0000 42.3 @@ -0,0 +1,393 @@ 42.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 42.5 +/* 42.6 + * vmx_phy_mode.c: emulating domain physical mode. 42.7 + * Copyright (c) 2005, Intel Corporation. 42.8 + * 42.9 + * This program is free software; you can redistribute it and/or modify it 42.10 + * under the terms and conditions of the GNU General Public License, 42.11 + * version 2, as published by the Free Software Foundation. 42.12 + * 42.13 + * This program is distributed in the hope it will be useful, but WITHOUT 42.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 42.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 42.16 + * more details. 42.17 + * 42.18 + * You should have received a copy of the GNU General Public License along with 42.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 42.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 42.21 + * 42.22 + * Arun Sharma (arun.sharma@intel.com) 42.23 + * Kun Tian (Kevin Tian) (kevin.tian@intel.com) 42.24 + * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) 42.25 + */ 42.26 + 42.27 + 42.28 +#include <asm/processor.h> 42.29 +#include <asm/gcc_intrin.h> 42.30 +#include <asm/vmx_phy_mode.h> 42.31 +#include <xen/sched.h> 42.32 +#include <asm/pgtable.h> 42.33 + 42.34 + 42.35 +int valid_mm_mode[8] = { 42.36 + GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */ 42.37 + INV_MODE, 42.38 + INV_MODE, 42.39 + GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */ 42.40 + INV_MODE, 42.41 + GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */ 42.42 + INV_MODE, 42.43 + GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/ 42.44 +}; 42.45 + 42.46 +/* 42.47 + * Special notes: 42.48 + * - Index by it/dt/rt sequence 42.49 + * - Only existing mode transitions are allowed in this table 42.50 + * - RSE is placed at lazy mode when emulating guest partial mode 42.51 + * - If gva happens to be rr0 and rr4, only allowed case is identity 42.52 + * mapping (gva=gpa), or panic! (How?) 42.53 + */ 42.54 +int mm_switch_table[8][8] = { 42.55 + /* 2004/09/12(Kevin): Allow switch to self */ 42.56 + /* 42.57 + * (it,dt,rt): (0,0,0) -> (1,1,1) 42.58 + * This kind of transition usually occurs in the very early 42.59 + * stage of Linux boot up procedure. Another case is in efi 42.60 + * and pal calls. (see "arch/ia64/kernel/head.S") 42.61 + * 42.62 + * (it,dt,rt): (0,0,0) -> (0,1,1) 42.63 + * This kind of transition is found when OSYa exits efi boot 42.64 + * service. Due to gva = gpa in this case (Same region), 42.65 + * data access can be satisfied though itlb entry for physical 42.66 + * emulation is hit. 42.67 + */ 42.68 + SW_SELF,0, 0, SW_NOP, 0, 0, 0, SW_P2V, 42.69 + 0, 0, 0, 0, 0, 0, 0, 0, 42.70 + 0, 0, 0, 0, 0, 0, 0, 0, 42.71 + /* 42.72 + * (it,dt,rt): (0,1,1) -> (1,1,1) 42.73 + * This kind of transition is found in OSYa. 42.74 + * 42.75 + * (it,dt,rt): (0,1,1) -> (0,0,0) 42.76 + * This kind of transition is found in OSYa 42.77 + */ 42.78 + SW_NOP, 0, 0, SW_SELF,0, 0, 0, SW_P2V, 42.79 + /* (1,0,0)->(1,1,1) */ 42.80 + 0, 0, 0, 0, 0, 0, 0, SW_P2V, 42.81 + /* 42.82 + * (it,dt,rt): (1,0,1) -> (1,1,1) 42.83 + * This kind of transition usually occurs when Linux returns 42.84 + * from the low level TLB miss handlers. 
42.85 + * (see "arch/ia64/kernel/ivt.S") 42.86 + */ 42.87 + 0, 0, 0, 0, 0, SW_SELF,0, SW_P2V, 42.88 + 0, 0, 0, 0, 0, 0, 0, 0, 42.89 + /* 42.90 + * (it,dt,rt): (1,1,1) -> (1,0,1) 42.91 + * This kind of transition usually occurs in Linux low level 42.92 + * TLB miss handler. (see "arch/ia64/kernel/ivt.S") 42.93 + * 42.94 + * (it,dt,rt): (1,1,1) -> (0,0,0) 42.95 + * This kind of transition usually occurs in pal and efi calls, 42.96 + * which requires running in physical mode. 42.97 + * (see "arch/ia64/kernel/head.S") 42.98 + * (1,1,1)->(1,0,0) 42.99 + */ 42.100 + 42.101 + SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF, 42.102 +}; 42.103 + 42.104 +void 42.105 +physical_mode_init(VCPU *vcpu) 42.106 +{ 42.107 + UINT64 psr; 42.108 + struct domain * d = vcpu->domain; 42.109 + 42.110 + vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID+((d->domain_id)<<3); 42.111 + /* FIXME */ 42.112 +#if 0 42.113 + vcpu->domain->arch.emul_phy_rr0.ps = 28; /* set page size to 256M */ 42.114 +#endif 42.115 + vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT; /* set page size to 4k */ 42.116 + vcpu->domain->arch.emul_phy_rr0.ve = 1; /* enable VHPT walker on this region */ 42.117 + 42.118 + vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->domain_id)<<3) + 4; 42.119 + vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT; /* set page size to 4k */ 42.120 + vcpu->domain->arch.emul_phy_rr4.ve = 1; /* enable VHPT walker on this region */ 42.121 + 42.122 + vcpu->arch.old_rsc = 0; 42.123 + vcpu->arch.mode_flags = GUEST_IN_PHY; 42.124 + 42.125 + psr = ia64_clear_ic(); 42.126 + 42.127 + ia64_set_rr((VRN0<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr0.rrval); 42.128 + ia64_srlz_d(); 42.129 + ia64_set_rr((VRN4<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr4.rrval); 42.130 + ia64_srlz_d(); 42.131 +#if 0 42.132 + /* FIXME: temp workaround to support guest physical mode */ 42.133 +ia64_itr(0x1, IA64_TEMP_PHYSICAL, dom0_start, 42.134 + pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)), 42.135 + 28); 42.136 +ia64_itr(0x2, IA64_TEMP_PHYSICAL, dom0_start, 42.137 + pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)), 42.138 + 28); 42.139 +ia64_srlz_i(); 42.140 +#endif 42.141 + ia64_set_psr(psr); 42.142 + ia64_srlz_i(); 42.143 + return; 42.144 +} 42.145 + 42.146 +extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages); 42.147 +void 42.148 +physical_itlb_miss(VCPU *vcpu, u64 vadr) 42.149 +{ 42.150 + u64 psr; 42.151 + IA64_PSR vpsr; 42.152 + u64 mppn,gppn; 42.153 + vpsr.val=vmx_vcpu_get_psr(vcpu); 42.154 + gppn=(vadr<<1)>>13; 42.155 + mppn = get_mfn(DOMID_SELF,gppn,1); 42.156 + mppn=(mppn<<12)|(vpsr.cpl<<7)|PHY_PAGE_WB; 42.157 + 42.158 + psr=ia64_clear_ic(); 42.159 + ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT); 42.160 + ia64_set_psr(psr); 42.161 + ia64_srlz_i(); 42.162 + return; 42.163 +} 42.164 + 42.165 +void 42.166 +physical_dtlb_miss(VCPU *vcpu, u64 vadr) 42.167 +{ 42.168 + u64 psr; 42.169 + IA64_PSR vpsr; 42.170 + u64 mppn,gppn; 42.171 + vpsr.val=vmx_vcpu_get_psr(vcpu); 42.172 + gppn=(vadr<<1)>>13; 42.173 + mppn = get_mfn(DOMID_SELF,gppn,1); 42.174 + mppn=(mppn<<12)|(vpsr.cpl<<7); 42.175 + if(vadr>>63) 42.176 + mppn |= PHY_PAGE_UC; 42.177 + else 42.178 + mppn |= PHY_PAGE_WB; 42.179 + 42.180 + psr=ia64_clear_ic(); 42.181 + ia64_itc(2,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT); 42.182 + ia64_set_psr(psr); 42.183 + ia64_srlz_i(); 42.184 + return; 42.185 +} 42.186 + 42.187 +void 42.188 +vmx_init_all_rr(VCPU *vcpu) 42.189 +{ 42.190 + VMX(vcpu,vrr[VRN0]) = 0x38; 42.191 + VMX(vcpu,vrr[VRN1]) = 0x38; 42.192 + 
VMX(vcpu,vrr[VRN2]) = 0x38; 42.193 + VMX(vcpu,vrr[VRN3]) = 0x38; 42.194 + VMX(vcpu,vrr[VRN4]) = 0x38; 42.195 + VMX(vcpu,vrr[VRN5]) = 0x38; 42.196 + VMX(vcpu,vrr[VRN6]) = 0x60; 42.197 + VMX(vcpu,vrr[VRN7]) = 0x60; 42.198 + 42.199 + VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38); 42.200 + VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60); 42.201 + VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60); 42.202 +} 42.203 + 42.204 +void 42.205 +vmx_load_all_rr(VCPU *vcpu) 42.206 +{ 42.207 + unsigned long psr; 42.208 + 42.209 + psr = ia64_clear_ic(); 42.210 + 42.211 + /* WARNING: virtual mode and physical mode must not co-exist 42.212 + * in the same region 42.213 + */ 42.214 + if (is_physical_mode(vcpu)) { 42.215 + ia64_set_rr((VRN0 << VRN_SHIFT), 42.216 + vcpu->domain->arch.emul_phy_rr0.rrval); 42.217 + ia64_set_rr((VRN4 << VRN_SHIFT), 42.218 + vcpu->domain->arch.emul_phy_rr4.rrval); 42.219 + } else { 42.220 + ia64_set_rr((VRN0 << VRN_SHIFT), 42.221 + vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0]))); 42.222 + ia64_set_rr((VRN4 << VRN_SHIFT), 42.223 + vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4]))); 42.224 + } 42.225 + 42.226 +#if 1 42.227 + /* rr567 will be postponed to last point when resuming back to guest */ 42.228 + ia64_set_rr((VRN1 << VRN_SHIFT), 42.229 + vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1]))); 42.230 + ia64_set_rr((VRN2 << VRN_SHIFT), 42.231 + vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2]))); 42.232 + ia64_set_rr((VRN3 << VRN_SHIFT), 42.233 + vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3]))); 42.234 +#endif 42.235 + ia64_srlz_d(); 42.236 + ia64_set_psr(psr); 42.237 + ia64_srlz_i(); 42.238 +} 42.239 + 42.240 +void 42.241 +switch_to_physical_rid(VCPU *vcpu) 42.242 +{ 42.243 + UINT64 psr; 42.244 + 42.245 + /* Save original virtual mode rr[0] and rr[4] */ 42.246 + 42.247 + psr=ia64_clear_ic(); 42.248 + ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval); 42.249 + ia64_srlz_d(); 42.250 + ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval); 42.251 + ia64_srlz_d(); 42.252 + 42.253 + ia64_set_psr(psr); 42.254 + ia64_srlz_i(); 42.255 + return; 42.256 +} 42.257 + 42.258 + 42.259 +void 42.260 +switch_to_virtual_rid(VCPU *vcpu) 42.261 +{ 42.262 + UINT64 psr; 42.263 + ia64_rr mrr; 42.264 + 42.265 + psr=ia64_clear_ic(); 42.266 + 42.267 + mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT); 42.268 + mrr.rid = VRID_2_MRID(vcpu,mrr.rid); 42.269 +//VRID_2_MRID(vcpu,mrr.rid); 42.270 + mrr.ve = 1; 42.271 + ia64_set_rr(VRN0<<VRN_SHIFT, mrr.rrval ); 42.272 + ia64_srlz_d(); 42.273 + mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT); 42.274 + mrr.rid = VRID_2_MRID(vcpu,mrr.rid); 42.275 + mrr.ve = 1; 42.276 + ia64_set_rr(VRN4<<VRN_SHIFT, mrr.rrval ); 42.277 + ia64_srlz_d(); 42.278 + ia64_set_psr(psr); 42.279 + ia64_srlz_i(); 42.280 + return; 42.281 +} 42.282 + 42.283 +static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr) 42.284 +{ 42.285 + return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; 42.286 +} 42.287 + 42.288 +void 42.289 +switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr) 42.290 +{ 42.291 + int act; 42.292 + REGS * regs=vcpu_regs(vcpu); 42.293 + act = mm_switch_action(old_psr, new_psr); 42.294 + switch (act) { 42.295 + case SW_V2P: 42.296 + vcpu->arch.old_rsc = regs->ar_rsc; 42.297 + switch_to_physical_rid(vcpu); 42.298 + /* 42.299 + * Set rse to enforced lazy, to prevent active rse save/restore when 42.300 + * in guest physical mode.
42.301 + */ 42.302 + regs->ar_rsc &= ~(IA64_RSC_MODE); 42.303 + vcpu->arch.mode_flags |= GUEST_IN_PHY; 42.304 + break; 42.305 + case SW_P2V: 42.306 + switch_to_virtual_rid(vcpu); 42.307 + /* 42.308 + * recover old mode which is saved when entering 42.309 + * guest physical mode 42.310 + */ 42.311 + regs->ar_rsc = vcpu->arch.old_rsc; 42.312 + vcpu->arch.mode_flags &= ~GUEST_IN_PHY; 42.313 + break; 42.314 + case SW_SELF: 42.315 + printf("Switch to self-0x%lx!!! MM mode doesn't change...\n", 42.316 + old_psr.val); 42.317 + break; 42.318 + case SW_NOP: 42.319 + printf("No action required for mode transition: (0x%lx -> 0x%lx)\n", 42.320 + old_psr.val, new_psr.val); 42.321 + break; 42.322 + default: 42.323 + /* Sanity check */ 42.324 + printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val); 42.325 + panic("Unexpected virtual <--> physical mode transition"); 42.326 + break; 42.327 + } 42.328 + return; 42.329 +} 42.330 + 42.331 + 42.332 + 42.333 +/* 42.334 + * In physical mode, inserting tc/tr for regions 0 and 4 uses 42.335 + * RID[0] and RID[4], which are for physical mode emulation. 42.336 + * However, what those inserted tc/tr entries want is the rid for 42.337 + * virtual mode. So the original virtual rid needs to be restored 42.338 + * before the insert. 42.339 + * 42.340 + * Operations which require such a switch include: 42.341 + * - insertions (itc.*, itr.*) 42.342 + * - purges (ptc.* and ptr.*) 42.343 + * - tpa 42.344 + * - tak 42.345 + * - thash?, ttag? 42.346 + * All of the above need the actual virtual rid for the destination entry. 42.347 + */ 42.348 + 42.349 +void 42.350 +check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr) 42.351 +{ 42.352 + 42.353 + if ( (old_psr.dt != new_psr.dt ) || 42.354 + (old_psr.it != new_psr.it ) || 42.355 + (old_psr.rt != new_psr.rt ) 42.356 + ) { 42.357 + switch_mm_mode (vcpu, old_psr, new_psr); 42.358 + } 42.359 + 42.360 + return; 42.361 +} 42.362 + 42.363 + 42.364 +/* 42.365 + * In physical mode, inserting tc/tr for regions 0 and 4 uses 42.366 + * RID[0] and RID[4], which are for physical mode emulation. 42.367 + * However, what those inserted tc/tr entries want is the rid for 42.368 + * virtual mode. So the original virtual rid needs to be restored 42.369 + * before the insert. 42.370 + * 42.371 + * Operations which require such a switch include: 42.372 + * - insertions (itc.*, itr.*) 42.373 + * - purges (ptc.* and ptr.*) 42.374 + * - tpa 42.375 + * - tak 42.376 + * - thash?, ttag? 42.377 + * All of the above need the actual virtual rid for the destination entry. 42.378 + */ 42.379 + 42.380 +void 42.381 +prepare_if_physical_mode(VCPU *vcpu) 42.382 +{ 42.383 + if (is_physical_mode(vcpu)) 42.384 + switch_to_virtual_rid(vcpu); 42.385 + return; 42.386 +} 42.387 + 42.388 +/* Recover always follows prepare */ 42.389 +void 42.390 +recover_if_physical_mode(VCPU *vcpu) 42.391 +{ 42.392 + if (is_physical_mode(vcpu)) 42.393 + switch_to_physical_rid(vcpu); 42.394 + return; 42.395 +} 42.396 +
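The two big arrays at the top of this file are indexed by the packed (it, dt, rt) translation bits, as their inline comments note. A sketch of that indexing, assuming MODE_IND() packs psr.it/psr.dt/psr.rt as (it<<2)|(dt<<1)|rt to match the comment ordering; the SW_* values here are illustrative only, not the real definitions:

    #include <stdio.h>

    enum { SW_SELF = 1, SW_NOP, SW_P2V, SW_V2P };  /* illustrative values */

    static int mode_ind(int it, int dt, int rt)
    {
        return (it << 2) | (dt << 1) | rt;         /* assumed MODE_IND() packing */
    }

    int main(void)
    {
        /* (1,1,1) -> (1,0,1): fully virtual mode entering the low level TLB
         * miss handler, which the table above marks as SW_V2P */
        printf("index %d -> index %d\n",
               mode_ind(1, 1, 1), mode_ind(1, 0, 1));   /* prints 7 -> 5 */
        return 0;
    }

Row 7, column 5 of mm_switch_table is indeed SW_V2P, matching the "(1,1,1) -> (1,0,1)" comment.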
43.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 43.2 +++ b/xen/arch/ia64/vmx_process.c Mon May 23 15:29:59 2005 +0000 43.3 @@ -0,0 +1,345 @@ 43.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 43.5 +/* 43.6 + * vmx_process.c: handling VMX architecture-related VM exits 43.7 + * Copyright (c) 2005, Intel Corporation. 43.8 + * 43.9 + * This program is free software; you can redistribute it and/or modify it 43.10 + * under the terms and conditions of the GNU General Public License, 43.11 + * version 2, as published by the Free Software Foundation. 43.12 + * 43.13 + * This program is distributed in the hope it will be useful, but WITHOUT 43.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 43.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 43.16 + * more details. 43.17 + * 43.18 + * You should have received a copy of the GNU General Public License along with 43.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 43.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 43.21 + * 43.22 + * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> 43.23 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 43.24 + */ 43.25 + 43.26 +#include <xen/config.h> 43.27 +#include <xen/lib.h> 43.28 +#include <xen/errno.h> 43.29 +#include <xen/sched.h> 43.30 +#include <xen/smp.h> 43.31 +#include <asm/ptrace.h> 43.32 +#include <xen/delay.h> 43.33 + 43.34 +#include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */ 43.35 +#include <asm/sal.h> /* FOR struct ia64_sal_retval */ 43.36 + 43.37 +#include <asm/system.h> 43.38 +#include <asm/io.h> 43.39 +#include <asm/processor.h> 43.40 +#include <asm/desc.h> 43.41 +//#include <asm/ldt.h> 43.42 +#include <xen/irq.h> 43.43 +#include <xen/event.h> 43.44 +#include <asm/regionreg.h> 43.45 +#include <asm/privop.h> 43.46 +#include <asm/ia64_int.h> 43.47 +#include <asm/hpsim_ssc.h> 43.48 +#include <asm/dom_fw.h> 43.49 +#include <asm/vmx_vcpu.h> 43.50 +#include <asm/kregs.h> 43.51 +#include <asm/vmx_mm_def.h> 43.52 +/* reset all PSR fields to 0, except up,mfl,mfh,pk,dt,rt,mc,it */ 43.53 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034 43.54 + 43.55 + 43.56 +extern struct ia64_sal_retval pal_emulator_static(UINT64); 43.57 +extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64); 43.58 +extern void rnat_consumption (VCPU *vcpu); 43.59 + 43.60 +IA64FAULT 43.61 +vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim) 43.62 +{ 43.63 + static int first_time = 1; 43.64 + struct domain *d = (struct domain *) current->domain; 43.65 + struct exec_domain *ed = (struct exec_domain *) current; 43.66 + extern unsigned long running_on_sim; 43.67 + unsigned long i, sal_param[8]; 43.68 + 43.69 +#if 0 43.70 + if (first_time) { 43.71 + if (platform_is_hp_ski()) running_on_sim = 1; 43.72 + else running_on_sim = 0; 43.73 + first_time = 0; 43.74 + } 43.75 + if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant 43.76 + if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs); 43.77 + else do_ssc(vcpu_get_gr(current,36), regs); 43.78 + } 43.79 +#endif 43.80 + if (iim == d->breakimm) { 43.81 + struct ia64_sal_retval x; 43.82 + switch (regs->r2) { 43.83 + case FW_HYPERCALL_PAL_CALL: 43.84 + //printf("*** PAL hypercall: index=%d\n",regs->r28); 43.85 + //FIXME: This should call a C routine 43.86 + x = pal_emulator_static(VMX_VPD(ed, vgr[12])); 43.87 + regs->r8 = x.status; regs->r9 = x.v0;
43.88 + regs->r10 = x.v1; regs->r11 = x.v2; 43.89 +#if 0 43.90 + if (regs->r8) 43.91 + printk("Failed vpal emulation, with index:0x%lx\n", 43.92 + VMX_VPD(ed, vgr[12])); 43.93 +#endif 43.94 + break; 43.95 + case FW_HYPERCALL_SAL_CALL: 43.96 + for (i = 0; i < 8; i++) 43.97 + vmx_vcpu_get_gr(ed, 32+i, &sal_param[i]); 43.98 + x = sal_emulator(sal_param[0], sal_param[1], 43.99 + sal_param[2], sal_param[3], 43.100 + sal_param[4], sal_param[5], 43.101 + sal_param[6], sal_param[7]); 43.102 + regs->r8 = x.status; regs->r9 = x.v0; 43.103 + regs->r10 = x.v1; regs->r11 = x.v2; 43.104 +#if 0 43.105 + if (regs->r8) 43.106 + printk("Failed vsal emulation, with index:0x%lx\n", 43.107 + sal_param[0]); 43.108 +#endif 43.109 + break; 43.110 + case FW_HYPERCALL_EFI_RESET_SYSTEM: 43.111 + printf("efi.reset_system called "); 43.112 + if (current->domain == dom0) { 43.113 + printf("(by dom0)\n "); 43.114 + (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL); 43.115 + } 43.116 + printf("(not supported for non-0 domain)\n"); 43.117 + regs->r8 = EFI_UNSUPPORTED; 43.118 + break; 43.119 + case FW_HYPERCALL_EFI_GET_TIME: 43.120 + { 43.121 + unsigned long *tv, *tc; 43.122 + fooefi(); 43.123 + vmx_vcpu_get_gr(ed, 32, &tv); 43.124 + vmx_vcpu_get_gr(ed, 33, &tc); 43.125 + printf("efi_get_time(%p,%p) called...",tv,tc); 43.126 + tv = __va(translate_domain_mpaddr(tv)); 43.127 + if (tc) tc = __va(translate_domain_mpaddr(tc)); 43.128 + regs->r8 = (*efi.get_time)(tv,tc); 43.129 + printf("and returns %lx\n",regs->r8); 43.130 + } 43.131 + break; 43.132 + case FW_HYPERCALL_EFI_SET_TIME: 43.133 + case FW_HYPERCALL_EFI_GET_WAKEUP_TIME: 43.134 + case FW_HYPERCALL_EFI_SET_WAKEUP_TIME: 43.135 + // FIXME: need fixes in efi.h from 2.6.9 43.136 + case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP: 43.137 + // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED 43.138 + // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS 43.139 + // POINTER ARGUMENTS WILL BE VIRTUAL!! 
43.140 + case FW_HYPERCALL_EFI_GET_VARIABLE: 43.141 + // FIXME: need fixes in efi.h from 2.6.9 43.142 + case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE: 43.143 + case FW_HYPERCALL_EFI_SET_VARIABLE: 43.144 + case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT: 43.145 + // FIXME: need fixes in efi.h from 2.6.9 43.146 + regs->r8 = EFI_UNSUPPORTED; 43.147 + break; 43.148 + } 43.149 +#if 0 43.150 + if (regs->r8) 43.151 + printk("Failed vgfw emulation, with index:0x%lx\n", 43.152 + regs->r2); 43.153 +#endif 43.154 + vmx_vcpu_increment_iip(current); 43.155 + } else 43.156 + vmx_reflect_interruption(ifa,isr,iim,11); 43.157 +} 43.158 + 43.159 +static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800, 43.160 + 0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000, 43.161 + 0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600, 43.162 + 0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000, 43.163 + 0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00, 43.164 + 0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400, 43.165 + 0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00, 43.166 + 0x7f00, 43.167 +}; 43.168 + 43.169 + 43.170 + 43.171 +void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim, 43.172 + UINT64 vector) 43.173 +{ 43.174 + VCPU *vcpu = current; 43.175 + REGS *regs=vcpu_regs(vcpu); 43.176 + UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu); 43.177 + if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){ 43.178 + panic("Guest nested fault!"); 43.179 + } 43.180 + VPD_CR(vcpu,isr)=isr; 43.181 + VPD_CR(vcpu,iipa) = regs->cr_iip; 43.182 + vector=vec2off[vector]; 43.183 + if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) 43.184 + VPD_CR(vcpu,iim) = iim; 43.185 + else { 43.186 + set_ifa_itir_iha(vcpu,ifa,1,1,1); 43.187 + } 43.188 + inject_guest_interruption(vcpu, vector); 43.189 +} 43.190 + 43.191 +// ONLY gets called from ia64_leave_kernel 43.192 +// ONLY call with interrupts disabled?? (else might miss one?) 43.193 +// NEVER successful if already reflecting a trap/fault because psr.i==0 43.194 +void vmx_deliver_pending_interrupt(struct pt_regs *regs) 43.195 +{ 43.196 + struct domain *d = current->domain; 43.197 + struct exec_domain *ed = current; 43.198 + // FIXME: Will this work properly if doing an RFI??? 
43.199 + if (!is_idle_task(d) ) { // always comes from guest 43.200 + //vcpu_poke_timer(ed); 43.201 + //if (vcpu_deliverable_interrupts(ed)) { 43.202 + // unsigned long isr = regs->cr_ipsr & IA64_PSR_RI; 43.203 + // foodpi(); 43.204 + // reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR); 43.205 + //} 43.206 + extern void vmx_dorfirfi(void); 43.207 + struct pt_regs *user_regs = vcpu_regs(current); 43.208 + 43.209 + if (user_regs != regs) 43.210 + printk("WARNING: checking pending interrupt in nested interrupt!!!\n"); 43.211 + if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi) 43.212 + return; 43.213 + vmx_check_pending_irq(ed); 43.214 + } 43.215 +} 43.216 + 43.217 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr); 43.218 + 43.219 +/* We came here because the H/W VHPT walker failed to find an entry */ 43.220 +void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr) 43.221 +{ 43.222 + IA64_PSR vpsr; 43.223 + CACHE_LINE_TYPE type; 43.224 + u64 vhpt_adr; 43.225 + ISR misr; 43.226 + ia64_rr vrr; 43.227 + REGS *regs; 43.228 + thash_cb_t *vtlb, *vhpt; 43.229 + thash_data_t *data, me; 43.230 + vtlb=vmx_vcpu_get_vtlb(vcpu); 43.231 +#ifdef VTLB_DEBUG 43.232 + check_vtlb_sanity(vtlb); 43.233 + dump_vtlb(vtlb); 43.234 +#endif 43.235 + vpsr.val = vmx_vcpu_get_psr(vcpu); 43.236 + regs = vcpu_regs(vcpu); 43.237 + misr.val=regs->cr_isr; 43.238 +/* TODO 43.239 + if(vcpu->domain->id && vec == 2 && 43.240 + vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){ 43.241 + emulate_ins(&v); 43.242 + return; 43.243 + } 43.244 +*/ 43.245 + 43.246 + if((vec==1)&&(!vpsr.it)){ 43.247 + physical_itlb_miss(vcpu, vadr); 43.248 + return; 43.249 + } 43.250 + if((vec==2)&&(!vpsr.dt)){ 43.251 + physical_dtlb_miss(vcpu, vadr); 43.252 + return; 43.253 + } 43.254 + vrr = vmx_vcpu_rr(vcpu,vadr); 43.255 + if(vec == 1) type = ISIDE_TLB; 43.256 + else if(vec == 2) type = DSIDE_TLB; 43.257 + else panic("wrong vec\n"); 43.258 + 43.259 +// prepare_if_physical_mode(vcpu); 43.260 + 43.261 + if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){ 43.262 + if ( data->ps != vrr.ps ) { 43.263 + machine_tlb_insert(vcpu, data); 43.264 + } 43.265 + else { 43.266 + thash_insert(vtlb->ts->vhpt,data,vadr); 43.267 + } 43.268 + }else if(type == DSIDE_TLB){ 43.269 + if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){ 43.270 + if(vpsr.ic){ 43.271 + vmx_vcpu_set_isr(vcpu, misr.val); 43.272 + alt_dtlb(vcpu, vadr); 43.273 + return IA64_FAULT; 43.274 + } else{ 43.275 + if(misr.sp){ 43.276 + //TODO lds emulation 43.277 + panic("Don't support speculation load"); 43.278 + }else{ 43.279 + nested_dtlb(vcpu); 43.280 + return IA64_FAULT; 43.281 + } 43.282 + } 43.283 + } else{ 43.284 + vmx_vcpu_thash(vcpu, vadr, &vhpt_adr); 43.285 + vrr=vmx_vcpu_rr(vcpu,vhpt_adr); 43.286 + data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB); 43.287 + if(data){ 43.288 + if(vpsr.ic){ 43.289 + vmx_vcpu_set_isr(vcpu, misr.val); 43.290 + dtlb_fault(vcpu, vadr); 43.291 + return IA64_FAULT; 43.292 + }else{ 43.293 + if(misr.sp){ 43.294 + //TODO lds emulation 43.295 + panic("Don't support speculation load"); 43.296 + }else{ 43.297 + nested_dtlb(vcpu); 43.298 + return IA64_FAULT; 43.299 + } 43.300 + } 43.301 + }else{ 43.302 + if(vpsr.ic){ 43.303 + vmx_vcpu_set_isr(vcpu, misr.val); 43.304 + dvhpt_fault(vcpu, vadr); 43.305 + return IA64_FAULT; 43.306 + }else{ 43.307 + if(misr.sp){ 43.308 + //TODO lds emulation 43.309 + panic("Don't support speculation load"); 43.310 + }else{ 43.311 + nested_dtlb(vcpu); 43.312 + return IA64_FAULT; 43.313 + } 43.314 + } 43.315 + } 43.316 + } 43.317 + }else if(type 
== ISIDE_TLB){ 43.318 + if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){ 43.319 + if(!vpsr.ic){ 43.320 + misr.ni=1; 43.321 + } 43.322 + vmx_vcpu_set_isr(vcpu, misr.val); 43.323 + alt_itlb(vcpu, vadr); 43.324 + return IA64_FAULT; 43.325 + } else{ 43.326 + vmx_vcpu_thash(vcpu, vadr, &vhpt_adr); 43.327 + vrr=vmx_vcpu_rr(vcpu,vhpt_adr); 43.328 + data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB); 43.329 + if(data){ 43.330 + if(!vpsr.ic){ 43.331 + misr.ni=1; 43.332 + } 43.333 + vmx_vcpu_set_isr(vcpu, misr.val); 43.334 + itlb_fault(vcpu, vadr); 43.335 + return IA64_FAULT; 43.336 + }else{ 43.337 + if(!vpsr.ic){ 43.338 + misr.ni=1; 43.339 + } 43.340 + vmx_vcpu_set_isr(vcpu, misr.val); 43.341 + ivhpt_fault(vcpu, vadr); 43.342 + return IA64_FAULT; 43.343 + } 43.344 + } 43.345 + } 43.346 +} 43.347 + 43.348 +
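The vec2off[] table in this file follows a regular pattern: the first 20 vectors sit 0x400 apart starting at offset 0, and the remaining vectors sit 0x100 apart starting at 0x5000, matching the .org directives in vmx_ivt.S earlier in this changeset. A closed form, useful for cross-checking the table:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t vec_to_off(unsigned int vec)
    {
        return (vec < 20) ? vec * 0x400ULL
                          : 0x5000ULL + (vec - 20) * 0x100ULL;
    }

    int main(void)
    {
        assert(vec_to_off(11) == 0x2c00);  /* break instruction */
        assert(vec_to_off(37) == 0x6100);  /* virtualization fault */
        assert(vec_to_off(67) == 0x7f00);  /* last table entry */
        return 0;
    }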
44.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 44.2 +++ b/xen/arch/ia64/vmx_utility.c Mon May 23 15:29:59 2005 +0000 44.3 @@ -0,0 +1,659 @@ 44.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 44.5 +/* 44.6 + * vmx_utility.c: 44.7 + * Copyright (c) 2005, Intel Corporation. 44.8 + * 44.9 + * This program is free software; you can redistribute it and/or modify it 44.10 + * under the terms and conditions of the GNU General Public License, 44.11 + * version 2, as published by the Free Software Foundation. 44.12 + * 44.13 + * This program is distributed in the hope it will be useful, but WITHOUT 44.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 44.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 44.16 + * more details. 44.17 + * 44.18 + * You should have received a copy of the GNU General Public License along with 44.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 44.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 44.21 + * 44.22 + * Shaofan Li (Susue Li) <susie.li@intel.com> 44.23 + * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> 44.24 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 44.25 + */ 44.26 + 44.27 +#include <xen/types.h> 44.28 +#include <asm/vmx_vcpu.h> 44.29 +#include <asm/processor.h> 44.30 +#include <asm/vmx_mm_def.h> 44.31 + 44.32 + 44.33 +/* 44.34 + * Return: 44.35 + * 0: Not reserved indirect registers 44.36 + * 1: Is reserved indirect registers 44.37 + */ 44.38 +int 44.39 +is_reserved_indirect_register ( 44.40 + int type, 44.41 + int index ) 44.42 +{ 44.43 + switch (type) { 44.44 + case IA64_CPUID: 44.45 + if ( index >= 5 ) { 44.46 + return 1; 44.47 + } 44.48 + 44.49 + case IA64_DBR: 44.50 + case IA64_IBR: 44.51 + //bugbugbug:check with pal about the max ibr/dbr!!!! 44.52 + break; 44.53 + 44.54 + case IA64_PMC: 44.55 + //bugbugbug:check with pal about the max ibr/dbr!!!! 44.56 + break; 44.57 + 44.58 + case IA64_PMD: 44.59 + //bugbugbug:check with pal about the max ibr/dbr!!!! 44.60 + break; 44.61 + 44.62 + case IA64_PKR: 44.63 + //bugbugbug:check with pal about the max pkr!!!! 44.64 + break; 44.65 + 44.66 + case IA64_RR: 44.67 + //bugbugbug:check with pal about the max rr!!!! 44.68 + break; 44.69 + 44.70 + default: 44.71 + panic ("Unsupported instruction!"); 44.72 + } 44.73 + 44.74 + return 0; 44.75 + 44.76 +} 44.77 + 44.78 +/* 44.79 + * Return: 44.80 + * Set all ignored fields in value to 0 and return 44.81 + */ 44.82 +u64 44.83 +indirect_reg_igfld_MASK ( 44.84 + int type, 44.85 + int index, 44.86 + u64 value 44.87 + ) 44.88 +{ 44.89 + u64 nvalue; 44.90 + 44.91 + nvalue = value; 44.92 + switch ( type ) { 44.93 + case IA64_CPUID: 44.94 + if ( index == 2 ) { 44.95 + nvalue = 0; 44.96 + } 44.97 + break; 44.98 + 44.99 + case IA64_DBR: 44.100 + case IA64_IBR: 44.101 + /* Refer to SDM Vol2 Table 7-1,7-2 */ 44.102 + if ( index % 2 != 0) { 44.103 + /* Ignore field: {61:60} */ 44.104 + nvalue = value & (~MASK (60, 2)); 44.105 + } 44.106 + break; 44.107 + case IA64_PMC: 44.108 + if ( index == 0 ) { 44.109 + /* Ignore field: 3:1 */ 44.110 + nvalue = value & (~MASK (1, 3)); 44.111 + } 44.112 + break; 44.113 + case IA64_PMD: 44.114 + if ( index >= 4 ) { 44.115 + /* Ignore field: 7:7 */ 44.116 + /* bugbug: this code is correct for generic 44.117 + * PMD. However, for implementation specific 44.118 + * PMD, it's WRONG. need more info to judge 44.119 + * what's implementation specific PMD. 
44.120 + */ 44.121 + nvalue = value & (~MASK (7, 1)); 44.122 + } 44.123 + break; 44.124 + case IA64_PKR: 44.125 + case IA64_RR: 44.126 + break; 44.127 + default: 44.128 + panic ("Unsupported instruction!"); 44.129 + } 44.130 + 44.131 + return nvalue; 44.132 +} 44.133 + 44.134 +/* 44.135 + * Return: 44.136 + * Set all ignored fields in value to 0 and return 44.137 + */ 44.138 +u64 44.139 +cr_igfld_mask (int index, u64 value) 44.140 +{ 44.141 + u64 nvalue; 44.142 + 44.143 + nvalue = value; 44.144 + 44.145 + switch ( index ) { 44.146 + case IA64_REG_CR_IVA: 44.147 + /* Ignore field: 14:0 */ 44.148 + nvalue = value & (~MASK (0, 15)); 44.149 + break; 44.150 + 44.151 + case IA64_REG_CR_IHA: 44.152 + /* Ignore field: 1:0 */ 44.153 + nvalue = value & (~MASK (0, 2)); 44.154 + break; 44.155 + 44.156 + case IA64_REG_CR_LID: 44.157 + /* Ignore field: 63:32 */ 44.158 + nvalue = value & (~MASK (32, 32)); 44.159 + break; 44.160 + 44.161 + case IA64_REG_CR_TPR: 44.162 + /* Ignore field: 63:17,3:0 */ 44.163 + nvalue = value & (~MASK (17, 47)); 44.164 + nvalue = nvalue & (~MASK (0, 4)); 44.165 + break; 44.166 + 44.167 + case IA64_REG_CR_EOI: 44.168 + /* Ignore field: 63:0 */ 44.169 + nvalue = 0; 44.170 + break; 44.171 + 44.172 + case IA64_REG_CR_ITV: 44.173 + case IA64_REG_CR_PMV: 44.174 + case IA64_REG_CR_CMCV: 44.175 + case IA64_REG_CR_LRR0: 44.176 + case IA64_REG_CR_LRR1: 44.177 + /* Ignore field: 63:17,12:12 */ 44.178 + nvalue = value & (~MASK (17, 47)); 44.179 + nvalue = nvalue & (~MASK (12, 1)); 44.180 + break; 44.181 + } 44.182 + 44.183 + return nvalue; 44.184 +} 44.185 + 44.186 + 44.187 +/* 44.188 + * Return: 44.189 + * 1: PSR reserved fields are not zero 44.190 + * 0: PSR reserved fields are all zero 44.191 + */ 44.192 +int 44.193 +check_psr_rsv_fields (u64 value) 44.194 +{ 44.195 + /* PSR reserved fields: 0, 12~6, 16, 31~28, 63~46 44.196 + * These reserved fields shall all be zero 44.197 + * Otherwise we will panic 44.198 + */ 44.199 + 44.200 + if ( value & MASK (0, 1) || 44.201 + value & MASK (6, 7) || 44.202 + value & MASK (16, 1) || 44.203 + value & MASK (28, 4) || 44.204 + value & MASK (46, 18) 44.205 + ) { 44.206 + return 1; 44.207 + } 44.208 + 44.209 + return 0; 44.210 +} 44.211 + 44.212 + 44.213 + 44.214 +/* 44.215 + * Return: 44.216 + * 1: CR reserved fields are not zero 44.217 + * 0: CR reserved fields are all zero 44.218 + */ 44.219 +int 44.220 +check_cr_rsv_fields (int index, u64 value) 44.221 +{ 44.222 + switch (index) { 44.223 + case IA64_REG_CR_DCR: 44.224 + if ( (value & MASK ( 3, 5 )) || 44.225 + (value & MASK (15, 49))) { 44.226 + return 1; 44.227 + } 44.228 + return 0; 44.229 + 44.230 + case IA64_REG_CR_ITM: 44.231 + case IA64_REG_CR_IVA: 44.232 + case IA64_REG_CR_IIP: 44.233 + case IA64_REG_CR_IFA: 44.234 + case IA64_REG_CR_IIPA: 44.235 + case IA64_REG_CR_IIM: 44.236 + case IA64_REG_CR_IHA: 44.237 + case IA64_REG_CR_EOI: 44.238 + return 0; 44.239 + 44.240 + case IA64_REG_CR_PTA: 44.241 + if ( (value & MASK ( 1, 1 )) || 44.242 + (value & MASK (9, 6))) { 44.243 + return 1; 44.244 + } 44.245 + return 0; 44.246 + 44.247 + case IA64_REG_CR_IPSR: 44.248 + return check_psr_rsv_fields (value); 44.249 + 44.250 + 44.251 + case IA64_REG_CR_ISR: 44.252 + if ( (value & MASK ( 24, 8 )) || 44.253 + (value & MASK (44, 20))) { 44.254 + return 1; 44.255 + } 44.256 + return 0; 44.257 + 44.258 + case IA64_REG_CR_ITIR: 44.259 + if ( (value & MASK ( 0, 2 )) || 44.260 + (value & MASK (32, 32))) { 44.261 + return 1; 44.262 + } 44.263 + return 0; 44.264 + 44.265 + case IA64_REG_CR_IFS: 44.266 + if ( 
(value & MASK ( 38, 25 ))) { 44.267 + return 1; 44.268 + } 44.269 + return 0; 44.270 + 44.271 + case IA64_REG_CR_LID: 44.272 + if ( (value & MASK ( 0, 16 ))) { 44.273 + return 1; 44.274 + } 44.275 + return 0; 44.276 + 44.277 + case IA64_REG_CR_IVR: 44.278 + if ( (value & MASK ( 8, 56 ))) { 44.279 + return 1; 44.280 + } 44.281 + return 0; 44.282 + 44.283 + case IA64_REG_CR_TPR: 44.284 + if ( (value & MASK ( 8, 8 ))) { 44.285 + return 1; 44.286 + } 44.287 + return 0; 44.288 + 44.289 + case IA64_REG_CR_IRR0: 44.290 + if ( (value & MASK ( 1, 1 )) || 44.291 + (value & MASK (3, 13))) { 44.292 + return 1; 44.293 + } 44.294 + return 0; 44.295 + 44.296 + case IA64_REG_CR_ITV: 44.297 + case IA64_REG_CR_PMV: 44.298 + case IA64_REG_CR_CMCV: 44.299 + if ( (value & MASK ( 8, 4 )) || 44.300 + (value & MASK (13, 3))) { 44.301 + return 1; 44.302 + } 44.303 + return 0; 44.304 + 44.305 + case IA64_REG_CR_LRR0: 44.306 + case IA64_REG_CR_LRR1: 44.307 + if ( (value & MASK ( 11, 1 )) || 44.308 + (value & MASK (14, 1))) { 44.309 + return 1; 44.310 + } 44.311 + return 0; 44.312 + } 44.313 + 44.314 + 44.315 + panic ("Unsupported CR"); 44.316 +} 44.317 + 44.318 + 44.319 + 44.320 +/* 44.321 + * Return: 44.322 + * 0: Indirect Reg reserved fields are not zero 44.323 + * 1: Indirect Reg reserved fields are all zero 44.324 + */ 44.325 +int 44.326 +check_indirect_reg_rsv_fields ( int type, int index, u64 value ) 44.327 +{ 44.328 + 44.329 + switch ( type ) { 44.330 + case IA64_CPUID: 44.331 + if ( index == 3 ) { 44.332 + if ( value & MASK (40, 24 )) { 44.333 + return 0; 44.334 + } 44.335 + } else if ( index == 4 ) { 44.336 + if ( value & MASK (2, 62 )) { 44.337 + return 0; 44.338 + } 44.339 + } 44.340 + break; 44.341 + 44.342 + case IA64_DBR: 44.343 + case IA64_IBR: 44.344 + case IA64_PMC: 44.345 + case IA64_PMD: 44.346 + break; 44.347 + 44.348 + case IA64_PKR: 44.349 + if ( value & MASK (4, 4) || 44.350 + value & MASK (32, 32 )) { 44.351 + return 0; 44.352 + } 44.353 + break; 44.354 + 44.355 + case IA64_RR: 44.356 + if ( value & MASK (1, 1) || 44.357 + value & MASK (32, 32 )) { 44.358 + return 0; 44.359 + } 44.360 + break; 44.361 + 44.362 + default: 44.363 + panic ("Unsupported instruction!"); 44.364 + } 44.365 + 44.366 + return 1; 44.367 +} 44.368 + 44.369 + 44.370 + 44.371 + 44.372 +/* Return 44.373 + * Same format as isr_t 44.374 + * Only ei/ni bits are valid, all other bits are zero 44.375 + */ 44.376 +u64 44.377 +set_isr_ei_ni (VCPU *vcpu) 44.378 +{ 44.379 + 44.380 + IA64_PSR vpsr,ipsr; 44.381 + ISR visr; 44.382 + REGS *regs; 44.383 + 44.384 + regs=vcpu_regs(vcpu); 44.385 + 44.386 + visr.val = 0; 44.387 + 44.388 + vpsr.val = vmx_vcpu_get_psr (vcpu); 44.389 + 44.390 + if ( !vpsr.ic ) { 44.391 + /* Set ISR.ni */ 44.392 + visr.ni = 1; 44.393 + } 44.394 + ipsr.val = regs->cr_ipsr; 44.395 + 44.396 + visr.ei = ipsr.ri; 44.397 + return visr.val; 44.398 +} 44.399 + 44.400 + 44.401 +/* Set up ISR.na/code{3:0}/r/w for no-access instructions 44.402 + * Refer to SDM Vol2 Table 5-1 44.403 + * Parameter: 44.404 + * op: which no-access instruction is being emulated 44.405 + * (e.g. IA64_INST_TPA, IA64_INST_TAK); ISR.r/w are left to the caller 44.406 + * Return: 44.407 + * Same format as ISR. 
All fields are zero, except na/code{3:0}/r/w 44.408 + */ 44.409 +u64 44.410 +set_isr_for_na_inst(VCPU *vcpu, int op) 44.411 +{ 44.412 + ISR visr; 44.413 + visr.val = 0; 44.414 + switch (op) { 44.415 + case IA64_INST_TPA: 44.416 + visr.na = 1; 44.417 + visr.code = 0; 44.418 + break; 44.419 + case IA64_INST_TAK: 44.420 + visr.na = 1; 44.421 + visr.code = 3; 44.422 + break; 44.423 + } 44.424 + return visr.val; 44.425 +} 44.426 + 44.427 + 44.428 + 44.429 +/* 44.430 + * Set up ISR for register NaT consumption fault 44.431 + * Parameters: 44.432 + * read: if 1, indicates this is a read access; 44.433 + * write: if 1, indicates this is a write access; 44.434 + */ 44.435 +void 44.436 +set_rnat_consumption_isr (VCPU *vcpu,int inst,int read,int write) 44.437 +{ 44.438 + ISR visr; 44.439 + u64 value; 44.440 + /* Need to set up ISR: code, ei, ni, na, r/w */ 44.441 + visr.val = 0; 44.442 + 44.443 + /* ISR.code{7:4} =1, 44.444 + * Set up ISR.code{3:0}, ISR.na 44.445 + */ 44.446 + visr.code = (1 << 4); 44.447 + if (inst) { 44.448 + 44.449 + value = set_isr_for_na_inst (vcpu,inst); 44.450 + visr.val = visr.val | value; 44.451 + } 44.452 + 44.453 + /* Set up ISR.r/w */ 44.454 + visr.r = read; 44.455 + visr.w = write; 44.456 + 44.457 + /* Set up ei/ni */ 44.458 + value = set_isr_ei_ni (vcpu); 44.459 + visr.val = visr.val | value; 44.460 + 44.461 + vmx_vcpu_set_isr (vcpu,visr.val); 44.462 +} 44.463 + 44.464 + 44.465 + 44.466 +/* 44.467 + * Set up ISR for break fault 44.468 + */ 44.469 +void set_break_isr (VCPU *vcpu) 44.470 +{ 44.471 + ISR visr; 44.472 + u64 value; 44.473 + 44.474 + /* Need to set up ISR: ei, ni */ 44.475 + 44.476 + visr.val = 0; 44.477 + 44.478 + /* Set up ei/ni */ 44.479 + value = set_isr_ei_ni (vcpu); 44.480 + visr.val = visr.val | value; 44.481 + 44.482 + vmx_vcpu_set_isr(vcpu, visr.val); 44.483 +} 44.484 + 44.485 + 44.486 + 44.487 + 44.488 + 44.489 + 44.490 +/* 44.491 + * Set up ISR for Privileged Operation fault 44.492 + */ 44.493 +void set_privileged_operation_isr (VCPU *vcpu,int inst) 44.494 +{ 44.495 + ISR visr; 44.496 + u64 value; 44.497 + 44.498 + /* Need to set up ISR: code, ei, ni, na */ 44.499 + 44.500 + visr.val = 0; 44.501 + 44.502 + /* Set up na, code{3:0} for no-access instruction */ 44.503 + value = set_isr_for_na_inst (vcpu, inst); 44.504 + visr.val = visr.val | value; 44.505 + 44.506 + 44.507 + /* ISR.code{7:4} =1 */ 44.508 + visr.code = (1 << 4) | visr.code; 44.509 + 44.510 + /* Set up ei/ni */ 44.511 + value = set_isr_ei_ni (vcpu); 44.512 + visr.val = visr.val | value; 44.513 + 44.514 + vmx_vcpu_set_isr (vcpu, visr.val); 44.515 +} 44.516 + 44.517 + 44.518 + 44.519 + 44.520 +/* 44.521 + * Set up ISR for Privileged Register fault 44.522 + */ 44.523 +void set_privileged_reg_isr (VCPU *vcpu, int inst) 44.524 +{ 44.525 + ISR visr; 44.526 + u64 value; 44.527 + 44.528 + /* Need to set up ISR: code, ei, ni */ 44.529 + 44.530 + visr.val = 0; 44.531 + 44.532 + /* ISR.code{7:4} =2 */ 44.533 + visr.code = 2 << 4; 44.534 + 44.535 + /* Set up ei/ni */ 44.536 + value = set_isr_ei_ni (vcpu); 44.537 + visr.val = visr.val | value; 44.538 + 44.539 + vmx_vcpu_set_isr (vcpu, visr.val); 44.540 +} 44.541 + 44.542 + 44.543 + 44.544 + 44.545 + 44.546 +/* 44.547 + * Set up ISR for Reserved Register/Field fault 44.548 + */ 44.549 +void set_rsv_reg_field_isr (VCPU *vcpu) 44.550 +{ 44.551 + ISR visr; 44.552 + u64 value; 44.553 + 44.554 + /* Need to set up ISR: code, ei, ni */ 44.555 + 44.556 + visr.val = 0; 44.557 + 44.558 + /* ISR.code{7:4} =3 */ 44.559 + visr.code = (3 << 4) | visr.code; 44.560 + 44.561 + /* 
Set up ei/ni */ 44.562 + value = set_isr_ei_ni (vcpu); 44.563 + visr.val = visr.val | value; 44.564 + 44.565 + vmx_vcpu_set_isr (vcpu, visr.val); 44.566 +} 44.567 + 44.568 + 44.569 + 44.570 +/* 44.571 + * Set up ISR for Illegal Operation fault 44.572 + */ 44.573 +void set_illegal_op_isr (VCPU *vcpu) 44.574 +{ 44.575 + ISR visr; 44.576 + u64 value; 44.577 + 44.578 + /* Need to set up ISR: ei, ni */ 44.579 + 44.580 + visr.val = 0; 44.581 + 44.582 + /* Set up ei/ni */ 44.583 + value = set_isr_ei_ni (vcpu); 44.584 + visr.val = visr.val | value; 44.585 + 44.586 + vmx_vcpu_set_isr (vcpu, visr.val); 44.587 +} 44.588 + 44.589 + 44.590 +void set_isr_reg_nat_consumption(VCPU *vcpu, u64 flag, u64 non_access) 44.591 +{ 44.592 + ISR isr; 44.593 + 44.594 + isr.val = 0; 44.595 + isr.val = set_isr_ei_ni(vcpu); 44.596 + isr.code = IA64_REG_NAT_CONSUMPTION_FAULT | flag; 44.597 + isr.na = non_access; 44.598 + isr.r = 1; 44.599 + isr.w = 0; 44.600 + vmx_vcpu_set_isr(vcpu, isr.val); 44.601 + return; 44.602 +} 44.603 + 44.604 +void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access) 44.605 +{ 44.606 + u64 value; 44.607 + ISR isr; 44.608 + 44.609 + isr.val = set_isr_ei_ni(vcpu); 44.610 + isr.code = IA64_PRIV_OP_FAULT; 44.611 + isr.na = non_access; 44.612 + vmx_vcpu_set_isr(vcpu, isr.val); 44.613 + 44.614 + return; 44.615 +} 44.616 + 44.617 + 44.618 +IA64FAULT check_target_register(VCPU *vcpu, u64 reg_index) 44.619 +{ 44.620 + u64 sof; 44.621 + REGS *regs; 44.622 + regs=vcpu_regs(vcpu); 44.623 + sof = regs->cr_ifs & 0x7f; 44.624 + if(reg_index >= sof + 32) 44.625 + return IA64_FAULT; 44.626 + return IA64_NO_FAULT; 44.627 +} 44.628 + 44.629 + 44.630 +int is_reserved_rr_register(VCPU* vcpu, int reg_index) 44.631 +{ 44.632 + return (reg_index >= 8); 44.633 +} 44.634 + 44.635 +#define ITIR_RSV_MASK (0x3UL | (((1UL<<32)-1) << 32)) 44.636 +int is_reserved_itir_field(VCPU* vcpu, u64 itir) 44.637 +{ 44.638 + if ( itir & ITIR_RSV_MASK ) { 44.639 + return 1; 44.640 + } 44.641 + return 0; 44.642 +} 44.643 + 44.644 +int is_reserved_rr_field(VCPU* vcpu, u64 reg_value) 44.645 +{ 44.646 + ia64_rr rr; 44.647 + rr.rrval = reg_value; 44.648 + 44.649 + if(rr.reserved0 != 0 || rr.reserved1 != 0){ 44.650 + return 1; 44.651 + } 44.652 + if(rr.ps < 12 || rr.ps > 28){ 44.653 + // page too big or small. 44.654 + return 1; 44.655 + } 44.656 + if(rr.ps > 15 && rr.ps % 2 != 0){ 44.657 + // unsupported page size. 44.658 + return 1; 44.659 + } 44.660 + return 0; 44.661 +} 44.662 +
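vmx_utility.c leans entirely on a MASK(offset, len) macro for its ignored- and reserved-field checks. The self-contained sketch below shows the idiom under the assumption that MASK expands to len one-bits starting at bit offset with len < 64, which is what uses such as MASK(32, 32) for the upper word imply; the real definition lives in a Xen header outside this changeset.

#include <assert.h>
#include <stdint.h>

/* Assumed expansion: 'len' one-bits starting at bit 'offset' (len < 64). */
#define MASK(offset, len) (((((uint64_t)1) << (len)) - 1) << (offset))

/* Reserved-field test in the style of check_cr_rsv_fields(): ITIR has
 * reserved bits {1:0} and {63:32}. */
static int itir_has_rsv_bits(uint64_t itir)
{
    return (itir & (MASK(0, 2) | MASK(32, 32))) != 0;
}

int main(void)
{
    assert(MASK(32, 32) == 0xffffffff00000000ULL);
    assert(itir_has_rsv_bits(0x1));       /* bit 0 is reserved */
    assert(!itir_has_rsv_bits(0x34fc));   /* key/ps bits only */
    return 0;
}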
45.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 45.2 +++ b/xen/arch/ia64/vmx_vcpu.c Mon May 23 15:29:59 2005 +0000 45.3 @@ -0,0 +1,436 @@ 45.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 45.5 +/* 45.6 + * vmx_vcpu.c: handling all virtual cpu related things. 45.7 + * Copyright (c) 2005, Intel Corporation. 45.8 + * 45.9 + * This program is free software; you can redistribute it and/or modify it 45.10 + * under the terms and conditions of the GNU General Public License, 45.11 + * version 2, as published by the Free Software Foundation. 45.12 + * 45.13 + * This program is distributed in the hope it will be useful, but WITHOUT 45.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 45.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 45.16 + * more details. 45.17 + * 45.18 + * You should have received a copy of the GNU General Public License along with 45.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 45.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 45.21 + * 45.22 + * Fred yang (fred.yang@intel.com) 45.23 + * Arun Sharma (arun.sharma@intel.com) 45.24 + * Shaofan Li (Susue Li) <susie.li@intel.com> 45.25 + * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) 45.26 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 45.27 + */ 45.28 + 45.29 + 45.30 + 45.31 +#include <linux/sched.h> 45.32 +#include <public/arch-ia64.h> 45.33 +#include <asm/ia64_int.h> 45.34 +#include <asm/vmx_vcpu.h> 45.35 +#include <asm/regionreg.h> 45.36 +#include <asm/tlb.h> 45.37 +#include <asm/processor.h> 45.38 +#include <asm/delay.h> 45.39 +#include <asm/regs.h> 45.40 +#include <asm/gcc_intrin.h> 45.41 +#include <asm/vmx_mm_def.h> 45.42 +#include <asm/vmx.h> 45.43 + 45.44 +//u64 fire_itc; 45.45 +//u64 fire_itc2; 45.46 +//u64 fire_itm; 45.47 +//u64 fire_itm2; 45.48 +/* 45.49 + * Copyright (c) 2005 Intel Corporation. 45.50 + * Anthony Xu (anthony.xu@intel.com) 45.51 + * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) 45.52 + * 45.53 + * This program is free software; you can redistribute it and/or modify it 45.54 + * under the terms and conditions of the GNU General Public License, 45.55 + * version 2, as published by the Free Software Foundation. 45.56 + * 45.57 + * This program is distributed in the hope it will be useful, but WITHOUT 45.58 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 45.59 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 45.60 + * more details. 45.61 + * 45.62 + * You should have received a copy of the GNU General Public License along with 45.63 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 45.64 + * Place - Suite 330, Boston, MA 02111-1307 USA. 
45.65 + * 45.66 + */ 45.67 + 45.68 +/************************************************************************** 45.69 + VCPU general register access routines 45.70 +**************************************************************************/ 45.71 +#include <asm/hw_irq.h> 45.72 +#include <asm/vmx_pal_vsa.h> 45.73 +#include <asm/kregs.h> 45.74 + 45.75 +//unsigned long last_guest_rsm = 0x0; 45.76 +struct guest_psr_bundle{ 45.77 + unsigned long ip; 45.78 + unsigned long psr; 45.79 +}; 45.80 + 45.81 +struct guest_psr_bundle guest_psr_buf[100]; 45.82 +unsigned long guest_psr_index = 0; 45.83 + 45.84 +void 45.85 +vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value) 45.86 +{ 45.87 + 45.88 + UINT64 mask; 45.89 + REGS *regs; 45.90 + IA64_PSR old_psr, new_psr; 45.91 + old_psr.val=vmx_vcpu_get_psr(vcpu); 45.92 + 45.93 + regs=vcpu_regs(vcpu); 45.94 + /* We only support guests with: 45.95 + * vpsr.pk = 0 45.96 + * vpsr.is = 0 45.97 + * Otherwise panic 45.98 + */ 45.99 + if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) { 45.100 + panic ("Setting unsupported guest psr!"); 45.101 + } 45.102 + 45.103 + /* 45.104 + * For those IA64_PSR bits: id/da/dd/ss/ed/ia 45.105 + * Since these bits will become 0 after successful execution of each 45.106 + * instruction, we clear them here instead of propagating them to mIA64_PSR 45.107 + */ 45.108 + VMX_VPD(vcpu,vpsr) = value & 45.109 + (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD | 45.110 + IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA 45.111 + )); 45.112 + 45.113 + new_psr.val=vmx_vcpu_get_psr(vcpu); 45.114 + { 45.115 + struct xen_regs *regs = vcpu_regs(vcpu); 45.116 + guest_psr_buf[guest_psr_index].ip = regs->cr_iip; 45.117 + guest_psr_buf[guest_psr_index].psr = new_psr.val; 45.118 + if (++guest_psr_index >= 100) 45.119 + guest_psr_index = 0; 45.120 + } 45.121 +#if 0 45.122 + if (old_psr.i != new_psr.i) { 45.123 + if (old_psr.i) 45.124 + last_guest_rsm = vcpu_regs(vcpu)->cr_iip; 45.125 + else 45.126 + last_guest_rsm = 0; 45.127 + } 45.128 +#endif 45.129 + 45.130 + /* 45.131 + * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr) 45.132 + * , except for the following bits: 45.133 + * ic/i/dt/si/rt/mc/it/bn/vm 45.134 + */ 45.135 + mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI + 45.136 + IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + 45.137 + IA64_PSR_VM; 45.138 + 45.139 + regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) ); 45.140 + 45.141 + check_mm_mode_switch(vcpu, old_psr, new_psr); 45.142 + return; 45.143 +} 45.144 + 45.145 +/* Adjust slot both in xen_regs and vpd, upon vpsr.ri which 45.146 + * should have been synced with ipsr on entry. 45.147 + * 45.148 + * Clear some bits after successful emulation. 45.149 + */ 45.150 +IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu) 45.151 +{ 45.152 + // TODO: trap_bounce?? 
Eddie 45.153 + REGS *regs = vcpu_regs(vcpu); 45.154 + IA64_PSR vpsr; 45.155 + IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr; 45.156 + 45.157 + vpsr.val = vmx_vcpu_get_psr(vcpu); 45.158 + if (vpsr.ri == 2) { 45.159 + vpsr.ri = 0; 45.160 + regs->cr_iip += 16; 45.161 + } else { 45.162 + vpsr.ri++; 45.163 + } 45.164 + 45.165 + ipsr->ri = vpsr.ri; 45.166 + vpsr.val &= 45.167 + (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD | 45.168 + IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA 45.169 + )); 45.170 + 45.171 + VMX_VPD(vcpu, vpsr) = vpsr.val; 45.172 + 45.173 + ipsr->val &= 45.174 + (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD | 45.175 + IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA 45.176 + )); 45.177 + 45.178 + return (IA64_NO_FAULT); 45.179 +} 45.180 + 45.181 + 45.182 +IA64FAULT vmx_vcpu_cover(VCPU *vcpu) 45.183 +{ 45.184 + REGS *regs = vcpu_regs(vcpu); 45.185 + IA64_PSR vpsr; 45.186 + vpsr.val = vmx_vcpu_get_psr(vcpu); 45.187 + 45.188 + if(!vpsr.ic) 45.189 + VPD_CR(vcpu,ifs) = regs->cr_ifs; 45.190 + regs->cr_ifs = IA64_IFS_V; 45.191 + return (IA64_NO_FAULT); 45.192 +} 45.193 + 45.194 + 45.195 +thash_cb_t * 45.196 +vmx_vcpu_get_vtlb(VCPU *vcpu) 45.197 +{ 45.198 + return vcpu->arch.vtlb; 45.199 +} 45.200 + 45.201 + 45.202 +struct virutal_platform_def * 45.203 +vmx_vcpu_get_plat(VCPU *vcpu) 45.204 +{ 45.205 + return &(vcpu->arch.arch_vmx.vmx_platform); 45.206 +} 45.207 + 45.208 + 45.209 +ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr) 45.210 +{ 45.211 + return (ia64_rr)VMX(vcpu,vrr[vadr>>61]); 45.212 +} 45.213 + 45.214 + 45.215 +IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val) 45.216 +{ 45.217 + extern void set_one_rr(UINT64, UINT64); 45.218 + ia64_rr oldrr,newrr; 45.219 + thash_cb_t *hcb; 45.220 + oldrr=vmx_vcpu_rr(vcpu,reg); 45.221 + newrr.rrval=val; 45.222 +#if 1 45.223 + if(oldrr.ps!=newrr.ps){ 45.224 + hcb = vmx_vcpu_get_vtlb(vcpu); 45.225 + thash_purge_all(hcb); 45.226 + } 45.227 +#endif 45.228 + VMX(vcpu,vrr[reg>>61]) = val; 45.229 + switch((u64)(reg>>61)) { 45.230 + case VRN5: 45.231 + VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val); 45.232 + break; 45.233 + case VRN6: 45.234 + VMX(vcpu,mrr6)=vmx_vrrtomrr(vcpu,val); 45.235 + break; 45.236 + case VRN7: 45.237 + VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val); 45.238 + /* Change double mapping for this domain */ 45.239 + vmx_change_double_mapping(vcpu, 45.240 + vmx_vrrtomrr(vcpu,oldrr.rrval), 45.241 + vmx_vrrtomrr(vcpu,newrr.rrval)); 45.242 + break; 45.243 + default: 45.244 + ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val)); 45.245 + break; 45.246 + } 45.247 + 45.248 + return (IA64_NO_FAULT); 45.249 +} 45.250 + 45.251 + 45.252 + 45.253 +/************************************************************************** 45.254 + VCPU protection key register access routines 45.255 +**************************************************************************/ 45.256 + 45.257 +IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval) 45.258 +{ 45.259 + UINT64 val = (UINT64)ia64_get_pkr(reg); 45.260 + *pval = val; 45.261 + return (IA64_NO_FAULT); 45.262 +} 45.263 + 45.264 +IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val) 45.265 +{ 45.266 + ia64_set_pkr(reg,val); 45.267 + return (IA64_NO_FAULT); 45.268 +} 45.269 + 45.270 +#if 0 45.271 +int tlb_debug=0; 45.272 +check_entry(u64 va, u64 ps, char *str) 45.273 +{ 45.274 + va &= ~ (PSIZE(ps)-1); 45.275 + if ( va == 0x2000000002908000UL || 45.276 + va == 0x600000000000C000UL ) { 45.277 + stop(); 45.278 + } 45.279 + if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps); 45.280 +} 45.281 +#endif 45.282 + 45.283 + 45.284 +u64 
vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa) 45.285 +{ 45.286 + ia64_rr rr,rr1; 45.287 + rr=vmx_vcpu_rr(vcpu,ifa); 45.288 + rr1.rrval=0; 45.289 + rr1.ps=rr.ps; 45.290 + rr1.rid=rr.rid; 45.291 + return (rr1.rrval); 45.292 +} 45.293 + 45.294 + 45.295 + 45.296 + 45.297 +IA64FAULT vmx_vcpu_rfi(VCPU *vcpu) 45.298 +{ 45.299 + // TODO: Only allowed for current vcpu 45.300 + UINT64 ifs, psr; 45.301 + REGS *regs = vcpu_regs(vcpu); 45.302 + psr = VPD_CR(vcpu,ipsr); 45.303 + vmx_vcpu_set_psr(vcpu,psr); 45.304 + ifs=VPD_CR(vcpu,ifs); 45.305 + if((ifs>>63)&&(ifs<<1)){ 45.306 + ifs=(regs->cr_ifs)&0x7f; 45.307 + regs->rfi_pfs = (ifs<<7)|ifs; 45.308 + regs->cr_ifs = VPD_CR(vcpu,ifs); 45.309 + } 45.310 + regs->cr_iip = VPD_CR(vcpu,iip); 45.311 + return (IA64_NO_FAULT); 45.312 +} 45.313 + 45.314 + 45.315 +UINT64 45.316 +vmx_vcpu_get_psr(VCPU *vcpu) 45.317 +{ 45.318 + return VMX_VPD(vcpu,vpsr); 45.319 +} 45.320 + 45.321 + 45.322 +IA64FAULT 45.323 +vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val) 45.324 +{ 45.325 + IA64_PSR vpsr; 45.326 + 45.327 + vpsr.val = vmx_vcpu_get_psr(vcpu); 45.328 + if ( vpsr.bn ) { 45.329 + *val=VMX_VPD(vcpu,vgr[reg-16]); 45.330 + // Check NAT bit 45.331 + if ( VMX_VPD(vcpu,vnat) & (1UL<<(reg-16)) ) { 45.332 + // TODO 45.333 + //panic ("NAT consumption fault\n"); 45.334 + return IA64_FAULT; 45.335 + } 45.336 + 45.337 + } 45.338 + else { 45.339 + *val=VMX_VPD(vcpu,vbgr[reg-16]); 45.340 + if ( VMX_VPD(vcpu,vbnat) & (1UL<<(reg-16)) ) { // bank-0 NaT bits assumed at bits 0-15, matching vnat 45.341 + //panic ("NAT consumption fault\n"); 45.342 + return IA64_FAULT; 45.343 + } 45.344 + 45.345 + } 45.346 + return IA64_NO_FAULT; 45.347 +} 45.348 + 45.349 +IA64FAULT 45.350 +vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat) 45.351 +{ 45.352 + IA64_PSR vpsr; 45.353 + vpsr.val = vmx_vcpu_get_psr(vcpu); 45.354 + if ( vpsr.bn ) { 45.355 + VMX_VPD(vcpu,vgr[reg-16]) = val; 45.356 + if(nat){ 45.357 + VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg-16) ); 45.358 + }else{ 45.359 + VMX_VPD(vcpu,vnat) &= ~( 1UL<<(reg-16) ); 45.360 + } 45.361 + } 45.362 + else { 45.363 + VMX_VPD(vcpu,vbgr[reg-16]) = val; 45.364 + if(nat){ 45.365 + VMX_VPD(vcpu,vbnat) |= ( 1UL<<(reg-16) ); 45.366 + }else{ 45.367 + VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg-16) ); 45.368 + } 45.369 + } 45.370 + return IA64_NO_FAULT; 45.371 +} 45.372 + 45.373 + 45.374 + 45.375 +IA64FAULT 45.376 +vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val) 45.377 +{ 45.378 + REGS *regs=vcpu_regs(vcpu); 45.379 + u64 nat; 45.380 + //TODO, Eddie 45.381 + if (!regs) return 0; 45.382 + if (reg >= 16 && reg < 32) { 45.383 + return vmx_vcpu_get_bgr(vcpu,reg,val); 45.384 + } 45.385 + getreg(reg,val,&nat,regs); // FIXME: handle NATs later 45.386 + if(nat){ 45.387 + return IA64_FAULT; 45.388 + } 45.389 + return IA64_NO_FAULT; 45.390 +} 45.391 + 45.392 +// returns: 45.393 +// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault 45.394 +// IA64_NO_FAULT otherwise 45.395 + 45.396 +IA64FAULT 45.397 +vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat) 45.398 +{ 45.399 + REGS *regs = vcpu_regs(vcpu); 45.400 + long sof; 45.401 + //TODO Eddie 45.402 + 45.403 + if (!regs) return IA64_ILLOP_FAULT; 45.404 + sof = (regs->cr_ifs) & 0x7f; if (reg >= sof + 32) return IA64_ILLOP_FAULT; 45.405 + if ( reg >= 16 && reg < 32 ) { 45.406 + return vmx_vcpu_set_bgr(vcpu,reg, value, nat); 45.407 + } 45.408 + setreg(reg,value,nat,regs); 45.409 + return IA64_NO_FAULT; 45.410 +} 45.411 + 45.412 + 45.413 +IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24) 45.414 +{ 45.415 + UINT64 vpsr; 45.416 + vpsr = 
vmx_vcpu_get_psr(vcpu); 45.417 + vpsr &= (~imm24); 45.418 + vmx_vcpu_set_psr(vcpu, vpsr); 45.419 + return IA64_NO_FAULT; 45.420 +} 45.421 + 45.422 + 45.423 +IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24) 45.424 +{ 45.425 + UINT64 vpsr; 45.426 + vpsr = vmx_vcpu_get_psr(vcpu); 45.427 + vpsr |= imm24; 45.428 + vmx_vcpu_set_psr(vcpu, vpsr); 45.429 + return IA64_NO_FAULT; 45.430 +} 45.431 + 45.432 + 45.433 +IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val) 45.434 +{ 45.435 + vmx_vcpu_set_psr(vcpu, val); 45.436 + return IA64_NO_FAULT; 45.437 +} 45.438 + 45.439 +
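vmx_vcpu_{get,set}_bgr() above route guest r16-r31 to one of two register banks depending on the guest's PSR.bn bit, with a per-register NaT bit that turns a read into a fault. The sketch below models just that selection logic; the struct layout and names are illustrative stand-ins, not the VPD layout used by the real code.

#include <assert.h>
#include <stdint.h>

struct banked_grs {
    uint64_t bank0[16];   /* plays the role of vbgr */
    uint64_t bank1[16];   /* plays the role of vgr  */
    uint16_t nat0, nat1;  /* one NaT bit per register, bits 0-15 */
    int bn;               /* guest PSR.bn */
};

/* r16-r31 resolve to bank 1 when PSR.bn=1 and bank 0 otherwise; a set
 * NaT bit makes the read fault, as in vmx_vcpu_get_bgr(). */
static int get_gr16_31(const struct banked_grs *b, unsigned reg, uint64_t *val)
{
    unsigned i = reg - 16;
    assert(reg >= 16 && reg < 32);
    if ((b->bn ? b->nat1 : b->nat0) & (1u << i))
        return -1;                        /* NaT consumption */
    *val = (b->bn ? b->bank1 : b->bank0)[i];
    return 0;
}

int main(void)
{
    struct banked_grs b = { .bn = 1 };
    uint64_t v;
    b.bank1[0] = 0x1111;
    b.bank0[0] = 0x2222;
    assert(get_gr16_31(&b, 16, &v) == 0 && v == 0x1111);
    b.bn = 0;
    assert(get_gr16_31(&b, 16, &v) == 0 && v == 0x2222);
    return 0;
}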
46.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 46.2 +++ b/xen/arch/ia64/vmx_virt.c Mon May 23 15:29:59 2005 +0000 46.3 @@ -0,0 +1,1501 @@ 46.4 +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 46.5 +/* 46.6 + * vmx_virt.c: 46.7 + * Copyright (c) 2005, Intel Corporation. 46.8 + * 46.9 + * This program is free software; you can redistribute it and/or modify it 46.10 + * under the terms and conditions of the GNU General Public License, 46.11 + * version 2, as published by the Free Software Foundation. 46.12 + * 46.13 + * This program is distributed in the hope it will be useful, but WITHOUT 46.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 46.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 46.16 + * more details. 46.17 + * 46.18 + * You should have received a copy of the GNU General Public License along with 46.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 46.20 + * Place - Suite 330, Boston, MA 02111-1307 USA. 46.21 + * 46.22 + * Fred yang (fred.yang@intel.com) 46.23 + * Shaofan Li (Susue Li) <susie.li@intel.com> 46.24 + * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) 46.25 + */ 46.26 + 46.27 + 46.28 + 46.29 +#include <asm/privop.h> 46.30 +#include <asm/vmx_vcpu.h> 46.31 +#include <asm/processor.h> 46.32 +#include <asm/delay.h> // Debug only 46.33 +#include <asm/vmmu.h> 46.34 +#include <asm/vmx_mm_def.h> 46.35 +#include <asm/smp.h> 46.36 + 46.37 +#include <asm/virt_event.h> 46.38 +extern UINT64 privop_trace; 46.39 + 46.40 +void 46.41 +ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause) 46.42 +{ 46.43 + *cause=0; 46.44 + switch (slot_type) { 46.45 + case M: 46.46 + if (inst.generic.major==0){ 46.47 + if(inst.M28.x3==0){ 46.48 + if(inst.M44.x4==6){ 46.49 + *cause=EVENT_SSM; 46.50 + }else if(inst.M44.x4==7){ 46.51 + *cause=EVENT_RSM; 46.52 + }else if(inst.M30.x4==8&&inst.M30.x2==2){ 46.53 + *cause=EVENT_MOV_TO_AR_IMM; 46.54 + } 46.55 + } 46.56 + } 46.57 + else if(inst.generic.major==1){ 46.58 + if(inst.M28.x3==0){ 46.59 + if(inst.M32.x6==0x2c){ 46.60 + *cause=EVENT_MOV_TO_CR; 46.61 + }else if(inst.M33.x6==0x24){ 46.62 + *cause=EVENT_MOV_FROM_CR; 46.63 + }else if(inst.M35.x6==0x2d){ 46.64 + *cause=EVENT_MOV_TO_PSR; 46.65 + }else if(inst.M36.x6==0x25){ 46.66 + *cause=EVENT_MOV_FROM_PSR; 46.67 + }else if(inst.M29.x6==0x2A){ 46.68 + *cause=EVENT_MOV_TO_AR; 46.69 + }else if(inst.M31.x6==0x22){ 46.70 + *cause=EVENT_MOV_FROM_AR; 46.71 + }else if(inst.M45.x6==0x09){ 46.72 + *cause=EVENT_PTC_L; 46.73 + }else if(inst.M45.x6==0x0A){ 46.74 + *cause=EVENT_PTC_G; 46.75 + }else if(inst.M45.x6==0x0B){ 46.76 + *cause=EVENT_PTC_GA; 46.77 + }else if(inst.M45.x6==0x0C){ 46.78 + *cause=EVENT_PTR_D; 46.79 + }else if(inst.M45.x6==0x0D){ 46.80 + *cause=EVENT_PTR_I; 46.81 + }else if(inst.M46.x6==0x1A){ 46.82 + *cause=EVENT_THASH; 46.83 + }else if(inst.M46.x6==0x1B){ 46.84 + *cause=EVENT_TTAG; 46.85 + }else if(inst.M46.x6==0x1E){ 46.86 + *cause=EVENT_TPA; 46.87 + }else if(inst.M46.x6==0x1F){ 46.88 + *cause=EVENT_TAK; 46.89 + }else if(inst.M47.x6==0x34){ 46.90 + *cause=EVENT_PTC_E; 46.91 + }else if(inst.M41.x6==0x2E){ 46.92 + *cause=EVENT_ITC_D; 46.93 + }else if(inst.M41.x6==0x2F){ 46.94 + *cause=EVENT_ITC_I; 46.95 + }else if(inst.M42.x6==0x00){ 46.96 + *cause=EVENT_MOV_TO_RR; 46.97 + }else if(inst.M42.x6==0x01){ 46.98 + *cause=EVENT_MOV_TO_DBR; 46.99 + }else if(inst.M42.x6==0x02){ 46.100 + *cause=EVENT_MOV_TO_IBR; 46.101 + }else if(inst.M42.x6==0x03){ 46.102 + 
*cause=EVENT_MOV_TO_PKR; 46.103 + }else if(inst.M42.x6==0x04){ 46.104 + *cause=EVENT_MOV_TO_PMC; 46.105 + }else if(inst.M42.x6==0x05){ 46.106 + *cause=EVENT_MOV_TO_PMD; 46.107 + }else if(inst.M42.x6==0x0E){ 46.108 + *cause=EVENT_ITR_D; 46.109 + }else if(inst.M42.x6==0x0F){ 46.110 + *cause=EVENT_ITR_I; 46.111 + }else if(inst.M43.x6==0x10){ 46.112 + *cause=EVENT_MOV_FROM_RR; 46.113 + }else if(inst.M43.x6==0x11){ 46.114 + *cause=EVENT_MOV_FROM_DBR; 46.115 + }else if(inst.M43.x6==0x12){ 46.116 + *cause=EVENT_MOV_FROM_IBR; 46.117 + }else if(inst.M43.x6==0x13){ 46.118 + *cause=EVENT_MOV_FROM_PKR; 46.119 + }else if(inst.M43.x6==0x14){ 46.120 + *cause=EVENT_MOV_FROM_PMC; 46.121 +/* 46.122 + }else if(inst.M43.x6==0x15){ 46.123 + *cause=EVENT_MOV_FROM_PMD; 46.124 +*/ 46.125 + }else if(inst.M43.x6==0x17){ 46.126 + *cause=EVENT_MOV_FROM_CPUID; 46.127 + } 46.128 + } 46.129 + } 46.130 + break; 46.131 + case B: 46.132 + if(inst.generic.major==0){ 46.133 + if(inst.B8.x6==0x02){ 46.134 + *cause=EVENT_COVER; 46.135 + }else if(inst.B8.x6==0x08){ 46.136 + *cause=EVENT_RFI; 46.137 + }else if(inst.B8.x6==0x0c){ 46.138 + *cause=EVENT_BSW_0; 46.139 + }else if(inst.B8.x6==0x0d){ 46.140 + *cause=EVENT_BSW_1; 46.141 + } 46.142 + } 46.143 + } 46.144 +} 46.145 + 46.146 +IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst) 46.147 +{ 46.148 + UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm; 46.149 + return vmx_vcpu_reset_psr_sm(vcpu,imm24); 46.150 +} 46.151 + 46.152 +IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst) 46.153 +{ 46.154 + UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm; 46.155 + return vmx_vcpu_set_psr_sm(vcpu,imm24); 46.156 +} 46.157 + 46.158 +unsigned long last_guest_psr = 0x0; 46.159 +IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst) 46.160 +{ 46.161 + UINT64 tgt = inst.M33.r1; 46.162 + UINT64 val; 46.163 + IA64FAULT fault; 46.164 + 46.165 +/* 46.166 + if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT) 46.167 + return vmx_vcpu_set_gr(vcpu, tgt, val); 46.168 + else return fault; 46.169 + */ 46.170 + val = vmx_vcpu_get_psr(vcpu); 46.171 + val = (val & MASK(0, 32)) | (val & MASK(35, 2)); 46.172 + last_guest_psr = val; 46.173 + return vmx_vcpu_set_gr(vcpu, tgt, val, 0); 46.174 +} 46.175 + 46.176 +/** 46.177 + * @todo Check for reserved bits and return IA64_RSVDREG_FAULT. 
46.178 + */ 46.179 +IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst) 46.180 +{ 46.181 + UINT64 val; 46.182 + IA64FAULT fault; 46.183 + if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT) 46.184 + panic(" get_psr nat bit fault\n"); 46.185 + 46.186 + val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32)); 46.187 +#if 0 46.188 + if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32)))) 46.189 + while(1); 46.190 + else 46.191 + last_mov_from_psr = 0; 46.192 +#endif 46.193 + return vmx_vcpu_set_psr_l(vcpu,val); 46.194 +} 46.195 + 46.196 + 46.197 +/************************************************************************** 46.198 +Privileged operation emulation routines 46.199 +**************************************************************************/ 46.200 + 46.201 +IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst) 46.202 +{ 46.203 + IA64_PSR vpsr; 46.204 + REGS *regs; 46.205 +#ifdef CHECK_FAULT 46.206 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.207 + if ( vpsr.cpl != 0) { 46.208 + /* Inject Privileged Operation fault into guest */ 46.209 + set_privileged_operation_isr (vcpu, 0); 46.210 + privilege_op (vcpu); 46.211 + return IA64_FAULT; 46.212 + } 46.213 +#endif // CHECK_FAULT 46.214 + regs=vcpu_regs(vcpu); 46.215 + vpsr.val=regs->cr_ipsr; 46.216 + if ( vpsr.is == 1 ) { 46.217 + panic ("We do not support IA32 instructions yet"); 46.218 + } 46.219 + 46.220 + return vmx_vcpu_rfi(vcpu); 46.221 +} 46.222 + 46.223 +IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst) 46.224 +{ 46.225 +#ifdef CHECK_FAULT 46.226 + IA64_PSR vpsr; 46.227 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.228 + if ( vpsr.cpl != 0) { 46.229 + /* Inject Privileged Operation fault into guest */ 46.230 + set_privileged_operation_isr (vcpu, 0); 46.231 + privilege_op (vcpu); 46.232 + return IA64_FAULT; 46.233 + } 46.234 +#endif // CHECK_FAULT 46.235 + return vmx_vcpu_bsw0(vcpu); 46.236 +} 46.237 + 46.238 +IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst) 46.239 +{ 46.240 +#ifdef CHECK_FAULT 46.241 + IA64_PSR vpsr; 46.242 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.243 + if ( vpsr.cpl != 0) { 46.244 + /* Inject Privileged Operation fault into guest */ 46.245 + set_privileged_operation_isr (vcpu, 0); 46.246 + privilege_op (vcpu); 46.247 + return IA64_FAULT; 46.248 + } 46.249 +#endif // CHECK_FAULT 46.250 + return vmx_vcpu_bsw1(vcpu); 46.251 +} 46.252 + 46.253 +IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst) 46.254 +{ 46.255 + return vmx_vcpu_cover(vcpu); 46.256 +} 46.257 + 46.258 +IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst) 46.259 +{ 46.260 + u64 r2,r3; 46.261 + ISR isr; 46.262 + IA64_PSR vpsr; 46.263 + 46.264 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.265 + if ( vpsr.cpl != 0) { 46.266 + /* Inject Privileged Operation fault into guest */ 46.267 + set_privileged_operation_isr (vcpu, 0); 46.268 + privilege_op (vcpu); 46.269 + return IA64_FAULT; 46.270 + } 46.271 + if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){ 46.272 +#ifdef VMAL_NO_FAULT_CHECK 46.273 + set_isr_reg_nat_consumption(vcpu,0,0); 46.274 + rnat_comsumption(vcpu); 46.275 + return IA64_FAULT; 46.276 +#endif // VMAL_NO_FAULT_CHECK 46.277 + } 46.278 +#ifdef VMAL_NO_FAULT_CHECK 46.279 + if (unimplemented_gva(vcpu,r3) ) { 46.280 + isr.val = set_isr_ei_ni(vcpu); 46.281 + isr.code = IA64_RESERVED_REG_FAULT; 46.282 + vcpu_set_isr(vcpu, isr.val); 46.283 + unimpl_daddr(vcpu); 46.284 + return IA64_FAULT; 46.285 + } 46.286 +#endif // VMAL_NO_FAULT_CHECK 46.287 + return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7)); 46.288 +} 46.289 + 46.290 +IA64FAULT 
vmx_emul_ptc_e(VCPU *vcpu, INST64 inst) 46.291 +{ 46.292 + u64 r3; 46.293 + ISR isr; 46.294 + IA64_PSR vpsr; 46.295 + 46.296 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.297 +#ifdef VMAL_NO_FAULT_CHECK 46.298 + if ( vpsr.cpl != 0) { 46.299 + /* Inject Privileged Operation fault into guest */ 46.300 + set_privileged_operation_isr (vcpu, 0); 46.301 + privilege_op (vcpu); 46.302 + return IA64_FAULT; 46.303 + } 46.304 +#endif // VMAL_NO_FAULT_CHECK 46.305 + if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){ 46.306 +#ifdef VMAL_NO_FAULT_CHECK 46.307 + set_isr_reg_nat_consumption(vcpu,0,0); 46.308 + rnat_comsumption(vcpu); 46.309 + return IA64_FAULT; 46.310 +#endif // VMAL_NO_FAULT_CHECK 46.311 + } 46.312 + return vmx_vcpu_ptc_e(vcpu,r3); 46.313 +} 46.314 + 46.315 +IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst) 46.316 +{ 46.317 + return vmx_emul_ptc_l(vcpu, inst); 46.318 +} 46.319 + 46.320 +IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst) 46.321 +{ 46.322 + return vmx_emul_ptc_l(vcpu, inst); 46.323 +} 46.324 + 46.325 +IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3) 46.326 +{ 46.327 + ISR isr; 46.328 + IA64FAULT ret1, ret2; 46.329 + 46.330 +#ifdef VMAL_NO_FAULT_CHECK 46.331 + IA64_PSR vpsr; 46.332 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.333 + if ( vpsr.cpl != 0) { 46.334 + /* Inject Privileged Operation fault into guest */ 46.335 + set_privileged_operation_isr (vcpu, 0); 46.336 + privilege_op (vcpu); 46.337 + return IA64_FAULT; 46.338 + } 46.339 +#endif // VMAL_NO_FAULT_CHECK 46.340 + ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3); 46.341 + ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2); 46.342 +#ifdef VMAL_NO_FAULT_CHECK 46.343 + if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) { 46.344 + set_isr_reg_nat_consumption(vcpu,0,0); 46.345 + rnat_comsumption(vcpu); 46.346 + return IA64_FAULT; 46.347 + } 46.348 + if (unimplemented_gva(vcpu,*pr3) ) { 46.349 + isr.val = set_isr_ei_ni(vcpu); 46.350 + isr.code = IA64_RESERVED_REG_FAULT; 46.351 + vcpu_set_isr(vcpu, isr.val); 46.352 + unimpl_daddr(vcpu); 46.353 + return IA64_FAULT; 46.354 + } 46.355 +#endif // VMAL_NO_FAULT_CHECK 46.356 + return IA64_NO_FAULT; 46.357 +} 46.358 + 46.359 +IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst) 46.360 +{ 46.361 + u64 r2,r3; 46.362 + if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT ) 46.363 + return IA64_FAULT; 46.364 + return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7)); 46.365 +} 46.366 + 46.367 +IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst) 46.368 +{ 46.369 + u64 r2,r3; 46.370 + if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT ) 46.371 + return IA64_FAULT; 46.372 + return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7)); 46.373 +} 46.374 + 46.375 + 46.376 +IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst) 46.377 +{ 46.378 + u64 r1,r3; 46.379 + ISR visr; 46.380 + IA64_PSR vpsr; 46.381 +#ifdef CHECK_FAULT 46.382 + if(check_target_register(vcpu, inst.M46.r1)){ 46.383 + set_illegal_op_isr(vcpu); 46.384 + illegal_op(vcpu); 46.385 + return IA64_FAULT; 46.386 + } 46.387 +#endif //CHECK_FAULT 46.388 + if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){ 46.389 +#ifdef CHECK_FAULT 46.390 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1); 46.391 + return IA64_NO_FAULT; 46.392 +#endif //CHECK_FAULT 46.393 + } 46.394 +#ifdef CHECK_FAULT 46.395 + if(unimplemented_gva(vcpu, r3)){ 46.396 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1); 46.397 + return IA64_NO_FAULT; 46.398 + } 46.399 +#endif //CHECK_FAULT 46.400 + vmx_vcpu_thash(vcpu, r3, &r1); 46.401 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); 46.402 + return(IA64_NO_FAULT); 46.403 +} 
46.404 + 46.405 + 46.406 +IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst) 46.407 +{ 46.408 + u64 r1,r3; 46.409 + ISR visr; 46.410 + IA64_PSR vpsr; 46.411 +#ifdef CHECK_FAULT 46.412 + if(check_target_register(vcpu, inst.M46.r1)){ 46.413 + set_illegal_op_isr(vcpu); 46.414 + illegal_op(vcpu); 46.415 + return IA64_FAULT; 46.416 + } 46.417 +#endif //CHECK_FAULT 46.418 + if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){ 46.419 +#ifdef CHECK_FAULT 46.420 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1); 46.421 + return IA64_NO_FAULT; 46.422 +#endif //CHECK_FAULT 46.423 + } 46.424 +#ifdef CHECK_FAULT 46.425 + if(unimplemented_gva(vcpu, r3)){ 46.426 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1); 46.427 + return IA64_NO_FAULT; 46.428 + } 46.429 +#endif //CHECK_FAULT 46.430 + vmx_vcpu_ttag(vcpu, r3, &r1); 46.431 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); 46.432 + return(IA64_NO_FAULT); 46.433 +} 46.434 + 46.435 + 46.436 +IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst) 46.437 +{ 46.438 + u64 r1,r3; 46.439 + ISR visr; 46.440 +#ifdef CHECK_FAULT 46.441 + if(check_target_register(vcpu, inst.M46.r1)){ 46.442 + set_illegal_op_isr(vcpu); 46.443 + illegal_op(vcpu); 46.444 + return IA64_FAULT; 46.445 + } 46.446 + IA64_PSR vpsr; 46.447 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.448 + if(vpsr.cpl!=0){ 46.449 + visr.val=0; 46.450 + vcpu_set_isr(vcpu, visr.val); 46.451 + return IA64_FAULT; 46.452 + } 46.453 +#endif //CHECK_FAULT 46.454 + if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){ 46.455 +#ifdef CHECK_FAULT 46.456 + set_isr_reg_nat_consumption(vcpu,0,1); 46.457 + rnat_comsumption(vcpu); 46.458 + return IA64_FAULT; 46.459 +#endif //CHECK_FAULT 46.460 + } 46.461 +#ifdef CHECK_FAULT 46.462 + if (unimplemented_gva(vcpu,r3) ) { 46.463 + // inject unimplemented_data_address_fault 46.464 + visr.val = set_isr_ei_ni(vcpu); 46.465 + visr.code = IA64_RESERVED_REG_FAULT; 46.466 + vcpu_set_isr(vcpu, visr.val); 46.467 + // FAULT_UNIMPLEMENTED_DATA_ADDRESS. 
46.468 + unimpl_daddr(vcpu); 46.469 + return IA64_FAULT; 46.470 + } 46.471 +#endif //CHECK_FAULT 46.472 + 46.473 + if(vmx_vcpu_tpa(vcpu, r3, &r1)){ 46.474 + return IA64_FAULT; 46.475 + } 46.476 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); 46.477 + return(IA64_NO_FAULT); 46.478 +} 46.479 + 46.480 +IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst) 46.481 +{ 46.482 + u64 r1,r3; 46.483 + ISR visr; 46.484 + IA64_PSR vpsr; 46.485 + int fault=IA64_NO_FAULT; 46.486 +#ifdef CHECK_FAULT 46.487 + visr.val=0; 46.488 + if(check_target_register(vcpu, inst.M46.r1)){ 46.489 + set_illegal_op_isr(vcpu); 46.490 + illegal_op(vcpu); 46.491 + return IA64_FAULT; 46.492 + } 46.493 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.494 + if(vpsr.cpl!=0){ 46.495 + vcpu_set_isr(vcpu, visr.val); 46.496 + return IA64_FAULT; 46.497 + } 46.498 +#endif 46.499 + if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){ 46.500 +#ifdef CHECK_FAULT 46.501 + set_isr_reg_nat_consumption(vcpu,0,1); 46.502 + rnat_comsumption(vcpu); 46.503 + return IA64_FAULT; 46.504 +#endif 46.505 + } 46.506 + if(vmx_vcpu_tak(vcpu, r3, &r1)){ 46.507 + return IA64_FAULT; 46.508 + } 46.509 + vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); 46.510 + return(IA64_NO_FAULT); 46.511 +} 46.512 + 46.513 + 46.514 +/************************************ 46.515 + * Insert translation register/cache 46.516 +************************************/ 46.517 + 46.518 +IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst) 46.519 +{ 46.520 + UINT64 fault, itir, ifa, pte, slot; 46.521 + ISR isr; 46.522 + IA64_PSR vpsr; 46.523 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.524 + if ( vpsr.ic ) { 46.525 + set_illegal_op_isr(vcpu); 46.526 + illegal_op(vcpu); 46.527 + return IA64_FAULT; 46.528 + } 46.529 +#ifdef VMAL_NO_FAULT_CHECK 46.530 + if ( vpsr.cpl != 0) { 46.531 + /* Inject Privileged Operation fault into guest */ 46.532 + set_privileged_operation_isr (vcpu, 0); 46.533 + privilege_op (vcpu); 46.534 + return IA64_FAULT; 46.535 + } 46.536 +#endif // VMAL_NO_FAULT_CHECK 46.537 + if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){ 46.538 +#ifdef VMAL_NO_FAULT_CHECK 46.539 + set_isr_reg_nat_consumption(vcpu,0,0); 46.540 + rnat_comsumption(vcpu); 46.541 + return IA64_FAULT; 46.542 +#endif // VMAL_NO_FAULT_CHECK 46.543 + } 46.544 +#ifdef VMAL_NO_FAULT_CHECK 46.545 + if(is_reserved_rr_register(vcpu, slot)){ 46.546 + set_illegal_op_isr(vcpu); 46.547 + illegal_op(vcpu); 46.548 + return IA64_FAULT; 46.549 + } 46.550 +#endif // VMAL_NO_FAULT_CHECK 46.551 + 46.552 + if (vmx_vcpu_get_itir(vcpu,&itir)){ 46.553 + return(IA64_FAULT); 46.554 + } 46.555 + if (vmx_vcpu_get_ifa(vcpu,&ifa)){ 46.556 + return(IA64_FAULT); 46.557 + } 46.558 +#ifdef VMAL_NO_FAULT_CHECK 46.559 + if (is_reserved_itir_field(vcpu, itir)) { 46.560 + // TODO 46.561 + return IA64_FAULT; 46.562 + } 46.563 + if (unimplemented_gva(vcpu,ifa) ) { 46.564 + isr.val = set_isr_ei_ni(vcpu); 46.565 + isr.code = IA64_RESERVED_REG_FAULT; 46.566 + vcpu_set_isr(vcpu, isr.val); 46.567 + unimpl_daddr(vcpu); 46.568 + return IA64_FAULT; 46.569 + } 46.570 +#endif // VMAL_NO_FAULT_CHECK 46.571 + 46.572 + return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot)); 46.573 +} 46.574 + 46.575 +IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst) 46.576 +{ 46.577 + UINT64 fault, itir, ifa, pte, slot; 46.578 + ISR isr; 46.579 + IA64_PSR vpsr; 46.580 + vpsr.val=vmx_vcpu_get_psr(vcpu); 46.581 + if ( vpsr.ic ) { 46.582 + set_illegal_op_isr(vcpu); 46.583 + illegal_op(vcpu); 46.584 + return IA64_FAULT; 46.585 + } 46.586 +#ifdef VMAL_NO_FAULT_CHECK 46.587 + if ( vpsr.cpl != 
0) { 46.588 + /* Inject Privileged Operation fault into guest */ 46.589 + set_privileged_operation_isr (vcpu, 0); 46.590 + privilege_op (vcpu); 46.591 + return IA64_FAULT; 46.592 + } 46.593 +#endif // VMAL_NO_FAULT_CHECK 46.594 + if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){ 46.595 +#ifdef VMAL_NO_FAULT_CHECK 46.596 + set_isr_reg_nat_consumption(vcpu,0,0); 46.597 + rnat_comsumption(vcpu); 46.598 + return IA64_FAULT; 46.599 +#endif // VMAL_NO_FAULT_CHECK 46.600 + } 46.601 +#ifdef VMAL_NO_FAULT_CHECK 46.602 + if(is_reserved_rr_register(vcpu, slot)){ 46.603 + set_illegal_op_isr(vcpu); 46.604 + illegal_op(vcpu); 46.605 + return IA64_FAULT; 46.606 + } 46.607 +#endif // VMAL_NO_FAULT_CHECK 46.608 + 46.609 + if (vmx_vcpu_get_itir(vcpu,&itir)){ 46.610 + return(IA64_FAULT); 46.611 + } 46.612 + if (vmx_vcpu_get_ifa(vcpu,&