ia64/xen-unstable

changeset 5093:c91f74efda05

bitkeeper revision 1.1514 (4291f6f7i2aAlgdzvcq6xJ3W4hjYzg)

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@firebug.cl.cam.ac.uk
date Mon May 23 15:29:59 2005 +0000 (2005-05-23)
parents 2d1acc84d984 6cbe0dc99129
children 53d5ba4a4aed
files .rootkeys xen/arch/ia64/Makefile xen/arch/ia64/Rules.mk xen/arch/ia64/acpi.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/lib/Makefile xen/arch/ia64/mm_init.c xen/arch/ia64/mmio.c xen/arch/ia64/patch/linux-2.6.11/entry.S xen/arch/ia64/patch/linux-2.6.11/entry.h xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h xen/arch/ia64/patch/linux-2.6.11/head.S xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h xen/arch/ia64/patch/linux-2.6.11/ia64regs.h xen/arch/ia64/patch/linux-2.6.11/interrupt.h xen/arch/ia64/patch/linux-2.6.11/io.h xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.11/kregs.h xen/arch/ia64/patch/linux-2.6.11/mca_asm.h xen/arch/ia64/patch/linux-2.6.11/page.h xen/arch/ia64/patch/linux-2.6.11/pal.S xen/arch/ia64/patch/linux-2.6.11/pal.h xen/arch/ia64/patch/linux-2.6.11/processor.h xen/arch/ia64/patch/linux-2.6.11/ptrace.h xen/arch/ia64/patch/linux-2.6.11/setup.c xen/arch/ia64/patch/linux-2.6.11/swiotlb.c xen/arch/ia64/patch/linux-2.6.11/system.h xen/arch/ia64/patch/linux-2.6.11/unaligned.c xen/arch/ia64/process.c xen/arch/ia64/tools/README.RunVT xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vcpu.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_entry.S xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_interrupt.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_minstate.h xen/arch/ia64/vmx_phy_mode.c xen/arch/ia64/vmx_process.c xen/arch/ia64/vmx_utility.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vmx_vsa.S xen/arch/ia64/vtlb.c xen/arch/ia64/xenmem.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c xen/common/elf.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/mmu_context.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/regs.h xen/include/asm-ia64/serial.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/virt_event.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_mm_def.h xen/include/asm-ia64/vmx_pal.h xen/include/asm-ia64/vmx_pal_vsa.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_ptrace.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/vtm.h xen/include/asm-ia64/xenprocessor.h xen/include/asm-ia64/xensystem.h xen/include/xen/sched.h
line diff
     1.1 --- a/.rootkeys	Mon May 23 15:22:15 2005 +0000
     1.2 +++ b/.rootkeys	Mon May 23 15:29:59 2005 +0000
     1.3 @@ -1068,12 +1068,16 @@ 421098b3ys5GAr4z6_H1jD33oem82g xen/arch/
     1.4  4272a8e4lavI6DrTvqaIhXeR5RuKBw xen/arch/ia64/ivt.S
     1.5  421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
     1.6  421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
     1.7 +428b9f38Gp0KcPokG9Nq5v1rGk2FkA xen/arch/ia64/mmio.c
     1.8  425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
     1.9  425ae516cGqvMzGtihTEsQXAXsuOhQ xen/arch/ia64/patch/linux-2.6.11/efi.c
    1.10  425ae516Y1A4q4_Kfre3qnDj7lbHJg xen/arch/ia64/patch/linux-2.6.11/entry.S
    1.11 +428bb037eJ4qs48I-tUdhht5_95obA xen/arch/ia64/patch/linux-2.6.11/entry.h
    1.12 +428bb037jPbybWNkNymaqkFr83vT6Q xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h
    1.13  425ae516txAP-owjzpTJ7ThfzWR8nw xen/arch/ia64/patch/linux-2.6.11/hardirq.h
    1.14  425ae516PDO1ESDHXHVeDNvlqUfmdQ xen/arch/ia64/patch/linux-2.6.11/head.S
    1.15  425ae516JR7HWvt1zxJ-wLvEWmJGgg xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
    1.16 +428bb037UxfxIhZaslk-qHazO4w0yg xen/arch/ia64/patch/linux-2.6.11/ia64regs.h
    1.17  425ae516AHRNmaVuZjJY-9YjmKRDqg xen/arch/ia64/patch/linux-2.6.11/interrupt.h
    1.18  425ae516U2wFUzrUJQUpy3z38jZHsQ xen/arch/ia64/patch/linux-2.6.11/io.h
    1.19  425ae516GGRmXijPBLC5ii6yWOn0rg xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
    1.20 @@ -1083,11 +1087,12 @@ 425ae5160-9wHxh0tOnIjavEjt6W0A xen/arch/
    1.21  425ae516N7SaORdbodDr90tmtCzYXw xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
    1.22  425ae516WDLrfEA4zr40d00z0VIWPg xen/arch/ia64/patch/linux-2.6.11/page.h
    1.23  425ae516pVQ75NhdItT593SiWI0lbQ xen/arch/ia64/patch/linux-2.6.11/pal.S
    1.24 +428bb037THuiyhERFP8RhRgapNkWXg xen/arch/ia64/patch/linux-2.6.11/pal.h
    1.25  425ae516QfmjiF_a-mabAXqV8Imzkg xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
    1.26  425ae516EWaNOBEnc1xnphTbRmNZsw xen/arch/ia64/patch/linux-2.6.11/processor.h
    1.27 +428bb037KSxe7_UyqseK5bWhGe3KwA xen/arch/ia64/patch/linux-2.6.11/ptrace.h
    1.28  425ae516LecDyXlwh3NLBtHZKXmMcA xen/arch/ia64/patch/linux-2.6.11/series
    1.29  425ae516RFiPn2CGkpJ21LM-1lJcQg xen/arch/ia64/patch/linux-2.6.11/setup.c
    1.30 -425ae516FX_10YaKGMU8Ysf7kkdm_A xen/arch/ia64/patch/linux-2.6.11/swiotlb.c
    1.31  425ae516p4ICTkjqNYEfYFxqULj4dw xen/arch/ia64/patch/linux-2.6.11/system.h
    1.32  425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
    1.33  425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
    1.34 @@ -1142,6 +1147,7 @@ 41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/
    1.35  41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
    1.36  421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
    1.37  421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
    1.38 +428b9f38JJDW35iDn5DlfXTu700rkQ xen/arch/ia64/tools/README.RunVT
    1.39  421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
    1.40  42376c6dfyY0eq8MS2dK3BW2rFuEGg xen/arch/ia64/tools/README.xenia64linux
    1.41  421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
    1.42 @@ -1152,6 +1158,20 @@ 4252ace7H2dIMPFeFwczAVoP4yAHxA xen/arch/
    1.43  4252ace74lKUPFnO8PmF0Dtpk7Xkng xen/arch/ia64/tools/privify/privify_elf64.c
    1.44  41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
    1.45  421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
    1.46 +428b9f38PglyXM-mJJfo19ycuQrEhw xen/arch/ia64/vlsapic.c
    1.47 +428b9f38EmpBsMHL3WbOZoieteBGdQ xen/arch/ia64/vmmu.c
    1.48 +428b9f38hU-X5aX0MIY3EU0Yw4PjcA xen/arch/ia64/vmx_entry.S
    1.49 +428b9f38S76bWI96g7uPLmE-uAcmdg xen/arch/ia64/vmx_init.c
    1.50 +428b9f385AMSyCRYBsckQClQY4ZgHA xen/arch/ia64/vmx_interrupt.c
    1.51 +428b9f380IOjPmj0N6eelH-WJjl1xg xen/arch/ia64/vmx_ivt.S
    1.52 +428b9f38Y7tp9uyNRdru3lPDXLjOCA xen/arch/ia64/vmx_minstate.h
    1.53 +428b9f38H9Pz0ZhRUT0-11A6jceE1w xen/arch/ia64/vmx_phy_mode.c
    1.54 +428b9f38pXU56r2OjoFW2Z8H1XY17w xen/arch/ia64/vmx_process.c
    1.55 +428b9f38GmZxD-GMDnQB3m7tOoukTA xen/arch/ia64/vmx_utility.c
    1.56 +428b9f38Pflg6Z4CtXeVGv7dyEOM4g xen/arch/ia64/vmx_vcpu.c
    1.57 +428b9f38Y7p7hXHWx9QF_oYUjdD__g xen/arch/ia64/vmx_virt.c
    1.58 +428b9f38EL7qKbbKkhBNr0KzMLS4Gg xen/arch/ia64/vmx_vsa.S
    1.59 +428b9f3805WejQ1E-OqAPANPAu8vPw xen/arch/ia64/vtlb.c
    1.60  41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
    1.61  4272adaeit9raZ9KnjO_wR4Ii9LJNQ xen/arch/ia64/xenirq.c
    1.62  427162263zDUiPmTj-lP4eGyXs5eIg xen/arch/ia64/xenmem.c
    1.63 @@ -1291,7 +1311,21 @@ 421098b7GkWOnlzSmPvNAhByOSZ1Dw xen/inclu
    1.64  421098b7FK3xgShpnH0I0Ou3O4fJ2Q xen/include/asm-ia64/tlb.h
    1.65  421098b78IGdFOGUlPmpS7h_QBmoFg xen/include/asm-ia64/vcpu.h
    1.66  421098b7PiAencgmBFGAqALU-V5rqQ xen/include/asm-ia64/vhpt.h
    1.67 +428b9f38_b0DgWwkJcBEsTdEmO9WNQ xen/include/asm-ia64/virt_event.h
    1.68 +428b9f38B0KbUj3o2FBQJ5tmIIMDHg xen/include/asm-ia64/vmmu.h
    1.69 +428b9f38ewjoJ-RL-2lsXFT04H2aag xen/include/asm-ia64/vmx.h
    1.70 +428b9f38coGlYeXx-7hpvfCTAPOd7w xen/include/asm-ia64/vmx_mm_def.h
    1.71 +428b9f387tov0OtOEeF8fVWSR2v5Pg xen/include/asm-ia64/vmx_pal.h
    1.72 +428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
    1.73 +428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
    1.74 +428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
    1.75 +428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
    1.76 +428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
    1.77 +428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
    1.78 +428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
    1.79 +428e120a-H-bqn10zOlnhlzlVEuW8A xen/include/asm-ia64/xenprocessor.h
    1.80  421098b7LfwIHQ2lRYWhO4ruEXqIuQ xen/include/asm-ia64/xenserial.h
    1.81 +428e120esS-Tp1mX5VoUrsGJDNY_ow xen/include/asm-ia64/xensystem.h
    1.82  40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
    1.83  3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
    1.84  3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
     2.1 --- a/xen/arch/ia64/Makefile	Mon May 23 15:22:15 2005 +0000
     2.2 +++ b/xen/arch/ia64/Makefile	Mon May 23 15:29:59 2005 +0000
     2.3 @@ -10,6 +10,12 @@ OBJS = xensetup.o setup.o time.o irq.o i
     2.4  	extable.o linuxextable.o xenirq.o xentime.o \
     2.5  	regionreg.o entry.o unaligned.o privop.o vcpu.o \
     2.6  	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
     2.7 +
     2.8 +ifeq ($(CONFIG_VTI),y)
     2.9 +OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
    2.10 +	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
    2.11 +	vtlb.o mmio.o vlsapic.o
    2.12 +endif
    2.13  # perfmon.o
    2.14  # unwind.o needed for kernel unwinding (rare)
    2.15  
    2.16 @@ -24,9 +30,22 @@ default: $(OBJS) head.o ia64lib.o xen.ld
    2.17  		-Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
    2.18  	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
    2.19  
    2.20 -asm-offsets.s: asm-offsets.c
    2.21 +asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
    2.22  	$(CC) $(CFLAGS) -S -o $@ $<
    2.23  
    2.24 +$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
     2.25 +# Need such a symbolic link to make the Linux headers available
    2.26 +	[ -e $(BASEDIR)/include/linux ] \
    2.27 +	 || ln -s $(BASEDIR)/include/xen $(BASEDIR)/include/linux
    2.28 +	[ -e $(BASEDIR)/include/asm-ia64/xen ] \
    2.29 +	 || ln -s $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
     2.30 +# Break the circular dependency on asm-offsets.h
    2.31 +	[ -f $(BASEDIR)/include/asm-ia64/asm-offsets.h ] \
    2.32 +	 || echo "#define IA64_TASK_SIZE 0" > $(BASEDIR)/include/asm-ia64/asm-offsets.h
     2.33 +# Bad hack. Force asm-offsets.h out-of-date
    2.34 +	 sleep 1
    2.35 +	 touch $@
    2.36 +
    2.37  # I'm sure a Makefile wizard would know a better way to do this
    2.38  xen.lds.s: xen.lds.S
    2.39  	$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
    2.40 @@ -36,7 +55,7 @@ ia64lib.o:
    2.41  	$(MAKE) -C lib && cp lib/ia64lib.o .
    2.42  
    2.43  clean:
    2.44 -	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
    2.45 +	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
    2.46  	rm -f lib/*.o
    2.47  
    2.48  # setup.o contains bits of compile.h so it must be blown away
     3.1 --- a/xen/arch/ia64/Rules.mk	Mon May 23 15:22:15 2005 +0000
     3.2 +++ b/xen/arch/ia64/Rules.mk	Mon May 23 15:29:59 2005 +0000
     3.3 @@ -1,6 +1,7 @@
     3.4  ########################################
     3.5  # ia64-specific definitions
     3.6  
     3.7 +CONFIG_VTI	?= n
     3.8  ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
     3.9  CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
    3.10  endif
    3.11 @@ -17,4 +18,7 @@ CFLAGS  += -Wno-pointer-arith -Wredundan
    3.12  CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
    3.13  CFLAGS	+= -ffixed-r13 -mfixed-range=f12-f15,f32-f127
    3.14  CFLAGS	+= -w -g
    3.15 +ifeq ($(CONFIG_VTI),y)
    3.16 +CFLAGS  += -DCONFIG_VTI
    3.17 +endif
    3.18  LDFLAGS := -g
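
The CONFIG_VTI knob added above defaults to n and, when enabled, injects -DCONFIG_VTI into CFLAGS; every VT-i code path elsewhere in this changeset is then gated on that macro. A minimal sketch of the pattern (the function names here are hypothetical, not taken from this changeset):

    /* Built with CFLAGS += -DCONFIG_VTI when the tree is configured
     * with CONFIG_VTI=y; otherwise the VT-i hook compiles away. */
    #ifdef CONFIG_VTI
    static void vmx_setup(void) { /* VT-i-only initialization */ }
    #endif

    void arch_setup(void)
    {
    #ifdef CONFIG_VTI
            vmx_setup();            /* present only in VT-i builds */
    #endif
    }

Selecting the option from the command line would look like "make CONFIG_VTI=y", matching the ?= default above.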
     4.1 --- a/xen/arch/ia64/acpi.c	Mon May 23 15:22:15 2005 +0000
     4.2 +++ b/xen/arch/ia64/acpi.c	Mon May 23 15:29:59 2005 +0000
     4.3 @@ -1,9 +1,16 @@
     4.4  /*
     4.5   *  acpi.c - Architecture-Specific Low-Level ACPI Support
     4.6   *
     4.7 - *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
     4.8 - *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
     4.9 - *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
    4.10 + *  Copyright (C) 1999 VA Linux Systems
    4.11 + *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
    4.12 + *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
    4.13 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    4.14 + *  Copyright (C) 2000 Intel Corp.
    4.15 + *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
    4.16 + *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
    4.17 + *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
    4.18 + *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
    4.19 + *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
    4.20   *
    4.21   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    4.22   *
    4.23 @@ -19,667 +26,651 @@
    4.24   *
    4.25   *  You should have received a copy of the GNU General Public License
    4.26   *  along with this program; if not, write to the Free Software
    4.27 - *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    4.28 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
    4.29   *
    4.30   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    4.31   */
    4.32  
    4.33 -#include <xen/config.h>
    4.34 -#include <xen/kernel.h>
    4.35 -#include <xen/init.h>
    4.36 -#include <xen/types.h>
    4.37 -/*#include <xen/stddef.h>*/
    4.38 -#include <xen/slab.h>
    4.39 -//#include <xen/pci.h>
    4.40 -/*#include <xen/bootmem.h>*/
    4.41 -#include <xen/irq.h>
    4.42 -#include <xen/acpi.h>
    4.43 -//#include <asm/mpspec.h>
    4.44 +#include <linux/config.h>
    4.45 +#include <linux/module.h>
    4.46 +#include <linux/init.h>
    4.47 +#include <linux/kernel.h>
    4.48 +#include <linux/sched.h>
    4.49 +#include <linux/smp.h>
    4.50 +#include <linux/string.h>
    4.51 +#include <linux/types.h>
    4.52 +#include <linux/irq.h>
    4.53 +#include <linux/acpi.h>
    4.54 +#include <linux/efi.h>
    4.55 +#include <linux/mmzone.h>
    4.56  #include <asm/io.h>
    4.57 -//#include <asm/apic.h>
    4.58 -//#include <asm/apicdef.h>
    4.59 +//#include <asm/iosapic.h>
    4.60 +#include <asm/machvec.h>
    4.61  #include <asm/page.h>
    4.62 -/*#include <asm/pgtable.h>*/
    4.63 -#include <asm/pgalloc.h>
    4.64 -//#include <asm/io_apic.h>
    4.65 -#include <asm/acpi.h>
    4.66 -/*#include <asm/save_state.h>*/
    4.67 -//#include <asm/smpboot.h>
    4.68 +#include <asm/system.h>
    4.69 +#include <asm/numa.h>
    4.70 +#include <asm/sal.h>
    4.71 +//#include <asm/cyclone.h>
    4.72  
    4.73 +#define BAD_MADT_ENTRY(entry, end) (                                        \
    4.74 +		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
    4.75 +		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
    4.76  
    4.77  #define PREFIX			"ACPI: "
    4.78  
    4.79 -int acpi_lapic = 0;
    4.80 -int acpi_ioapic = 0;
    4.81 +void (*pm_idle) (void);
    4.82 +EXPORT_SYMBOL(pm_idle);
    4.83 +void (*pm_power_off) (void);
    4.84  
    4.85 -/* --------------------------------------------------------------------------
    4.86 -                              Boot-time Configuration
    4.87 -   -------------------------------------------------------------------------- */
    4.88 +unsigned char acpi_kbd_controller_present = 1;
    4.89 +unsigned char acpi_legacy_devices;
    4.90 +
    4.91 +const char *
    4.92 +acpi_get_sysname (void)
    4.93 +{
    4.94 +#ifdef CONFIG_IA64_GENERIC
    4.95 +	unsigned long rsdp_phys;
    4.96 +	struct acpi20_table_rsdp *rsdp;
    4.97 +	struct acpi_table_xsdt *xsdt;
    4.98 +	struct acpi_table_header *hdr;
    4.99 +
   4.100 +	rsdp_phys = acpi_find_rsdp();
   4.101 +	if (!rsdp_phys) {
   4.102 +		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
   4.103 +		return "dig";
   4.104 +	}
   4.105 +
   4.106 +	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
   4.107 +	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
   4.108 +		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
   4.109 +		return "dig";
   4.110 +	}
   4.111 +
   4.112 +	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
   4.113 +	hdr = &xsdt->header;
   4.114 +	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
   4.115 +		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
   4.116 +		return "dig";
   4.117 +	}
   4.118 +
   4.119 +	if (!strcmp(hdr->oem_id, "HP")) {
   4.120 +		return "hpzx1";
   4.121 +	}
   4.122 +	else if (!strcmp(hdr->oem_id, "SGI")) {
   4.123 +		return "sn2";
   4.124 +	}
   4.125 +
   4.126 +	return "dig";
   4.127 +#else
   4.128 +# if defined (CONFIG_IA64_HP_SIM)
   4.129 +	return "hpsim";
   4.130 +# elif defined (CONFIG_IA64_HP_ZX1)
   4.131 +	return "hpzx1";
   4.132 +# elif defined (CONFIG_IA64_SGI_SN2)
   4.133 +	return "sn2";
   4.134 +# elif defined (CONFIG_IA64_DIG)
   4.135 +	return "dig";
   4.136 +# else
   4.137 +#	error Unknown platform.  Fix acpi.c.
   4.138 +# endif
   4.139 +#endif
   4.140 +}
   4.141  
   4.142  #ifdef CONFIG_ACPI_BOOT
   4.143 -//int acpi_noirq __initdata = 0;  /* skip ACPI IRQ initialization */
   4.144 -int acpi_ht __initdata = 1;     /* enable HT */
   4.145  
   4.146 -enum acpi_irq_model_id		acpi_irq_model;
   4.147 +#define ACPI_MAX_PLATFORM_INTERRUPTS	256
   4.148  
   4.149 +#if 0
   4.150 +/* Array to record platform interrupt vectors for generic interrupt routing. */
   4.151 +int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
   4.152 +	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
   4.153 +};
   4.154 +
   4.155 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
   4.156  
   4.157  /*
   4.158 - * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
   4.159 - * to map the target physical address. The problem is that set_fixmap()
   4.160 - * provides a single page, and it is possible that the page is not
   4.161 - * sufficient.
   4.162 - * By using this area, we can map up to MAX_IO_APICS pages temporarily,
   4.163 - * i.e. until the next __va_range() call.
   4.164 - *
   4.165 - * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
   4.166 - * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
   4.167 - * count idx down while incrementing the phys address.
   4.168 + * Interrupt routing API for device drivers.  Provides interrupt vector for
   4.169 + * a generic platform event.  Currently only CPEI is implemented.
   4.170   */
   4.171 -char *__acpi_map_table(unsigned long phys, unsigned long size)
   4.172 +int
   4.173 +acpi_request_vector (u32 int_type)
   4.174  {
   4.175 -	unsigned long base, offset, mapped_size;
   4.176 -	int idx;
   4.177 -
   4.178 -	if (phys + size < 8*1024*1024) 
   4.179 -		return __va(phys); 
   4.180 +	int vector = -1;
   4.181  
   4.182 -	offset = phys & (PAGE_SIZE - 1);
   4.183 -	mapped_size = PAGE_SIZE - offset;
   4.184 -#ifndef XEN
   4.185 -// where is FIX_ACPI_*? hack for now, FIXME later
   4.186 -	set_fixmap(FIX_ACPI_END, phys);
   4.187 -	base = fix_to_virt(FIX_ACPI_END);
   4.188 -
   4.189 -	/*
   4.190 -	 * Most cases can be covered by the below.
   4.191 -	 */
   4.192 -	idx = FIX_ACPI_END;
   4.193 -	while (mapped_size < size) {
   4.194 -		if (--idx < FIX_ACPI_BEGIN)
   4.195 -			return 0;	/* cannot handle this */
   4.196 -		phys += PAGE_SIZE;
   4.197 -		set_fixmap(idx, phys);
   4.198 -		mapped_size += PAGE_SIZE;
   4.199 -	}
   4.200 +	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
   4.201 +		/* corrected platform error interrupt */
   4.202 +		vector = platform_intr_list[int_type];
   4.203 +	} else
   4.204 +		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
   4.205 +	return vector;
   4.206 +}
   4.207  #endif
   4.208 -
   4.209 -	return ((unsigned char *) base + offset);
   4.210 +char *
   4.211 +__acpi_map_table (unsigned long phys_addr, unsigned long size)
   4.212 +{
   4.213 +	return __va(phys_addr);
   4.214  }
   4.215  
   4.216 -
   4.217 -#ifdef CONFIG_X86_LOCAL_APIC
   4.218 +/* --------------------------------------------------------------------------
   4.219 +                            Boot-time Table Parsing
   4.220 +   -------------------------------------------------------------------------- */
   4.221  
   4.222 -static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
   4.223 +static int			total_cpus __initdata;
   4.224 +static int			available_cpus __initdata;
   4.225 +struct acpi_table_madt *	acpi_madt __initdata;
   4.226 +static u8			has_8259;
   4.227  
   4.228 -
   4.229 +#if 0
   4.230  static int __init
   4.231 -acpi_parse_madt (
   4.232 -	unsigned long		phys_addr,
   4.233 -	unsigned long		size)
   4.234 +acpi_parse_lapic_addr_ovr (
   4.235 +	acpi_table_entry_header *header, const unsigned long end)
   4.236  {
   4.237 -	struct acpi_table_madt	*madt = NULL;
   4.238 +	struct acpi_table_lapic_addr_ovr *lapic;
   4.239  
   4.240 -	if (!phys_addr || !size)
   4.241 +	lapic = (struct acpi_table_lapic_addr_ovr *) header;
   4.242 +
   4.243 +	if (BAD_MADT_ENTRY(lapic, end))
   4.244  		return -EINVAL;
   4.245  
   4.246 -	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
   4.247 -	if (!madt) {
   4.248 -		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
   4.249 -		return -ENODEV;
   4.250 -	}
   4.251 +	acpi_table_print_madt_entry(header);
   4.252  
   4.253 -	if (madt->lapic_address)
   4.254 -		acpi_lapic_addr = (u64) madt->lapic_address;
   4.255 -
   4.256 -	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
   4.257 -		madt->lapic_address);
   4.258 -
   4.259 -	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
   4.260 -
   4.261 +	if (lapic->address) {
   4.262 +		iounmap((void *) ipi_base_addr);
   4.263 +		ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
   4.264 +	}
   4.265  	return 0;
   4.266  }
   4.267  
   4.268  
   4.269  static int __init
   4.270 -acpi_parse_lapic (
   4.271 -	acpi_table_entry_header *header)
   4.272 +acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
   4.273  {
   4.274 -	struct acpi_table_lapic	*processor = NULL;
   4.275 +	struct acpi_table_lsapic *lsapic;
   4.276  
   4.277 -	processor = (struct acpi_table_lapic*) header;
   4.278 -	if (!processor)
   4.279 +	lsapic = (struct acpi_table_lsapic *) header;
   4.280 +
   4.281 +	if (BAD_MADT_ENTRY(lsapic, end))
   4.282  		return -EINVAL;
   4.283  
   4.284  	acpi_table_print_madt_entry(header);
   4.285  
   4.286 -	mp_register_lapic (
   4.287 -		processor->id,					   /* APIC ID */
   4.288 -		processor->flags.enabled);			  /* Enabled? */
   4.289 +	printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
   4.290  
   4.291 +	if (!lsapic->flags.enabled)
   4.292 +		printk(" disabled");
   4.293 +	else {
   4.294 +		printk(" enabled");
   4.295 +#ifdef CONFIG_SMP
   4.296 +		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
   4.297 +		if (hard_smp_processor_id()
   4.298 +		    == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
   4.299 +			printk(" (BSP)");
   4.300 +#endif
   4.301 +		++available_cpus;
   4.302 +	}
   4.303 +
   4.304 +	printk("\n");
   4.305 +
   4.306 +	total_cpus++;
   4.307  	return 0;
   4.308  }
   4.309  
   4.310  
   4.311  static int __init
   4.312 -acpi_parse_lapic_addr_ovr (
   4.313 -	acpi_table_entry_header *header)
   4.314 +acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
   4.315  {
   4.316 -	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
   4.317 -
   4.318 -	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
   4.319 -	if (!lapic_addr_ovr)
   4.320 -		return -EINVAL;
   4.321 -
   4.322 -	acpi_lapic_addr = lapic_addr_ovr->address;
   4.323 +	struct acpi_table_lapic_nmi *lacpi_nmi;
   4.324  
   4.325 -	return 0;
   4.326 -}
   4.327 +	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
   4.328  
   4.329 -static int __init
   4.330 -acpi_parse_lapic_nmi (
   4.331 -	acpi_table_entry_header *header)
   4.332 -{
   4.333 -	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
   4.334 -
   4.335 -	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
   4.336 -	if (!lapic_nmi)
   4.337 +	if (BAD_MADT_ENTRY(lacpi_nmi, end))
   4.338  		return -EINVAL;
   4.339  
   4.340  	acpi_table_print_madt_entry(header);
   4.341  
   4.342 -	if (lapic_nmi->lint != 1)
   4.343 -		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
   4.344 -
   4.345 -	return 0;
   4.346 -}
   4.347 -
   4.348 -#endif /*CONFIG_X86_LOCAL_APIC*/
   4.349 -
   4.350 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   4.351 -
   4.352 -static int __init
   4.353 -acpi_parse_ioapic (
   4.354 -	acpi_table_entry_header *header)
   4.355 -{
   4.356 -	struct acpi_table_ioapic *ioapic = NULL;
   4.357 -
   4.358 -	ioapic = (struct acpi_table_ioapic*) header;
   4.359 -	if (!ioapic)
   4.360 -		return -EINVAL;
   4.361 - 
   4.362 -	acpi_table_print_madt_entry(header);
   4.363 -
   4.364 -	mp_register_ioapic (
   4.365 -		ioapic->id,
   4.366 -		ioapic->address,
   4.367 -		ioapic->global_irq_base);
   4.368 - 
   4.369 +	/* TBD: Support lapic_nmi entries */
   4.370  	return 0;
   4.371  }
   4.372  
   4.373  
   4.374  static int __init
   4.375 -acpi_parse_int_src_ovr (
   4.376 -	acpi_table_entry_header *header)
   4.377 +acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
   4.378  {
   4.379 -	struct acpi_table_int_src_ovr *intsrc = NULL;
   4.380 +	struct acpi_table_iosapic *iosapic;
   4.381  
   4.382 -	intsrc = (struct acpi_table_int_src_ovr*) header;
   4.383 -	if (!intsrc)
   4.384 +	iosapic = (struct acpi_table_iosapic *) header;
   4.385 +
   4.386 +	if (BAD_MADT_ENTRY(iosapic, end))
   4.387  		return -EINVAL;
   4.388  
   4.389  	acpi_table_print_madt_entry(header);
   4.390  
   4.391 -	mp_override_legacy_irq (
   4.392 -		intsrc->bus_irq,
   4.393 -		intsrc->flags.polarity,
   4.394 -		intsrc->flags.trigger,
   4.395 -		intsrc->global_irq);
   4.396 +	iosapic_init(iosapic->address, iosapic->global_irq_base);
   4.397  
   4.398  	return 0;
   4.399  }
   4.400  
   4.401  
   4.402  static int __init
   4.403 -acpi_parse_nmi_src (
   4.404 -	acpi_table_entry_header *header)
   4.405 +acpi_parse_plat_int_src (
   4.406 +	acpi_table_entry_header *header, const unsigned long end)
   4.407  {
   4.408 -	struct acpi_table_nmi_src *nmi_src = NULL;
   4.409 +	struct acpi_table_plat_int_src *plintsrc;
   4.410 +	int vector;
   4.411  
   4.412 -	nmi_src = (struct acpi_table_nmi_src*) header;
   4.413 -	if (!nmi_src)
   4.414 +	plintsrc = (struct acpi_table_plat_int_src *) header;
   4.415 +
   4.416 +	if (BAD_MADT_ENTRY(plintsrc, end))
   4.417  		return -EINVAL;
   4.418  
   4.419  	acpi_table_print_madt_entry(header);
   4.420  
   4.421 -	/* TBD: Support nimsrc entries? */
   4.422 +	/*
   4.423 +	 * Get vector assignment for this interrupt, set attributes,
   4.424 +	 * and program the IOSAPIC routing table.
   4.425 +	 */
   4.426 +	vector = iosapic_register_platform_intr(plintsrc->type,
   4.427 +						plintsrc->global_irq,
   4.428 +						plintsrc->iosapic_vector,
   4.429 +						plintsrc->eid,
   4.430 +						plintsrc->id,
   4.431 +						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   4.432 +						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
   4.433  
   4.434 +	platform_intr_list[plintsrc->type] = vector;
   4.435  	return 0;
   4.436  }
   4.437  
   4.438 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   4.439 +
   4.440 +static int __init
   4.441 +acpi_parse_int_src_ovr (
   4.442 +	acpi_table_entry_header *header, const unsigned long end)
   4.443 +{
   4.444 +	struct acpi_table_int_src_ovr *p;
   4.445 +
   4.446 +	p = (struct acpi_table_int_src_ovr *) header;
   4.447 +
   4.448 +	if (BAD_MADT_ENTRY(p, end))
   4.449 +		return -EINVAL;
   4.450 +
   4.451 +	acpi_table_print_madt_entry(header);
   4.452 +
   4.453 +	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
   4.454 +				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   4.455 +				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
   4.456 +	return 0;
   4.457 +}
   4.458  
   4.459  
   4.460 -static unsigned long __init
   4.461 -acpi_scan_rsdp (
   4.462 -	unsigned long		start,
   4.463 -	unsigned long		length)
   4.464 +static int __init
   4.465 +acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
   4.466  {
   4.467 -	unsigned long		offset = 0;
   4.468 -	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
   4.469 +	struct acpi_table_nmi_src *nmi_src;
   4.470  
   4.471 -	/*
   4.472 -	 * Scan all 16-byte boundaries of the physical memory region for the
   4.473 -	 * RSDP signature.
   4.474 -	 */
   4.475 -	for (offset = 0; offset < length; offset += 16) {
   4.476 -		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
   4.477 -			continue;
   4.478 -		return (start + offset);
   4.479 +	nmi_src = (struct acpi_table_nmi_src*) header;
   4.480 +
   4.481 +	if (BAD_MADT_ENTRY(nmi_src, end))
   4.482 +		return -EINVAL;
   4.483 +
   4.484 +	acpi_table_print_madt_entry(header);
   4.485 +
    4.486 +	/* TBD: Support nmi_src entries */
   4.487 +	return 0;
   4.488 +}
   4.489 +/* Hook from generic ACPI tables.c */
   4.490 +void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
   4.491 +{
   4.492 +	if (!strncmp(oem_id, "IBM", 3) &&
   4.493 +	    (!strncmp(oem_table_id, "SERMOW", 6))){
   4.494 +
    4.495 +	/* Unfortunately ITC_DRIFT is not yet part of the
   4.496 +		 * official SAL spec, so the ITC_DRIFT bit is not
   4.497 +		 * set by the BIOS on this hardware.
   4.498 +		 */
   4.499 +		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
   4.500 +
   4.501 +		/*Start cyclone clock*/
   4.502 +		cyclone_setup(0);
   4.503  	}
   4.504 +}
   4.505 +
   4.506 +static int __init
   4.507 +acpi_parse_madt (unsigned long phys_addr, unsigned long size)
   4.508 +{
   4.509 +	if (!phys_addr || !size)
   4.510 +		return -EINVAL;
   4.511 +
   4.512 +	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
   4.513 +
   4.514 +	/* remember the value for reference after free_initmem() */
   4.515 +#ifdef CONFIG_ITANIUM
   4.516 +	has_8259 = 1; /* Firmware on old Itanium systems is broken */
   4.517 +#else
   4.518 +	has_8259 = acpi_madt->flags.pcat_compat;
   4.519 +#endif
   4.520 +	iosapic_system_init(has_8259);
   4.521 +
   4.522 +	/* Get base address of IPI Message Block */
   4.523 +
   4.524 +	if (acpi_madt->lapic_address)
   4.525 +		ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
   4.526 +
   4.527 +	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
   4.528 +
   4.529 +	acpi_madt_oem_check(acpi_madt->header.oem_id,
   4.530 +		acpi_madt->header.oem_table_id);
   4.531  
   4.532  	return 0;
   4.533  }
   4.534 +#endif
   4.535  
   4.536 +#ifdef CONFIG_ACPI_NUMA
   4.537 +
   4.538 +#undef SLIT_DEBUG
   4.539 +
   4.540 +#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
   4.541 +
   4.542 +static int __initdata srat_num_cpus;			/* number of cpus */
   4.543 +static u32 __initdata pxm_flag[PXM_FLAG_LEN];
   4.544 +#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
   4.545 +#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
   4.546 +/* maps to convert between proximity domain and logical node ID */
   4.547 +int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
   4.548 +int __initdata nid_to_pxm_map[MAX_NUMNODES];
   4.549 +static struct acpi_table_slit __initdata *slit_table;
   4.550 +
   4.551 +/*
   4.552 + * ACPI 2.0 SLIT (System Locality Information Table)
   4.553 + * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
   4.554 + */
   4.555 +void __init
   4.556 +acpi_numa_slit_init (struct acpi_table_slit *slit)
   4.557 +{
   4.558 +	u32 len;
   4.559 +
   4.560 +	len = sizeof(struct acpi_table_header) + 8
   4.561 +		+ slit->localities * slit->localities;
   4.562 +	if (slit->header.length != len) {
   4.563 +		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
   4.564 +		       len, slit->header.length);
   4.565 +		memset(numa_slit, 10, sizeof(numa_slit));
   4.566 +		return;
   4.567 +	}
   4.568 +	slit_table = slit;
   4.569 +}
   4.570 +
   4.571 +void __init
   4.572 +acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
   4.573 +{
   4.574 +	/* record this node in proximity bitmap */
   4.575 +	pxm_bit_set(pa->proximity_domain);
   4.576 +
   4.577 +	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
   4.578 +	/* nid should be overridden as logical node id later */
   4.579 +	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
   4.580 +	srat_num_cpus++;
   4.581 +}
   4.582 +
   4.583 +void __init
   4.584 +acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
   4.585 +{
   4.586 +	unsigned long paddr, size;
   4.587 +	u8 pxm;
   4.588 +	struct node_memblk_s *p, *q, *pend;
   4.589 +
   4.590 +	pxm = ma->proximity_domain;
   4.591 +
   4.592 +	/* fill node memory chunk structure */
   4.593 +	paddr = ma->base_addr_hi;
   4.594 +	paddr = (paddr << 32) | ma->base_addr_lo;
   4.595 +	size = ma->length_hi;
   4.596 +	size = (size << 32) | ma->length_lo;
   4.597 +
   4.598 +	/* Ignore disabled entries */
   4.599 +	if (!ma->flags.enabled)
   4.600 +		return;
   4.601 +
   4.602 +	/* record this node in proximity bitmap */
   4.603 +	pxm_bit_set(pxm);
   4.604 +
   4.605 +	/* Insertion sort based on base address */
   4.606 +	pend = &node_memblk[num_node_memblks];
   4.607 +	for (p = &node_memblk[0]; p < pend; p++) {
   4.608 +		if (paddr < p->start_paddr)
   4.609 +			break;
   4.610 +	}
   4.611 +	if (p < pend) {
   4.612 +		for (q = pend - 1; q >= p; q--)
   4.613 +			*(q + 1) = *q;
   4.614 +	}
   4.615 +	p->start_paddr = paddr;
   4.616 +	p->size = size;
   4.617 +	p->nid = pxm;
   4.618 +	num_node_memblks++;
   4.619 +}
   4.620 +
   4.621 +void __init
   4.622 +acpi_numa_arch_fixup (void)
   4.623 +{
   4.624 +	int i, j, node_from, node_to;
   4.625 +
   4.626 +	/* If there's no SRAT, fix the phys_id */
   4.627 +	if (srat_num_cpus == 0) {
   4.628 +		node_cpuid[0].phys_id = hard_smp_processor_id();
   4.629 +		return;
   4.630 +	}
   4.631 +
   4.632 +	/* calculate total number of nodes in system from PXM bitmap */
   4.633 +	numnodes = 0;		/* init total nodes in system */
   4.634 +
   4.635 +	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
   4.636 +	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
   4.637 +	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
   4.638 +		if (pxm_bit_test(i)) {
   4.639 +			pxm_to_nid_map[i] = numnodes;
   4.640 +			node_set_online(numnodes);
   4.641 +			nid_to_pxm_map[numnodes++] = i;
   4.642 +		}
   4.643 +	}
   4.644 +
   4.645 +	/* set logical node id in memory chunk structure */
   4.646 +	for (i = 0; i < num_node_memblks; i++)
   4.647 +		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
   4.648 +
   4.649 +	/* assign memory bank numbers for each chunk on each node */
   4.650 +	for (i = 0; i < numnodes; i++) {
   4.651 +		int bank;
   4.652 +
   4.653 +		bank = 0;
   4.654 +		for (j = 0; j < num_node_memblks; j++)
   4.655 +			if (node_memblk[j].nid == i)
   4.656 +				node_memblk[j].bank = bank++;
   4.657 +	}
   4.658 +
   4.659 +	/* set logical node id in cpu structure */
   4.660 +	for (i = 0; i < srat_num_cpus; i++)
   4.661 +		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
   4.662 +
   4.663 +	printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
   4.664 +	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
   4.665 +
   4.666 +	if (!slit_table) return;
   4.667 +	memset(numa_slit, -1, sizeof(numa_slit));
   4.668 +	for (i=0; i<slit_table->localities; i++) {
   4.669 +		if (!pxm_bit_test(i))
   4.670 +			continue;
   4.671 +		node_from = pxm_to_nid_map[i];
   4.672 +		for (j=0; j<slit_table->localities; j++) {
   4.673 +			if (!pxm_bit_test(j))
   4.674 +				continue;
   4.675 +			node_to = pxm_to_nid_map[j];
   4.676 +			node_distance(node_from, node_to) =
   4.677 +				slit_table->entry[i*slit_table->localities + j];
   4.678 +		}
   4.679 +	}
   4.680 +
   4.681 +#ifdef SLIT_DEBUG
   4.682 +	printk("ACPI 2.0 SLIT locality table:\n");
   4.683 +	for (i = 0; i < numnodes; i++) {
   4.684 +		for (j = 0; j < numnodes; j++)
   4.685 +			printk("%03d ", node_distance(i,j));
   4.686 +		printk("\n");
   4.687 +	}
   4.688 +#endif
   4.689 +}
   4.690 +#endif /* CONFIG_ACPI_NUMA */
   4.691 +
   4.692 +#if 0
   4.693 +unsigned int
   4.694 +acpi_register_gsi (u32 gsi, int polarity, int trigger)
   4.695 +{
   4.696 +	return acpi_register_irq(gsi, polarity, trigger);
   4.697 +}
   4.698 +EXPORT_SYMBOL(acpi_register_gsi);
   4.699 +static int __init
   4.700 +acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
   4.701 +{
   4.702 +	struct acpi_table_header *fadt_header;
   4.703 +	struct fadt_descriptor_rev2 *fadt;
   4.704 +
   4.705 +	if (!phys_addr || !size)
   4.706 +		return -EINVAL;
   4.707 +
   4.708 +	fadt_header = (struct acpi_table_header *) __va(phys_addr);
   4.709 +	if (fadt_header->revision != 3)
   4.710 +		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */
   4.711 +
   4.712 +	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
   4.713 +
   4.714 +	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
   4.715 +		acpi_kbd_controller_present = 0;
   4.716 +
   4.717 +	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
   4.718 +		acpi_legacy_devices = 1;
   4.719 +
   4.720 +	acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
   4.721 +	return 0;
   4.722 +}
   4.723 +#endif
   4.724  
   4.725  unsigned long __init
   4.726  acpi_find_rsdp (void)
   4.727  {
   4.728 -	unsigned long		rsdp_phys = 0;
   4.729 +	unsigned long rsdp_phys = 0;
   4.730  
   4.731 -	/*
   4.732 -	 * Scan memory looking for the RSDP signature. First search EBDA (low
   4.733 -	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
   4.734 -	 */
   4.735 -	rsdp_phys = acpi_scan_rsdp (0, 0x400);
   4.736 -	if (!rsdp_phys)
   4.737 -		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
   4.738 -
   4.739 +	if (efi.acpi20)
   4.740 +		rsdp_phys = __pa(efi.acpi20);
   4.741 +	else if (efi.acpi)
   4.742 +		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
   4.743  	return rsdp_phys;
   4.744  }
   4.745  
   4.746 -
   4.747 -/*
   4.748 - * acpi_boot_init()
   4.749 - *  called from setup_arch(), always.
   4.750 - *	1. maps ACPI tables for later use
   4.751 - *	2. enumerates lapics
   4.752 - *	3. enumerates io-apics
   4.753 - *
   4.754 - * side effects:
   4.755 - * 	acpi_lapic = 1 if LAPIC found
   4.756 - *	acpi_ioapic = 1 if IOAPIC found
   4.757 - *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
   4.758 - *	if acpi_blacklisted() acpi_disabled = 1;
   4.759 - *	acpi_irq_model=...
   4.760 - *	...
   4.761 - *
   4.762 - * return value: (currently ignored)
   4.763 - *	0: success
   4.764 - *	!0: failure
   4.765 - */
   4.766 +#if 0
   4.767  int __init
   4.768  acpi_boot_init (void)
   4.769  {
   4.770 -	int			result = 0;
   4.771 -
   4.772 -	if (acpi_disabled && !acpi_ht)
   4.773 -		return(1);
   4.774  
   4.775  	/*
   4.776 -	 * The default interrupt routing model is PIC (8259).  This gets
   4.777 -	 * overriden if IOAPICs are enumerated (below).
   4.778 -	 */
   4.779 -	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
   4.780 -
   4.781 -	/* 
   4.782 -	 * Initialize the ACPI boot-time table parser.
   4.783 -	 */
   4.784 -	result = acpi_table_init();
   4.785 -	if (result) {
   4.786 -#ifndef XEN
   4.787 -// hack for now, FIXME later
   4.788 -		acpi_disabled = 1;
   4.789 -#endif
   4.790 -		return result;
   4.791 -	}
   4.792 -
   4.793 -	result = acpi_blacklisted();
   4.794 -	if (result) {
   4.795 -		printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
   4.796 -#ifndef XEN
   4.797 -// hack for now, FIXME later
   4.798 -		acpi_disabled = 1;
   4.799 -#endif
   4.800 -		return result;
   4.801 -	}
   4.802 -
   4.803 -#ifdef CONFIG_X86_LOCAL_APIC
   4.804 -
   4.805 -	/* 
   4.806  	 * MADT
   4.807  	 * ----
   4.808  	 * Parse the Multiple APIC Description Table (MADT), if exists.
   4.809 -	 * Note that this table provides platform SMP configuration 
   4.810 +	 * Note that this table provides platform SMP configuration
   4.811  	 * information -- the successor to MPS tables.
   4.812  	 */
   4.813  
   4.814 -	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
   4.815 -	if (!result) {
   4.816 -		return 0;
   4.817 -	}
   4.818 -	else if (result < 0) {
   4.819 -		printk(KERN_ERR PREFIX "Error parsing MADT\n");
   4.820 -		return result;
   4.821 -	}
   4.822 -	else if (result > 1) 
   4.823 -		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
   4.824 -
   4.825 -	/* 
   4.826 -	 * Local APIC
   4.827 -	 * ----------
   4.828 -	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
   4.829 -	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
   4.830 -	 */
   4.831 -
   4.832 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
   4.833 -	if (result < 0) {
   4.834 -		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   4.835 -		return result;
   4.836 +	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
   4.837 +		printk(KERN_ERR PREFIX "Can't find MADT\n");
   4.838 +		goto skip_madt;
   4.839  	}
   4.840  
   4.841 -	mp_register_lapic_address(acpi_lapic_addr);
   4.842 -
   4.843 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
   4.844 -	if (!result) { 
   4.845 -		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
   4.846 -		/* TBD: Cleanup to allow fallback to MPS */
   4.847 -		return -ENODEV;
   4.848 -	}
   4.849 -	else if (result < 0) {
   4.850 -		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
   4.851 -		/* TBD: Cleanup to allow fallback to MPS */
   4.852 -		return result;
   4.853 -	}
   4.854 +	/* Local APIC */
   4.855  
   4.856 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
   4.857 -	if (result < 0) {
   4.858 -		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   4.859 -		/* TBD: Cleanup to allow fallback to MPS */
   4.860 -		return result;
   4.861 -	}
   4.862 +	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
   4.863 +		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   4.864  
   4.865 -	acpi_lapic = 1;
   4.866 -
   4.867 -#endif /*CONFIG_X86_LOCAL_APIC*/
   4.868 +	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
   4.869 +		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
   4.870  
   4.871 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   4.872 +	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
   4.873 +		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   4.874  
   4.875 -	/* 
   4.876 -	 * I/O APIC 
   4.877 -	 * --------
   4.878 -	 */
   4.879 +	/* I/O APIC */
   4.880 +
   4.881 +	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
   4.882 +		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
   4.883 +
   4.884 +	/* System-Level Interrupt Routing */
   4.885 +
   4.886 +	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
   4.887 +		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
   4.888 +
   4.889 +	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
   4.890 +		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   4.891 +
   4.892 +	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
   4.893 +		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   4.894 +  skip_madt:
   4.895  
   4.896  	/*
   4.897 -	 * ACPI interpreter is required to complete interrupt setup,
   4.898 -	 * so if it is off, don't enumerate the io-apics with ACPI.
   4.899 -	 * If MPS is present, it will handle them,
   4.900 -	 * otherwise the system will stay in PIC mode
   4.901 -	 */
   4.902 -	if (acpi_disabled || acpi_noirq) {
   4.903 -		return 1;
   4.904 -	}
   4.905 -
   4.906 -	/*
   4.907 -	 * if "noapic" boot option, don't look for IO-APICs
   4.908 +	 * FADT says whether a legacy keyboard controller is present.
   4.909 +	 * The FADT also contains an SCI_INT line, by which the system
   4.910 +	 * gets interrupts such as power and sleep buttons.  If it's not
   4.911 +	 * on a Legacy interrupt, it needs to be setup.
   4.912  	 */
   4.913 -	if (ioapic_setup_disabled()) {
   4.914 -		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
   4.915 -			"due to 'noapic' option.\n");
   4.916 -		return 1;
   4.917 -        }
   4.918 -
   4.919 +	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
   4.920 +		printk(KERN_ERR PREFIX "Can't find FADT\n");
   4.921  
   4.922 -	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
   4.923 -	if (!result) { 
   4.924 -		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
   4.925 -		return -ENODEV;
   4.926 -	}
   4.927 -	else if (result < 0) {
   4.928 -		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
   4.929 -		return result;
   4.930 +#ifdef CONFIG_SMP
   4.931 +	if (available_cpus == 0) {
   4.932 +		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
   4.933 +		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
   4.934 +		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
   4.935 +		available_cpus = 1; /* We've got at least one of these, no? */
   4.936  	}
   4.937 -
   4.938 -	/* Build a default routing table for legacy (ISA) interrupts. */
   4.939 -	mp_config_acpi_legacy_irqs();
   4.940 -
   4.941 -	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
   4.942 -	if (result < 0) {
   4.943 -		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   4.944 -		/* TBD: Cleanup to allow fallback to MPS */
   4.945 -		return result;
   4.946 -	}
   4.947 +	smp_boot_data.cpu_count = available_cpus;
   4.948  
   4.949 -	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
   4.950 -	if (result < 0) {
   4.951 -		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   4.952 -		/* TBD: Cleanup to allow fallback to MPS */
   4.953 -		return result;
   4.954 +	smp_build_cpu_map();
   4.955 +# ifdef CONFIG_ACPI_NUMA
   4.956 +	if (srat_num_cpus == 0) {
   4.957 +		int cpu, i = 1;
   4.958 +		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
   4.959 +			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
   4.960 +				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
   4.961  	}
   4.962 -
   4.963 -	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
   4.964 +	build_cpu_to_node_map();
   4.965 +# endif
   4.966 +#endif
   4.967 +	/* Make boot-up look pretty */
   4.968 +	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
   4.969 +	return 0;
   4.970 +}
   4.971 +int
   4.972 +acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
   4.973 +{
   4.974 +	int vector;
   4.975  
   4.976 -	acpi_irq_balance_set(NULL);
   4.977 -
   4.978 -	acpi_ioapic = 1;
   4.979 +	if (has_8259 && gsi < 16)
   4.980 +		*irq = isa_irq_to_vector(gsi);
   4.981 +	else {
   4.982 +		vector = gsi_to_vector(gsi);
   4.983 +		if (vector == -1)
   4.984 +			return -1;
   4.985  
   4.986 -	if (acpi_lapic && acpi_ioapic)
   4.987 -		smp_found_config = 1;
   4.988 -
   4.989 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   4.990 -
   4.991 +		*irq = vector;
   4.992 +	}
   4.993  	return 0;
   4.994  }
   4.995  
   4.996 -#endif /*CONFIG_ACPI_BOOT*/
   4.997 -
   4.998 -#ifdef	CONFIG_ACPI_BUS
   4.999 -/*
  4.1000 - * "acpi_pic_sci=level" (current default)
  4.1001 - * programs the PIC-mode SCI to Level Trigger.
  4.1002 - * (NO-OP if the BIOS set Level Trigger already)
  4.1003 - *
  4.1004 - * If a PIC-mode SCI is not recogznied or gives spurious IRQ7's
  4.1005 - * it may require Edge Trigger -- use "acpi_pic_sci=edge"
  4.1006 - * (NO-OP if the BIOS set Edge Trigger already)
  4.1007 - *
  4.1008 - * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
  4.1009 - * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
  4.1010 - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
  4.1011 - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
  4.1012 - */
  4.1013 -
  4.1014 -static __initdata int	acpi_pic_sci_trigger;	/* 0: level, 1: edge */
  4.1015 -
  4.1016 -void __init
  4.1017 -acpi_pic_sci_set_trigger(unsigned int irq)
  4.1018 +int
  4.1019 +acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
  4.1020  {
  4.1021 -	unsigned char mask = 1 << (irq & 7);
  4.1022 -	unsigned int port = 0x4d0 + (irq >> 3);
  4.1023 -	unsigned char val = inb(port);
  4.1024 -
  4.1025 -	
  4.1026 -	printk(PREFIX "IRQ%d SCI:", irq);
  4.1027 -	if (!(val & mask)) {
  4.1028 -		printk(" Edge");
  4.1029 -
  4.1030 -		if (!acpi_pic_sci_trigger) {
  4.1031 -			printk(" set to Level");
  4.1032 -			outb(val | mask, port);
  4.1033 -		}
  4.1034 -	} else {
  4.1035 -		printk(" Level");
  4.1036 -
  4.1037 -		if (acpi_pic_sci_trigger) {
  4.1038 -			printk(" set to Edge");
  4.1039 -			outb(val | mask, port);
  4.1040 -		}
  4.1041 -	}
  4.1042 -	printk(" Trigger.\n");
  4.1043 -}
  4.1044 -
  4.1045 -int __init
  4.1046 -acpi_pic_sci_setup(char *str)
  4.1047 -{
  4.1048 -	while (str && *str) {
  4.1049 -		if (strncmp(str, "level", 5) == 0)
  4.1050 -			acpi_pic_sci_trigger = 0;	/* force level trigger */
  4.1051 -		if (strncmp(str, "edge", 4) == 0)
  4.1052 -			acpi_pic_sci_trigger = 1;	/* force edge trigger */
  4.1053 -		str = strchr(str, ',');
  4.1054 -		if (str)
  4.1055 -			str += strspn(str, ", \t");
  4.1056 -	}
  4.1057 -	return 1;
  4.1058 -}
  4.1059 -
  4.1060 -__setup("acpi_pic_sci=", acpi_pic_sci_setup);
  4.1061 -
  4.1062 -#endif /* CONFIG_ACPI_BUS */
  4.1063 -
  4.1064 -
  4.1065 -
  4.1066 -/* --------------------------------------------------------------------------
  4.1067 -                              Low-Level Sleep Support
  4.1068 -   -------------------------------------------------------------------------- */
  4.1069 -
  4.1070 -#ifdef CONFIG_ACPI_SLEEP
  4.1071 -
  4.1072 -#define DEBUG
  4.1073 -
  4.1074 -#ifdef DEBUG
  4.1075 -#include <xen/serial.h>
  4.1076 -#endif
  4.1077 -
  4.1078 -/* address in low memory of the wakeup routine. */
  4.1079 -unsigned long acpi_wakeup_address = 0;
  4.1080 -
  4.1081 -/* new page directory that we will be using */
  4.1082 -static pmd_t *pmd;
  4.1083 -
  4.1084 -/* saved page directory */
  4.1085 -static pmd_t saved_pmd;
  4.1086 -
  4.1087 -/* page which we'll use for the new page directory */
  4.1088 -static pte_t *ptep;
  4.1089 -
  4.1090 -extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
  4.1091 +	if (has_8259 && gsi < 16)
  4.1092 +		return isa_irq_to_vector(gsi);
  4.1093  
  4.1094 -/*
  4.1095 - * acpi_create_identity_pmd
  4.1096 - *
  4.1097 - * Create a new, identity mapped pmd.
  4.1098 - *
  4.1099 - * Do this by creating new page directory, and marking all the pages as R/W
  4.1100 - * Then set it as the new Page Middle Directory.
  4.1101 - * And, of course, flush the TLB so it takes effect.
  4.1102 - *
  4.1103 - * We save the address of the old one, for later restoration.
  4.1104 - */
  4.1105 -static void acpi_create_identity_pmd (void)
  4.1106 -{
  4.1107 -	pgd_t *pgd;
  4.1108 -	int i;
  4.1109 -
  4.1110 -	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
  4.1111 -
  4.1112 -	/* fill page with low mapping */
  4.1113 -	for (i = 0; i < PTRS_PER_PTE; i++)
  4.1114 -		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
  4.1115 -
  4.1116 -	pgd = pgd_offset(current->active_mm, 0);
  4.1117 -	pmd = pmd_alloc(current->mm,pgd, 0);
  4.1118 -
  4.1119 -	/* save the old pmd */
  4.1120 -	saved_pmd = *pmd;
  4.1121 -
  4.1122 -	/* set the new one */
  4.1123 -	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
  4.1124 -
  4.1125 -	/* flush the TLB */
  4.1126 -	local_flush_tlb();
  4.1127 -}
  4.1128 -
  4.1129 -/*
  4.1130 - * acpi_restore_pmd
  4.1131 - *
  4.1132 - * Restore the old pmd saved by acpi_create_identity_pmd and
  4.1133 - * free the page that said function alloc'd
  4.1134 - */
  4.1135 -static void acpi_restore_pmd (void)
  4.1136 -{
  4.1137 -	set_pmd(pmd, saved_pmd);
  4.1138 -	local_flush_tlb();
  4.1139 -	free_page((unsigned long)ptep);
  4.1140 +	return iosapic_register_intr(gsi,
  4.1141 +			(polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
  4.1142 +			(trigger == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
  4.1143  }
  4.1144 -
  4.1145 -/**
  4.1146 - * acpi_save_state_mem - save kernel state
  4.1147 - *
  4.1148 - * Create an identity mapped page table and copy the wakeup routine to
  4.1149 - * low memory.
  4.1150 - */
  4.1151 -int acpi_save_state_mem (void)
  4.1152 -{
  4.1153 -	acpi_create_identity_pmd();
  4.1154 -	acpi_copy_wakeup_routine(acpi_wakeup_address);
  4.1155 -
  4.1156 -	return 0;
  4.1157 -}
  4.1158 -
  4.1159 -/**
  4.1160 - * acpi_save_state_disk - save kernel state to disk
  4.1161 - *
  4.1162 - */
  4.1163 -int acpi_save_state_disk (void)
  4.1164 -{
  4.1165 -	return 1;
  4.1166 -}
  4.1167 -
  4.1168 -/*
  4.1169 - * acpi_restore_state
  4.1170 - */
  4.1171 -void acpi_restore_state_mem (void)
  4.1172 -{
  4.1173 -	acpi_restore_pmd();
  4.1174 -}
  4.1175 -
  4.1176 -/**
  4.1177 - * acpi_reserve_bootmem - do _very_ early ACPI initialisation
  4.1178 - *
  4.1179 - * We allocate a page in low memory for the wakeup
  4.1180 - * routine for when we come back from a sleep state. The
  4.1181 - * runtime allocator allows specification of <16M pages, but not
  4.1182 - * <1M pages.
  4.1183 - */
  4.1184 -void __init acpi_reserve_bootmem(void)
  4.1185 -{
  4.1186 -	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
  4.1187 -	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
  4.1188 -}
  4.1189 -
  4.1190 -void do_suspend_lowlevel_s4bios(int resume)
  4.1191 -{
  4.1192 -	if (!resume) {
  4.1193 -		save_processor_context();
  4.1194 -		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
  4.1195 -		acpi_enter_sleep_state_s4bios();
  4.1196 -		return;
  4.1197 -	}
  4.1198 -acpi_sleep_done:
  4.1199 -	restore_processor_context();
  4.1200 -}
  4.1201 -
  4.1202 -
  4.1203 -#endif /*CONFIG_ACPI_SLEEP*/
  4.1204 -
  4.1205 +EXPORT_SYMBOL(acpi_register_irq);
  4.1206 +#endif
  4.1207 +#endif /* CONFIG_ACPI_BOOT */
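
The BAD_MADT_ENTRY() macro introduced at the top of this file is the common guard used by all the new MADT subtable parsers (the lsapic, iosapic, NMI, and interrupt-source-override handlers): it rejects a NULL entry, an entry that would run past the end of the mapped table, and an entry whose recorded length disagrees with its type. A self-contained sketch of the same check, with simplified stand-in types:

    #include <stddef.h>

    /* Simplified stand-in for the ACPI subtable header (illustrative). */
    typedef struct {
            unsigned char type;
            unsigned char length;
    } acpi_table_entry_header;

    /* Mirrors BAD_MADT_ENTRY(): NULL, overrun, or wrong-length entries
     * are all rejected before a parser dereferences the subtable. */
    static int bad_madt_entry(const void *entry, unsigned long end,
                              size_t expected_len)
    {
            return !entry
                || (unsigned long)entry + expected_len > end
                || ((const acpi_table_entry_header *)entry)->length
                       != expected_len;
    }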
     5.1 --- a/xen/arch/ia64/asm-offsets.c	Mon May 23 15:22:15 2005 +0000
     5.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon May 23 15:29:59 2005 +0000
     5.3 @@ -9,6 +9,9 @@
     5.4  #include <asm/processor.h>
     5.5  #include <asm/ptrace.h>
     5.6  #include <public/xen.h>
     5.7 +#ifdef CONFIG_VTI
     5.8 +#include <asm/tlb.h>
     5.9 +#endif // CONFIG_VTI
    5.10  
    5.11  #define task_struct exec_domain
    5.12  
    5.13 @@ -93,6 +96,24 @@ void foo(void)
    5.14  	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
    5.15  	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
    5.16  	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
    5.17 +#ifdef CONFIG_VTI
    5.18 +	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct xen_regs, r4));
    5.19 +	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct xen_regs, r5));
    5.20 +	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct xen_regs, r6));
    5.21 +	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct xen_regs, r7));
    5.22 +	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct xen_regs, cr_iipa));
    5.23 +	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
    5.24 +	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
    5.25 +	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
    5.26 +	DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
    5.27 +	DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
    5.28 +	DEFINE(RFI_IFS_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ifs));
    5.29 +	DEFINE(RFI_PFS_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_pfs));
    5.30 +	DEFINE(SWITCH_MRR5_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.mrr5));
    5.31 +	DEFINE(SWITCH_MRR6_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.mrr6));
    5.32 +	DEFINE(SWITCH_MRR7_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.mrr7));
    5.33 +	DEFINE(SWITCH_MPTA_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.mpta));
    5.34 +#endif  //CONFIG_VTI
    5.35  	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
    5.36  	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
    5.37  	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
    5.38 @@ -164,6 +185,13 @@ void foo(void)
    5.39  
    5.40  	BLANK();
    5.41  
    5.42 +#ifdef  CONFIG_VTI
    5.43 +	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
    5.44 +	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
    5.45 +	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
    5.46 +
    5.47 +	BLANK();
    5.48 +#endif  //CONFIG_VTI
    5.49  	//DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
    5.50  	//DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
    5.51  	//DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
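
The DEFINE() entries added above follow the standard asm-offsets technique: the file is compiled but never linked, and the build extracts each constant from the generated assembly into a header that the .S files include. A minimal self-contained sketch of the mechanism (names are illustrative, not the Xen build's own):

    #include <stddef.h>

    /* Emits a "->SYM value" marker into the compiler's assembly
     * output; a build script greps these markers into #define lines. */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " : : "i" (val))

    struct demo_regs { long r4, r5, r6, r7; };

    void foo(void)
    {
        DEFINE(DEMO_REGS_R6_OFFSET, offsetof(struct demo_regs, r6));
    }
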
     6.1 --- a/xen/arch/ia64/dom_fw.c	Mon May 23 15:22:15 2005 +0000
     6.2 +++ b/xen/arch/ia64/dom_fw.c	Mon May 23 15:29:59 2005 +0000
     6.3 @@ -13,6 +13,7 @@
     6.4  #include <asm/io.h>
     6.5  #include <asm/pal.h>
     6.6  #include <asm/sal.h>
     6.7 +#include <xen/acpi.h>
     6.8  
     6.9  #include <asm/dom_fw.h>
    6.10  
    6.11 @@ -297,6 +298,71 @@ void print_md(efi_memory_desc_t *md)
    6.12  #endif
    6.13  }
    6.14  
    6.15 +#define LSAPIC_NUM 16	// TEMP
    6.16 +static u32 lsapic_flag=1;
    6.17 +
    6.18 +/* Provide only one LP to guest */
    6.19 +static int 
    6.20 +acpi_update_lsapic (acpi_table_entry_header *header)
    6.21 +{
    6.22 +	struct acpi_table_lsapic *lsapic;
    6.23 +
    6.24 +	lsapic = (struct acpi_table_lsapic *) header;
    6.25 +	if (!lsapic)
    6.26 +		return -EINVAL;
    6.27 +
    6.28 +	if (lsapic->flags.enabled && lsapic_flag) {
    6.29 +		printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
    6.30 +		lsapic_flag = 0; /* disable all the following processors */
    6.31 +	} else if (lsapic->flags.enabled) {
    6.32 +		printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
    6.33 +		lsapic->flags.enabled = 0;
    6.34 +	} else
    6.35 +		printk("lsapic entry is already disabled: 0x%lx\n", (u64)lsapic);
    6.36 +
    6.37 +	return 0;
    6.38 +}
    6.39 +
    6.40 +static int
    6.41 +acpi_update_madt_checksum (unsigned long phys_addr, unsigned long size)
    6.42 +{
    6.43 +	u8 checksum = 0;
    6.44 +	u8 *ptr;
    6.45 +	int len;
    6.46 +	struct acpi_table_madt *acpi_madt;
    6.47 +
    6.48 +	if (!phys_addr || !size)
    6.49 +		return -EINVAL;
    6.50 +
    6.51 +	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
    6.52 +	acpi_madt->header.checksum = 0;
    6.53 +
    6.54 +	/* re-calculate MADT checksum */
    6.55 +	ptr = (u8 *)acpi_madt;
    6.56 +	len = acpi_madt->header.length;
    6.57 +	while (len > 0) {
    6.58 +		checksum = (u8)(checksum + (*ptr++));
    6.59 +		len--;
    6.60 +	}
    6.61 +	acpi_madt->header.checksum = 0x0 - checksum;
    6.62 +
    6.63 +	return 0;
    6.64 +}
    6.65 +
    6.66 +/* base is physical address of acpi table */
    6.67 +void touch_acpi_table(void)
    6.68 +{
    6.69 +	int count = 0;
    6.70 +	count = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, NR_CPUS);
    6.71 +	if (count < 1)
    6.72 +		printk("Error parsing MADT - no LSAPIC entries\n");
    6.73 +	printk("Total %d lsapic entries\n", count);
    6.74 +	acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
    6.75 +
    6.76 +	return;
    6.77 +}
    6.78 +
    6.79 +
    6.80  struct ia64_boot_param *
    6.81  dom_fw_init (struct domain *d, char *args, int arglen, char *fw_mem, int fw_mem_size)
    6.82  {
    6.83 @@ -414,6 +480,9 @@ dom_fw_init (struct domain *d, char *arg
    6.84  			printf(" MPS=%0xlx",efi_tables[i].table);
    6.85  			i++;
    6.86  		}
    6.87 +
    6.88 +		touch_acpi_table();
    6.89 +
    6.90  		if (efi.acpi20) {
    6.91  			efi_tables[i].guid = ACPI_20_TABLE_GUID;
    6.92  			efi_tables[i].table = __pa(efi.acpi20);
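
acpi_update_madt_checksum() above relies on the ACPI rule that all bytes of a table, including the checksum byte itself, must sum to zero modulo 256. A standalone sketch of that rule, using plain C types instead of the Xen/ACPI ones:

    #include <stdint.h>
    #include <stddef.h>

    /* Compute the value for a table's checksum byte: zero the field
     * first, sum every byte, and store the byte that makes the total
     * wrap to zero (mod 256). */
    uint8_t acpi_table_checksum(const uint8_t *table, size_t len)
    {
        uint8_t sum = 0;

        while (len--)
            sum = (uint8_t)(sum + *table++);
        return (uint8_t)(0x100 - sum);
    }
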
     7.1 --- a/xen/arch/ia64/domain.c	Mon May 23 15:22:15 2005 +0000
     7.2 +++ b/xen/arch/ia64/domain.c	Mon May 23 15:29:59 2005 +0000
     7.3 @@ -3,6 +3,11 @@
     7.4   *
     7.5   *  Pentium III FXSR, SSE support
     7.6   *	Gareth Hughes <gareth@valinux.com>, May 2000
     7.7 + *
     7.8 + *  Copyright (C) 2005 Intel Co
     7.9 + *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
    7.10 + *
    7.11 + * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add CONFIG_VTI domain support
    7.12   */
    7.13  
    7.14  #include <xen/config.h>
    7.15 @@ -32,12 +37,23 @@
    7.16  #include <asm/asm-offsets.h>  /* for IA64_THREAD_INFO_SIZE */
    7.17  
    7.18  #include <asm/vcpu.h>   /* for function declarations */
    7.19 +#ifdef CONFIG_VTI
    7.20 +#include <asm/vmx.h>
    7.21 +#include <asm/vmx_vcpu.h>
    7.22 +#include <asm/pal.h>
    7.23 +#endif // CONFIG_VTI
    7.24  
    7.25  #define CONFIG_DOMAIN0_CONTIGUOUS
    7.26  unsigned long dom0_start = -1L;
    7.27 +#ifdef CONFIG_VTI
    7.28  unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
    7.29  //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    7.30 +unsigned long dom0_align = 256*1024*1024;
    7.31 +#else // CONFIG_VTI
    7.32 +unsigned long dom0_size = 256*1024*1024; //FIXME: Should be configurable
    7.33 +//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    7.34  unsigned long dom0_align = 64*1024*1024;
    7.35 +#endif // CONFIG_VTI
    7.36  #ifdef DOMU_BUILD_STAGING
    7.37  unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
    7.38  unsigned long domU_staging_start;
    7.39 @@ -151,6 +167,58 @@ void arch_free_exec_domain_struct(struct
    7.40  	free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
    7.41  }
    7.42  
    7.43 +#ifdef CONFIG_VTI
    7.44 +void arch_do_createdomain(struct exec_domain *ed)
    7.45 +{
    7.46 +	struct domain *d = ed->domain;
    7.47 +	struct thread_info *ti = alloc_thread_info(ed);
    7.48 +
    7.49 +	/* If the domain is a VMX domain, the shared info area is created
    7.50 +	 * by the domain itself, which then notifies the HV via a specific
    7.51 +	 * hypercall. If the domain is xenolinux, the shared info area is
    7.52 +	 * created by the HV.
    7.53 +	 * Since we don't yet know whether the domain is VMX at this point
    7.54 +	 * (dom0 at parse time, domN at build time), postpone the possible
    7.55 +	 * allocation.
    7.56 +	 */
    7.57 +
    7.58 +	/* FIXME: Because the full virtual cpu info is placed in this area,
    7.59 +	 * it is unlikely to fit into one shared-info page. Later we need
    7.60 +	 * to split the vcpu context out of vcpu_info so that it conforms
    7.61 +	 * to the normal xen convention.
    7.62 +	 */
    7.63 +	d->shared_info = NULL;
    7.64 +	ed->vcpu_info = (void *)alloc_xenheap_page();
    7.65 +	if (!ed->vcpu_info) {
    7.66 +   		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
    7.67 +   		while (1);
    7.68 +	}
    7.69 +	memset(ed->vcpu_info, 0, PAGE_SIZE);
    7.70 +
    7.71 +	/* Clear thread_info to clear some important fields, like preempt_count */
    7.72 +	memset(ti, 0, sizeof(struct thread_info));
    7.73 +
    7.74 +	/* Allocate per-domain vTLB and vhpt */
    7.75 +	ed->arch.vtlb = init_domain_tlb(ed);
    7.76 +
    7.77 +	/* The physical->machine page table will be allocated during
    7.78 +	 * final setup, since the maximum pfn number is not yet known
    7.79 +	 * at this stage.
    7.80 +	 */
    7.81 +
    7.82 +	/* FIXME: This is identity mapped address for xenheap. 
    7.83 +	 * Do we need it at all?
    7.84 +	 */
    7.85 +	d->xen_vastart = 0xf000000000000000;
    7.86 +	d->xen_vaend = 0xf300000000000000;
    7.87 +	d->breakimm = 0x1000;
    7.88 +
    7.89 +	// stay on kernel stack because may get interrupts!
    7.90 +	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    7.91 +	// to user stack
    7.92 +	ed->arch._thread.on_ustack = 0;
    7.93 +}
    7.94 +#else // CONFIG_VTI
    7.95  void arch_do_createdomain(struct exec_domain *ed)
    7.96  {
    7.97  	struct domain *d = ed->domain;
    7.98 @@ -193,6 +261,7 @@ void arch_do_createdomain(struct exec_do
    7.99  	// to user stack
   7.100  	ed->arch._thread.on_ustack = 0;
   7.101  }
   7.102 +#endif // CONFIG_VTI
   7.103  
   7.104  void arch_do_boot_vcpu(struct exec_domain *p)
   7.105  {
   7.106 @@ -216,6 +285,70 @@ void domain_relinquish_resources(struct 
   7.107  	dummy();
   7.108  }
   7.109  
   7.110 +#ifdef CONFIG_VTI
   7.111 +void new_thread(struct exec_domain *ed,
   7.112 +                unsigned long start_pc,
   7.113 +                unsigned long start_stack,
   7.114 +                unsigned long start_info)
   7.115 +{
   7.116 +	struct domain *d = ed->domain;
   7.117 +	struct switch_stack *sw;
   7.118 +	struct xen_regs *regs;
   7.119 +	struct ia64_boot_param *bp;
   7.120 +	extern char ia64_ret_from_clone;
   7.121 +	extern char saved_command_line[];
   7.122 +	//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
   7.123 +
   7.124 +
   7.125 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   7.126 +	if (d == dom0) start_pc += dom0_start;
   7.127 +#endif
   7.128 +	regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
   7.129 +	sw = (struct switch_stack *) regs - 1;
   7.130 +	/* Sanity Clear */
   7.131 +	memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
   7.132 +
   7.133 +	if (VMX_DOMAIN(ed)) {
   7.134 +		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
   7.135 +		regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
   7.136 +	} else {
   7.137 +		regs->cr_ipsr = (ia64_getreg(_IA64_REG_PSR)
   7.138 +			| IA64_PSR_BITS_TO_SET | IA64_PSR_BN)
   7.139 +			& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   7.140 +		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   7.141 +	}
   7.142 +	regs->cr_iip = start_pc;
   7.143 +	regs->ar_rsc = 0x0;
   7.144 +	regs->cr_ifs = 0x0;
   7.145 +	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   7.146 +	sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET;
   7.147 +	printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%lx, IA64_STK_OFFSET=%lx, &r8=%p\n",
   7.148 +		ed, regs, sw, sw->ar_bspstore, IA64_STK_OFFSET, &regs->r8);
   7.149 +	printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
   7.150 +
   7.151 +	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   7.152 +	ed->arch._thread.ksp = (unsigned long) sw - 16;
   7.153 +	printk("new_thread, about to call init_all_rr\n");
   7.154 +	if (VMX_DOMAIN(ed)) {
   7.155 +		vmx_init_all_rr(ed);
   7.156 +	} else
   7.157 +		init_all_rr(ed);
   7.158 +	// set up boot parameters (and fake firmware)
   7.159 +	printk("new_thread, about to call dom_fw_setup\n");
   7.160 +	VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   7.161 +	printk("new_thread, done with dom_fw_setup\n");
   7.162 +
   7.163 +	if (VMX_DOMAIN(ed)) {
   7.164 +		/* Virtual processor context setup */
   7.165 +		VMX_VPD(ed, vpsr) = IA64_PSR_BN;
   7.166 +		VPD_CR(ed, dcr) = 0;
   7.167 +	} else {
   7.168 +		// don't forget to set this!
   7.169 +		ed->vcpu_info->arch.banknum = 1;
   7.170 +	}
   7.171 +}
   7.172 +#else // CONFIG_VTI
   7.173 +
   7.174  // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   7.175  // and linux/arch/ia64/kernel/process.c:kernel_thread()
   7.176  void new_thread(struct exec_domain *ed,
   7.177 @@ -272,6 +405,7 @@ printk("new_thread, done with dom_fw_set
   7.178  	// don't forget to set this!
   7.179  	ed->vcpu_info->arch.banknum = 1;
   7.180  }
   7.181 +#endif // CONFIG_VTI
   7.182  
   7.183  static struct page * map_new_domain0_page(unsigned long mpaddr)
   7.184  {
   7.185 @@ -599,6 +733,213 @@ domU_staging_write_32(unsigned long at, 
   7.186  }
   7.187  #endif
   7.188  
   7.189 +#ifdef CONFIG_VTI
   7.190 +/* Depending on whether the domain is a VMX one, a different context
   7.191 + * may be set up here.
   7.192 + */
   7.193 +void
   7.194 +post_arch_do_create_domain(struct exec_domain *ed, int vmx_domain)
   7.195 +{
   7.196 +    struct domain *d = ed->domain;
   7.197 +
   7.198 +    if (!vmx_domain) {
   7.199 +	d->shared_info = (void*)alloc_xenheap_page();
   7.200 +	if (!d->shared_info)
   7.201 +		panic("Allocating shared info for non-vmx domain failed.\n");
   7.202 +	d->shared_info_va = 0xfffd000000000000;
   7.203 +
   7.204 +	printk("Build shared info for non-vmx domain\n");
   7.205 +	build_shared_info(d);
   7.206 +	/* Setup start info area */
   7.207 +    }
   7.208 +}
   7.209 +
   7.210 +/* For a VMX domain, this is invoked when the kernel inside the
   7.211 + * domain actively requests it
   7.212 + */
   7.213 +void build_shared_info(struct domain *d)
   7.214 +{
   7.215 +    int i;
   7.216 +
   7.217 +    /* Set up shared-info area. */
   7.218 +    update_dom_time(d);
   7.219 +    d->shared_info->domain_time = 0;
   7.220 +
   7.221 +    /* Mask all upcalls... */
   7.222 +    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   7.223 +        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   7.224 +
   7.225 +    /* ... */
   7.226 +}
   7.227 +
   7.228 +extern unsigned long running_on_sim;
   7.229 +unsigned int vmx_dom0 = 0;
   7.230 +int construct_dom0(struct domain *d, 
   7.231 +	               unsigned long image_start, unsigned long image_len, 
   7.232 +	               unsigned long initrd_start, unsigned long initrd_len,
   7.233 +	               char *cmdline)
   7.234 +{
   7.235 +    char *dst;
   7.236 +    int i, rc;
   7.237 +    unsigned long pfn, mfn;
   7.238 +    unsigned long nr_pt_pages;
   7.239 +    unsigned long count;
   7.240 +    unsigned long alloc_start, alloc_end;
   7.241 +    struct pfn_info *page = NULL;
   7.242 +    start_info_t *si;
   7.243 +    struct exec_domain *ed = d->exec_domain[0];
   7.244 +    struct domain_setup_info dsi;
   7.245 +    unsigned long p_start;
   7.246 +    unsigned long pkern_start;
   7.247 +    unsigned long pkern_entry;
   7.248 +    unsigned long pkern_end;
   7.249 +    unsigned long ret;
   7.250 +    unsigned long progress = 0;
   7.251 +
   7.252 +//printf("construct_dom0: starting\n");
   7.253 +    /* Sanity! */
   7.254 +#ifndef CLONE_DOMAIN0
   7.255 +    if ( d != dom0 ) 
   7.256 +        BUG();
   7.257 +    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
   7.258 +        BUG();
   7.259 +#endif
   7.260 +
   7.261 +    printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   7.262 +    memset(&dsi, 0, sizeof(struct domain_setup_info));
   7.263 +
   7.264 +    printk("*** LOADING DOMAIN 0 ***\n");
   7.265 +
   7.266 +    alloc_start = dom0_start;
   7.267 +    alloc_end = dom0_start + dom0_size;
   7.268 +    d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE;
   7.269 +    image_start = __va(ia64_boot_param->initrd_start);
   7.270 +    image_len = ia64_boot_param->initrd_size;
   7.271 +
   7.272 +    dsi.image_addr = (unsigned long)image_start;
   7.273 +    dsi.image_len  = image_len;
   7.274 +    rc = parseelfimage(&dsi);
   7.275 +    if ( rc != 0 )
   7.276 +        return rc;
   7.277 +
   7.278 +    /* Temp workaround */
   7.279 +    if (running_on_sim)
   7.280 +	dsi.xen_elf_image = 1;
   7.281 +
   7.282 +    if ((!vmx_enabled) && !dsi.xen_elf_image) {
   7.283 +	printk("Lack of hardware support for unmodified vmx dom0\n");
   7.284 +	panic("");
   7.285 +    }
   7.286 +
   7.287 +    if (vmx_enabled && !dsi.xen_elf_image) {
   7.288 +	printk("Dom0 is vmx domain!\n");
   7.289 +	vmx_dom0 = 1;
   7.290 +    }
   7.291 +
   7.292 +    p_start = dsi.v_start;
   7.293 +    pkern_start = dsi.v_kernstart;
   7.294 +    pkern_end = dsi.v_kernend;
   7.295 +    pkern_entry = dsi.v_kernentry;
   7.296 +
   7.297 +    printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",
   7.298 +	p_start,pkern_start,pkern_end,pkern_entry);
   7.299 +
   7.300 +    if ( (p_start & (PAGE_SIZE-1)) != 0 )
   7.301 +    {
   7.302 +        printk("Initial guest OS must load to a page boundary.\n");
   7.303 +        return -EINVAL;
   7.304 +    }
   7.305 +
   7.306 +    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
   7.307 +           " Kernel image:  %lx->%lx\n"
   7.308 +           " Entry address: %lx\n"
   7.309 +           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
   7.310 +           pkern_start, pkern_end, pkern_entry);
   7.311 +
   7.312 +    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
   7.313 +    {
   7.314 +        printk("Initial guest OS requires too much space\n"
   7.315 +               "(%luMB is greater than %luMB limit)\n",
   7.316 +               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
   7.317 +        return -ENOMEM;
   7.318 +    }
   7.319 +
   7.320 +    // Other sanity check about Dom0 image
   7.321 +
   7.322 +    /* Construct a frame-allocation list for the initial domain, since these
   7.323 +     * pages are allocated by the boot allocator and their pfns are not set up properly
   7.324 +     */
   7.325 +    for ( mfn = (alloc_start>>PAGE_SHIFT); 
   7.326 +          mfn < (alloc_end>>PAGE_SHIFT); 
   7.327 +          mfn++ )
   7.328 +    {
   7.329 +        page = &frame_table[mfn];
   7.330 +        page_set_owner(page, d);
   7.331 +        page->u.inuse.type_info = 0;
   7.332 +        page->count_info        = PGC_allocated | 1;
   7.333 +        list_add_tail(&page->list, &d->page_list);
   7.334 +
   7.335 +	/* Construct 1:1 mapping */
   7.336 +	machine_to_phys_mapping[mfn] = mfn;
   7.337 +    }
   7.338 +
   7.339 +    post_arch_do_create_domain(ed, vmx_dom0);
   7.340 +
   7.341 +    /* Load Dom0 image to its own memory */
   7.342 +    loaddomainelfimage(d,image_start);
   7.343 +
   7.344 +    /* Copy the initial ramdisk. */
   7.345 +
   7.346 +    /* Sync d/i cache conservatively */
   7.347 +    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   7.348 +    if (ret != PAL_STATUS_SUCCESS)
   7.349 +            panic("PAL CACHE FLUSH failed for dom0.\n");
   7.350 +    printk("Sync i/d cache for dom0 image succeeded\n");
   7.351 +
   7.352 +    /* Physical mode emulation initialization, including
   7.353 +     * emulation ID allocation and related memory requests
   7.354 +     */
   7.355 +    physical_mode_init(ed);
   7.356 +    /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
   7.357 +     * for dom0
   7.358 +     */
   7.359 +    d->arch.pmt = NULL;
   7.360 +
   7.361 +    /* Give up the VGA console if DOM0 is configured to grab it. */
   7.362 +    if (cmdline != NULL)
   7.363 +    	console_endboot(strstr(cmdline, "tty0") != NULL);
   7.364 +
   7.365 +    /* VMX specific construction for Dom0, if hardware supports VMX
   7.366 +     * and Dom0 is unmodified image
   7.367 +     */
   7.368 +    printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   7.369 +    if (vmx_dom0)
   7.370 +	vmx_final_setup_domain(dom0);
   7.371 +    
   7.372 +    /* vpd is ready now */
   7.373 +    vlsapic_reset(ed);
   7.374 +    vtm_init(ed);
   7.375 +
   7.376 +    set_bit(_DOMF_constructed, &d->domain_flags);
   7.377 +    new_thread(ed, pkern_entry, 0, 0);
   7.378 +
   7.379 +    // FIXME: Hack for keyboard input
   7.380 +#ifdef CLONE_DOMAIN0
   7.381 +if (d == dom0)
   7.382 +#endif
   7.383 +    serial_input_init();
   7.384 +    if (d == dom0) {
   7.385 +    	ed->vcpu_info->arch.delivery_mask[0] = -1L;
   7.386 +    	ed->vcpu_info->arch.delivery_mask[1] = -1L;
   7.387 +    	ed->vcpu_info->arch.delivery_mask[2] = -1L;
   7.388 +    	ed->vcpu_info->arch.delivery_mask[3] = -1L;
   7.389 +    }
   7.390 +    else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   7.391 +
   7.392 +    return 0;
   7.393 +}
   7.394 +#else //CONFIG_VTI
   7.395 +
   7.396  int construct_dom0(struct domain *d, 
   7.397  	               unsigned long image_start, unsigned long image_len, 
   7.398  	               unsigned long initrd_start, unsigned long initrd_len,
   7.399 @@ -771,6 +1112,7 @@ if (d == dom0)
   7.400  
   7.401  	return 0;
   7.402  }
   7.403 +#endif // CONFIG_VTI
   7.404  
   7.405  // FIXME: When dom0 can construct domains, this goes away (or is rewritten)
   7.406  int construct_domU(struct domain *d,
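
The VTI construct_dom0() above hands every machine frame in [alloc_start, alloc_end) to dom0 and builds a 1:1 machine-to-physical mapping, so dom0's pfns equal its mfns and no separate pmt is needed. A simplified sketch of that loop with stand-in types (the real frame_table and page flags are Xen's, not these):

    #define DEMO_PAGE_SHIFT 14	/* illustrative; 16KB pages */

    struct demo_page_info { void *owner; unsigned long count_info; };

    extern struct demo_page_info demo_frame_table[];
    extern unsigned long demo_machine_to_phys[];

    static void demo_assign_identity(void *d, unsigned long start,
                                     unsigned long end)
    {
        unsigned long mfn;

        for (mfn = start >> DEMO_PAGE_SHIFT;
             mfn < end >> DEMO_PAGE_SHIFT; mfn++) {
            demo_frame_table[mfn].owner = d;      /* page belongs to dom0 */
            demo_frame_table[mfn].count_info = 1; /* allocated, one ref */
            demo_machine_to_phys[mfn] = mfn;      /* 1:1 pfn == mfn */
        }
    }
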
     8.1 --- a/xen/arch/ia64/lib/Makefile	Mon May 23 15:22:15 2005 +0000
     8.2 +++ b/xen/arch/ia64/lib/Makefile	Mon May 23 15:29:59 2005 +0000
     8.3 @@ -9,7 +9,7 @@ OBJS := __divsi3.o __udivsi3.o __modsi3.
     8.4  	bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o	\
     8.5  	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
     8.6  	flush.o ip_fast_csum.o do_csum.o copy_user.o			\
     8.7 -	memset.o strlen.o memcpy.o swiotlb.o
     8.8 +	memset.o strlen.o memcpy.o 
     8.9  
    8.10  default: $(OBJS)
    8.11  	$(LD) -r -o ia64lib.o $(OBJS)
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/ia64/mmio.c	Mon May 23 15:29:59 2005 +0000
     9.3 @@ -0,0 +1,325 @@
     9.4 +
     9.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     9.6 +/*
     9.7 + * mmio.c: MMIO emulation components.
     9.8 + * Copyright (c) 2004, Intel Corporation.
     9.9 + *
    9.10 + * This program is free software; you can redistribute it and/or modify it
    9.11 + * under the terms and conditions of the GNU General Public License,
    9.12 + * version 2, as published by the Free Software Foundation.
    9.13 + *
    9.14 + * This program is distributed in the hope it will be useful, but WITHOUT
    9.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    9.17 + * more details.
    9.18 + *
    9.19 + * You should have received a copy of the GNU General Public License along with
    9.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    9.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    9.22 + *
    9.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
    9.24 + *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
    9.25 + */
    9.26 +
    9.27 +#include <linux/sched.h>
    9.28 +#include <asm/tlb.h>
    9.29 +#include <asm/vmx_mm_def.h>
    9.30 +#include <asm/gcc_intrin.h>
    9.31 +#include <xen/interrupt.h>
    9.32 +#include <asm/vmx_vcpu.h>
    9.33 +
    9.34 +struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
    9.35 +{
    9.36 +    int     i;
    9.37 +    for (i=0; mio_base[i].iot != NOT_IO; i++ ) {
    9.38 +        if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end )
    9.39 +            return &mio_base[i];
    9.40 +    }
    9.41 +    return NULL;
    9.42 +}
    9.43 +
    9.44 +
    9.45 +extern void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma);
    9.46 +static inline void mmio_write(VCPU *vcpu, void *src, u64 dest_pa, size_t s, int ma)
    9.47 +{
    9.48 +    struct virutal_platform_def *v_plat;
    9.49 +    struct mmio_list    *mio;
    9.50 +    
    9.51 +    v_plat = vmx_vcpu_get_plat(vcpu);
    9.52 +    mio = lookup_mmio(dest_pa, v_plat->mmio);
    9.53 +    if ( mio == NULL ) 
    9.54 +        panic ("Wrong address for MMIO\n");
    9.55 +    
    9.56 +    switch (mio->iot) {
    9.57 +    case PIB_MMIO:
    9.58 +        pib_write(vcpu, src, dest_pa - v_plat->pib_base, s, ma);
    9.59 +        break;
    9.60 +    case VGA_BUFF:
    9.61 +    case CHIPSET_IO:
    9.62 +    case LOW_MMIO:
    9.63 +    case LEGACY_IO:
    9.64 +    case IO_SAPIC:
    9.65 +    default:
    9.66 +        break;
    9.67 +    }
    9.68 +    return;
    9.69 +}
    9.70 +
    9.71 +static inline void mmio_read(VCPU *vcpu, u64 src_pa, void *dest, size_t s, int ma)
    9.72 +{
    9.73 +    struct virutal_platform_def *v_plat;
    9.74 +    struct mmio_list    *mio;
    9.75 +    
    9.76 +    v_plat = vmx_vcpu_get_plat(vcpu);
    9.77 +    mio = lookup_mmio(src_pa, v_plat->mmio);
    9.78 +    if ( mio == NULL ) 
    9.79 +        panic ("Wrong address for MMIO\n");
    9.80 +    
    9.81 +    switch (mio->iot) {
    9.82 +    case PIB_MMIO:
    9.83 +        pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    9.84 +        break;
    9.85 +    case VGA_BUFF:
    9.86 +    case CHIPSET_IO:
    9.87 +    case LOW_MMIO:
    9.88 +    case LEGACY_IO:
    9.89 +    case IO_SAPIC:
    9.90 +    default:
    9.91 +        break;
    9.92 +    }
    9.93 +    return;
    9.94 +}
    9.95 +
    9.96 +/*
    9.97 + * Read or write data in guest virtual address mode.
    9.98 + */
    9.99 + 
   9.100 +void
   9.101 +memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
   9.102 +{
   9.103 +    uint64_t pa;
   9.104 +
   9.105 +    if (!vtlb->nomap)
   9.106 +        panic("Normal memory write shouldn't go to this point!");
   9.107 +    pa = PPN_2_PA(vtlb->ppn);
   9.108 +    pa += POFFSET((u64)dest, vtlb->ps);
   9.109 +    mmio_write (vcpu, src, pa, s, vtlb->ma);
   9.110 +}
   9.111 +
   9.112 +
   9.113 +void
   9.114 +memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s)
   9.115 +{
   9.116 +    uint64_t pa = (uint64_t)dest;
   9.117 +    int    ma;
   9.118 +
   9.119 +    if ( pa & (1UL <<63) ) {
   9.120 +        // UC
   9.121 +        ma = 4;
   9.122 +        pa <<=1; 
   9.123 +        pa >>=1;
   9.124 +    } 
   9.125 +    else {
   9.126 +        // WBL
   9.127 +        ma = 0;     // using WB for WBL
   9.128 +    }
   9.129 +    mmio_write (vcpu, src, pa, s, ma);
   9.130 +}
   9.131 +
   9.132 +void
   9.133 +memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
   9.134 +{
   9.135 +    uint64_t pa;
   9.136 +
   9.137 +    if (!vtlb->nomap)
   9.138 +        panic("Normal memory read shouldn't go to this point!");
   9.139 +    pa = PPN_2_PA(vtlb->ppn);
   9.140 +    pa += POFFSET((u64)src, vtlb->ps);
   9.141 +    
   9.142 +    mmio_read(vcpu, pa, dest, s, vtlb->ma);
   9.143 +}
   9.144 +
   9.145 +void
   9.146 +memread_p(VCPU *vcpu, void *src, void *dest, size_t s)
   9.147 +{
   9.148 +    uint64_t pa = (uint64_t)src;
   9.149 +    int    ma;
   9.150 +
   9.151 +    if ( pa & (1UL <<63) ) {
   9.152 +        // UC
   9.153 +        ma = 4;
   9.154 +        pa <<=1; 
   9.155 +        pa >>=1;
   9.156 +    } 
   9.157 +    else {
   9.158 +        // WBL
   9.159 +        ma = 0;     // using WB for WBL
   9.160 +    }
   9.161 +    mmio_read(vcpu, pa, dest, s, ma);
   9.162 +}
   9.163 +
   9.164 +#define	PIB_LOW_HALF(ofst)	(!((ofst) & (1 << 20)))
   9.165 +#define PIB_OFST_INTA           0x1E0000
   9.166 +#define PIB_OFST_XTP            0x1E0008
   9.167 +
   9.168 +
   9.169 +/*
   9.170 + * Deliver an IPI message. (Only U-VP is supported now)
   9.171 + *  dm:     delivery mode.
   9.172 + *  vector: interrupt vector to deliver.
   9.173 + */
   9.174 +static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
   9.175 +{
   9.176 +#ifdef  IPI_DEBUG
   9.177 +  printf ("deliver_ipi %lx %lx\n",dm,vector);
   9.178 +#endif
   9.179 +    switch ( dm ) {
   9.180 +    case 0:     // INT
   9.181 +        vmx_vcpu_pend_interrupt (vcpu, vector);
   9.182 +        break;
   9.183 +    case 2:     // PMI
   9.184 +        // TODO -- inject guest PMI
   9.185 +        panic ("Inject guest PMI!\n");
   9.186 +        break;
   9.187 +    case 4:     // NMI
   9.188 +        vmx_vcpu_pend_interrupt (vcpu, 2);     
   9.189 +        break;
   9.190 +    case 5:     // INIT
   9.191 +        // TODO -- inject guest INIT
   9.192 +        panic ("Inject guest INIT!\n");
   9.193 +        break;
   9.194 +    case 7:     // ExtINT
   9.195 +        vmx_vcpu_pend_interrupt (vcpu, 0);     
   9.196 +        break;
   9.197 +        
   9.198 +    case 1:
   9.199 +    case 3:
   9.200 +    case 6:
   9.201 +    default:
   9.202 +        panic ("Deliver reserved IPI!\n");
   9.203 +        break;
   9.204 +    }   
   9.205 +}
   9.206 +
   9.207 +/*
   9.208 + * TODO: Use hash table for the lookup.
   9.209 + */
   9.210 +static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
   9.211 +{
   9.212 +	int   i;
   9.213 +	VCPU  *vcpu;
   9.214 +	LID	  lid;
   9.215 +	
   9.216 +	for (i=0; i<MAX_VIRT_CPUS; i++) {
   9.217 +		vcpu = d->exec_domain[i];
   9.218 +		lid.val = VPD_CR(vcpu, lid);
   9.219 +		if ( lid.id == id && lid.eid == eid ) {
   9.220 +		    return vcpu;
   9.221 +		}
   9.222 +	}
   9.223 +	return NULL;
   9.224 +}
   9.225 +
   9.226 +/*
   9.227 + * execute write IPI op.
   9.228 + */
   9.229 +static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
   9.230 +{
   9.231 +    VCPU   *target_cpu;
   9.232 +    
   9.233 +    target_cpu = lid_2_vcpu(vcpu->domain, 
   9.234 +    				((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   9.235 +    if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
   9.236 +    if ( target_cpu == vcpu ) {
   9.237 +    	// IPI to self
   9.238 +        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
   9.239 +                ((ipi_d_t)value).vector);
   9.240 +        return 1;
   9.241 +    }
   9.242 +    else {
   9.243 +    	// TODO: send Host IPI to inject guest SMP IPI interruption
   9.244 +        panic ("No SM-VP supported!\n");
   9.245 +        return 0;
   9.246 +    }
   9.247 +}
   9.248 +
   9.249 +void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
   9.250 +{
   9.251 +    
   9.252 +    switch (pib_off) {
   9.253 +    case PIB_OFST_INTA:
   9.254 +        panic("Undefined write on PIB INTA\n");
   9.255 +        break;
   9.256 +    case PIB_OFST_XTP:
   9.257 +        if ( s == 1 && ma == 4 /* UC */) {
   9.258 +            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
   9.259 +        }
   9.260 +        else {
   9.261 +            panic("Undefined write on PIB XTP\n");
   9.262 +        }
   9.263 +        break;
   9.264 +    default:
   9.265 +        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
   9.266 +            if ( s != 8 || ma != 0x4 /* UC */ ) {
   9.267 +                panic("Undefined IPI-LHF write!\n");
   9.268 +            }
   9.269 +            else {
   9.270 +                write_ipi(vcpu, pib_off, *(uint64_t *)src);
   9.271 +                // TODO for SM-VP
   9.272 +            }
   9.273 +        }
   9.274 +        else {      // upper half
   9.275 +            printf("IPI-UHF write %lx\n",pib_off);
   9.276 +            panic("Not supported yet for SM-VP\n");
   9.277 +        }
   9.278 +        break;
   9.279 +    }
   9.280 +}
   9.281 +
   9.282 +void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
   9.283 +{
   9.284 +    switch (pib_off) {
   9.285 +    case PIB_OFST_INTA:
   9.286 +        // todo --- emit on processor system bus.
   9.287 +        if ( s == 1 && ma == 4) { // 1 byte load
   9.288 +            // TODO: INTA read from IOSAPIC
   9.289 +        }
   9.290 +        else {
   9.291 +            panic("Undefined read on PIB INTA\n");
   9.292 +        }
   9.293 +        break;
   9.294 +    case PIB_OFST_XTP:
   9.295 +        if ( s == 1 && ma == 4) {
   9.296 +            *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
   9.297 +        }
   9.298 +        else {
   9.299 +            panic("Undefined read on PIB XTP\n");
   9.300 +        }
   9.301 +        break;
   9.302 +    default:
   9.303 +        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
   9.304 +            if ( s != 8 || ma != 4 ) {
   9.305 +                panic("Undefined IPI-LHF read!\n");
   9.306 +            }
   9.307 +            else {
   9.308 +#ifdef  IPI_DEBUG
   9.309 +                printf("IPI-LHF read %lx\n",pib_off);
   9.310 +#endif
   9.311 +                *(uint64_t *)dest = 0;  // TODO for SM-VP
   9.312 +            }
   9.313 +        }
   9.314 +        else {      // upper half
   9.315 +            if ( s != 1 || ma != 4 ) {
   9.316 +                panic("Undefined PIB-UHF read!\n");
   9.317 +            }
   9.318 +            else {
   9.319 +#ifdef  IPI_DEBUG
   9.320 +                printf("IPI-UHF read %lx\n",pib_off);
   9.321 +#endif
   9.322 +                *(uint8_t *)dest = 0;   // TODO for SM-VP
   9.323 +            }
   9.324 +        }
   9.325 +        break;
   9.326 +    }
   9.327 +}
   9.328 +
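
lookup_mmio() above does a linear scan of a per-platform range table terminated by a NOT_IO sentinel; every emulated access in mmio_read()/mmio_write() is first classified by which range its physical address falls into. A self-contained sketch of the same lookup with stand-in types:

    #include <stdint.h>
    #include <stddef.h>

    enum demo_io_type { DEMO_NOT_IO = 0, DEMO_PIB_MMIO, DEMO_LOW_MMIO };

    struct demo_mmio_range {
        uint64_t start, end;          /* inclusive bounds */
        enum demo_io_type iot;
    };

    /* Table must end with a DEMO_NOT_IO sentinel entry. */
    static const struct demo_mmio_range *
    demo_lookup_mmio(uint64_t gpa, const struct demo_mmio_range *base)
    {
        int i;

        for (i = 0; base[i].iot != DEMO_NOT_IO; i++)
            if (gpa >= base[i].start && gpa <= base[i].end)
                return &base[i];
        return NULL;                  /* not an emulated MMIO address */
    }
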
    10.1 --- a/xen/arch/ia64/patch/linux-2.6.11/entry.S	Mon May 23 15:22:15 2005 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.S	Mon May 23 15:29:59 2005 +0000
    10.3 @@ -1,5 +1,5 @@
    10.4 ---- ../../linux-2.6.11/arch/ia64/kernel/entry.S	2005-03-02 00:37:50.000000000 -0700
    10.5 -+++ arch/ia64/entry.S	2005-04-29 14:54:13.000000000 -0600
    10.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.S	2005-03-01 23:37:50.000000000 -0800
    10.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/entry.S	2005-05-18 12:40:51.000000000 -0700
    10.8  @@ -46,6 +46,7 @@
    10.9   
   10.10   #include "minstate.h"
   10.11 @@ -73,19 +73,23 @@
   10.12   
   10.13   GLOBAL_ENTRY(ia64_ret_from_clone)
   10.14   	PT_REGS_UNWIND_INFO(0)
   10.15 -@@ -604,6 +626,11 @@
   10.16 +@@ -604,6 +626,15 @@
   10.17   	 */
   10.18   	br.call.sptk.many rp=ia64_invoke_schedule_tail
   10.19   }
   10.20  +#ifdef XEN
   10.21  +	// new domains are cloned but not exec'ed so switch to user mode here
   10.22  +	cmp.ne pKStk,pUStk=r0,r0
   10.23 ++#ifdef CONFIG_VTI
   10.24 ++	br.cond.spnt ia64_leave_hypervisor
   10.25 ++#else // CONFIG_VTI
   10.26  +	br.cond.spnt ia64_leave_kernel
   10.27 ++#endif // CONFIG_VTI
   10.28  +#else
   10.29   .ret8:
   10.30   	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
   10.31   	;;
   10.32 -@@ -614,6 +641,7 @@
   10.33 +@@ -614,6 +645,7 @@
   10.34   	;;
   10.35   	cmp.ne p6,p0=r2,r0
   10.36   (p6)	br.cond.spnt .strace_check_retval
   10.37 @@ -93,7 +97,7 @@
   10.38   	;;					// added stop bits to prevent r8 dependency
   10.39   END(ia64_ret_from_clone)
   10.40   	// fall through
   10.41 -@@ -700,19 +728,25 @@
   10.42 +@@ -700,19 +732,25 @@
   10.43   .work_processed_syscall:
   10.44   	adds r2=PT(LOADRS)+16,r12
   10.45   	adds r3=PT(AR_BSPSTORE)+16,r12
   10.46 @@ -119,7 +123,7 @@
   10.47   	;;
   10.48   	// start restoring the state saved on the kernel stack (struct pt_regs):
   10.49   	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
   10.50 -@@ -757,7 +791,11 @@
   10.51 +@@ -757,7 +795,11 @@
   10.52   	;;
   10.53   	ld8.fill r12=[r2]	// restore r12 (sp)
   10.54   	ld8.fill r15=[r3]	// restore r15
   10.55 @@ -131,7 +135,7 @@
   10.56   	;;
   10.57   (pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
   10.58   (pUStk) st1 [r14]=r17
   10.59 -@@ -814,9 +852,18 @@
   10.60 +@@ -814,9 +856,18 @@
   10.61   (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
   10.62   #endif
   10.63   .work_processed_kernel:
   10.64 @@ -150,7 +154,19 @@
   10.65   	adds r21=PT(PR)+16,r12
   10.66   	;;
   10.67   
   10.68 -@@ -838,7 +885,9 @@
   10.69 +@@ -828,17 +879,20 @@
   10.70 + 	ld8 r28=[r2],8		// load b6
   10.71 + 	adds r29=PT(R24)+16,r12
   10.72 + 
   10.73 +-	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
   10.74 ++	ld8.fill r16=[r3]
   10.75 + 	adds r30=PT(AR_CCV)+16,r12
   10.76 + (p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
   10.77 + 	;;
   10.78 ++	adds r3=PT(AR_CSD)-PT(R16),r3
   10.79 + 	ld8.fill r24=[r29]
   10.80 + 	ld8 r15=[r30]		// load ar.ccv
   10.81 + (p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
   10.82   	;;
   10.83   	ld8 r29=[r2],16		// load b7
   10.84   	ld8 r30=[r3],16		// load ar.csd
   10.85 @@ -160,7 +176,7 @@
   10.86   	;;
   10.87   	ld8 r31=[r2],16		// load ar.ssd
   10.88   	ld8.fill r8=[r3],16
   10.89 -@@ -934,7 +983,11 @@
   10.90 +@@ -934,7 +988,11 @@
   10.91   	shr.u r18=r19,16	// get byte size of existing "dirty" partition
   10.92   	;;
   10.93   	mov r16=ar.bsp		// get existing backing store pointer
   10.94 @@ -172,7 +188,7 @@
   10.95   	;;
   10.96   	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
   10.97   (pKStk)	br.cond.dpnt skip_rbs_switch
   10.98 -@@ -1069,6 +1122,7 @@
   10.99 +@@ -1069,6 +1127,7 @@
  10.100   	mov pr=r31,-1		// I0
  10.101   	rfi			// B
  10.102   
  10.103 @@ -180,7 +196,7 @@
  10.104   	/*
  10.105   	 * On entry:
  10.106   	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
  10.107 -@@ -1130,6 +1184,7 @@
  10.108 +@@ -1130,6 +1189,7 @@
  10.109   	ld8 r8=[r2]
  10.110   	ld8 r10=[r3]
  10.111   	br.cond.sptk.many .work_processed_syscall	// re-check
  10.112 @@ -188,7 +204,7 @@
  10.113   
  10.114   END(ia64_leave_kernel)
  10.115   
  10.116 -@@ -1166,6 +1221,7 @@
  10.117 +@@ -1166,6 +1226,7 @@
  10.118   	br.ret.sptk.many rp
  10.119   END(ia64_invoke_schedule_tail)
  10.120   
  10.121 @@ -196,7 +212,7 @@
  10.122   	/*
  10.123   	 * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
  10.124   	 * be set up by the caller.  We declare 8 input registers so the system call
  10.125 -@@ -1264,6 +1320,7 @@
  10.126 +@@ -1264,6 +1325,7 @@
  10.127   	mov ar.unat=r9
  10.128   	br.many b7
  10.129   END(sys_rt_sigreturn)
  10.130 @@ -204,7 +220,7 @@
  10.131   
  10.132   GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
  10.133   	.prologue
  10.134 -@@ -1278,6 +1335,7 @@
  10.135 +@@ -1278,6 +1340,7 @@
  10.136   	br.cond.sptk.many rp				// goes to ia64_leave_kernel
  10.137   END(ia64_prepare_handle_unaligned)
  10.138   
  10.139 @@ -212,7 +228,7 @@
  10.140   	//
  10.141   	// unw_init_running(void (*callback)(info, arg), void *arg)
  10.142   	//
  10.143 -@@ -1585,3 +1643,4 @@
  10.144 +@@ -1585,3 +1648,4 @@
  10.145   	data8 sys_ni_syscall
  10.146   
  10.147   	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.h	Mon May 23 15:29:59 2005 +0000
    11.3 @@ -0,0 +1,37 @@
    11.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.h	2005-03-01 23:38:07.000000000 -0800
    11.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/arch/ia64/entry.h	2005-05-18 14:00:53.000000000 -0700
    11.6 +@@ -7,6 +7,12 @@
    11.7 + #define PRED_LEAVE_SYSCALL	1 /* TRUE iff leave from syscall */
    11.8 + #define PRED_KERNEL_STACK	2 /* returning to kernel-stacks? */
    11.9 + #define PRED_USER_STACK		3 /* returning to user-stacks? */
   11.10 ++#ifdef CONFIG_VTI
   11.11 ++#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
   11.12 ++#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
   11.13 ++#define PRED_BN0		6 /* Guest is in bank 0 */
   11.14 ++#define PRED_BN1		7 /* Guest is in bank 1 */
   11.15 ++#endif // CONFIG_VTI
   11.16 + #define PRED_SYSCALL		4 /* inside a system call? */
   11.17 + #define PRED_NON_SYSCALL	5 /* complement of PRED_SYSCALL */
   11.18 + 
   11.19 +@@ -17,12 +23,21 @@
   11.20 + # define pLvSys		PASTE(p,PRED_LEAVE_SYSCALL)
   11.21 + # define pKStk		PASTE(p,PRED_KERNEL_STACK)
   11.22 + # define pUStk		PASTE(p,PRED_USER_STACK)
   11.23 ++#ifdef CONFIG_VTI
   11.24 ++# define pEml		PASTE(p,PRED_EMUL)
   11.25 ++# define pNonEml	PASTE(p,PRED_NON_EMUL)
   11.26 ++# define pBN0		PASTE(p,PRED_BN0)
   11.27 ++# define pBN1		PASTE(p,PRED_BN1)
   11.28 ++#endif // CONFIG_VTI
   11.29 + # define pSys		PASTE(p,PRED_SYSCALL)
   11.30 + # define pNonSys	PASTE(p,PRED_NON_SYSCALL)
   11.31 + #endif
   11.32 + 
   11.33 + #define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
   11.34 + #define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
   11.35 ++#ifdef CONFIG_VTI
   11.36 ++#define VPD(f)      (VPD_##f##_START_OFFSET)
   11.37 ++#endif // CONFIG_VTI
   11.38 + 
   11.39 + #define PT_REGS_SAVES(off)			\
   11.40 + 	.unwabi 3, 'i';				\
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h	Mon May 23 15:29:59 2005 +0000
    12.3 @@ -0,0 +1,69 @@
    12.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/gcc_intrin.h	2005-03-01 23:38:08.000000000 -0800
    12.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/gcc_intrin.h	2005-05-18 14:00:53.000000000 -0700
    12.6 +@@ -368,6 +368,66 @@
    12.7 + #define ia64_mf()	asm volatile ("mf" ::: "memory")
    12.8 + #define ia64_mfa()	asm volatile ("mf.a" ::: "memory")
    12.9 + 
   12.10 ++#ifdef CONFIG_VTI
   12.11 ++/*
   12.12 ++ * Flushrs instruction stream.
   12.13 ++ */
   12.14 ++#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
   12.15 ++
   12.16 ++#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
   12.17 ++
   12.18 ++#define ia64_get_rsc()                          \
   12.19 ++({                                  \
   12.20 ++    unsigned long val;                     \
   12.21 ++    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
   12.22 ++    val;                               \
   12.23 ++})
   12.24 ++
   12.25 ++#define ia64_set_rsc(val)                       \
   12.26 ++    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
   12.27 ++
   12.28 ++#define ia64_get_bspstore()     \
   12.29 ++({                                  \
   12.30 ++    unsigned long val;                     \
   12.31 ++    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
   12.32 ++    val;                               \
   12.33 ++})
   12.34 ++
   12.35 ++#define ia64_set_bspstore(val)                       \
   12.36 ++    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
   12.37 ++
   12.38 ++#define ia64_get_rnat()     \
   12.39 ++({                                  \
   12.40 ++    unsigned long val;                     \
   12.41 ++    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
   12.42 ++    val;                               \
   12.43 ++})
   12.44 ++
   12.45 ++#define ia64_set_rnat(val)                       \
   12.46 ++    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
   12.47 ++
   12.48 ++#define ia64_ttag(addr)							\
   12.49 ++({										\
   12.50 ++	__u64 ia64_intri_res;							\
   12.51 ++	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
   12.52 ++	ia64_intri_res;								\
   12.53 ++})
   12.54 ++
   12.55 ++#define ia64_get_dcr()                          \
   12.56 ++({                                      \
   12.57 ++    __u64 result;                               \
   12.58 ++    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
   12.59 ++    result;                                 \
   12.60 ++})
   12.61 ++
   12.62 ++#define ia64_set_dcr(val)                           \
   12.63 ++({                                      \
   12.64 ++    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
   12.65 ++})
   12.66 ++
   12.67 ++#endif // CONFIG_VTI
   12.68 ++
   12.69 ++
   12.70 + #define ia64_invala() asm volatile ("invala" ::: "memory")
   12.71 + 
   12.72 + #define ia64_thash(addr)							\
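
The RSE intrinsics added above (ia64_get_rsc/ia64_set_rsc, ia64_get_rnat, ...) are typically used together: ar.rnat and ar.bspstore can only be accessed reliably once the RSE has been put into enforced-lazy mode (rsc.mode = 0). A hedged usage sketch under that assumption, not code from the patch:

    /* Read ar.rnat safely: force enforced-lazy mode, read, restore. */
    static unsigned long demo_read_rnat(void)
    {
        unsigned long old_rsc = ia64_get_rsc();
        unsigned long rnat;

        ia64_set_rsc(old_rsc & ~0x3UL); /* clear rsc.mode bits */
        rnat = ia64_get_rnat();
        ia64_set_rsc(old_rsc);          /* restore previous mode */
        return rnat;
    }
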
    13.1 --- a/xen/arch/ia64/patch/linux-2.6.11/head.S	Mon May 23 15:22:15 2005 +0000
    13.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/head.S	Mon May 23 15:29:59 2005 +0000
    13.3 @@ -1,6 +1,62 @@
    13.4 ---- ../../linux-2.6.11/arch/ia64/kernel/head.S	2005-03-02 00:38:13.000000000 -0700
    13.5 -+++ arch/ia64/head.S	2005-04-28 10:51:19.000000000 -0600
    13.6 -@@ -187,7 +187,11 @@
    13.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/head.S	2005-03-01 23:38:13.000000000 -0800
    13.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/head.S	2005-05-18 12:40:50.000000000 -0700
    13.9 +@@ -76,21 +76,21 @@
   13.10 + 	 * We initialize all of them to prevent inadvertently assuming
   13.11 + 	 * something about the state of address translation early in boot.
   13.12 + 	 */
   13.13 +-	mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.14 ++	movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.15 + 	movl r7=(0<<61)
   13.16 +-	mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.17 ++	movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.18 + 	movl r9=(1<<61)
   13.19 +-	mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.20 ++	movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.21 + 	movl r11=(2<<61)
   13.22 +-	mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.23 ++	movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.24 + 	movl r13=(3<<61)
   13.25 +-	mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.26 ++	movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.27 + 	movl r15=(4<<61)
   13.28 +-	mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.29 ++	movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   13.30 + 	movl r17=(5<<61)
   13.31 +-	mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   13.32 ++	movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   13.33 + 	movl r19=(6<<61)
   13.34 +-	mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   13.35 ++	movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   13.36 + 	movl r21=(7<<61)
   13.37 + 	;;
   13.38 + 	mov rr[r7]=r6
   13.39 +@@ -129,8 +129,13 @@
   13.40 + 	/*
   13.41 + 	 * Switch into virtual mode:
   13.42 + 	 */
   13.43 ++#ifdef CONFIG_VTI
   13.44 ++	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
   13.45 ++		  |IA64_PSR_DI)
   13.46 ++#else // CONFIG_VTI
   13.47 + 	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
   13.48 + 		  |IA64_PSR_DI)
   13.49 ++#endif // CONFIG_VTI
   13.50 + 	;;
   13.51 + 	mov cr.ipsr=r16
   13.52 + 	movl r17=1f
   13.53 +@@ -143,7 +148,11 @@
   13.54 + 1:	// now we are in virtual mode
   13.55 + 
   13.56 + 	// set IVT entry point---can't access I/O ports without it
   13.57 ++#ifdef CONFIG_VTI
   13.58 ++    movl r3=vmx_ia64_ivt
   13.59 ++#else // CONFIG_VTI
   13.60 + 	movl r3=ia64_ivt
   13.61 ++#endif // CONFIG_VTI
   13.62 + 	;;
   13.63 + 	mov cr.iva=r3
   13.64 + 	movl r2=FPSR_DEFAULT
   13.65 +@@ -187,7 +196,11 @@
   13.66   	dep r18=0,r3,0,12
   13.67   	;;
   13.68   	or r18=r17,r18
   13.69 @@ -12,7 +68,23 @@
   13.70   	;;
   13.71   	mov r17=rr[r2]
   13.72   	shr.u r16=r3,IA64_GRANULE_SHIFT
   13.73 -@@ -227,7 +231,11 @@
   13.74 +@@ -207,8 +220,15 @@
   13.75 + 
   13.76 + .load_current:
   13.77 + 	// load the "current" pointer (r13) and ar.k6 with the current task
   13.78 ++#ifdef CONFIG_VTI
   13.79 ++	mov r21=r2		// virtual address
   13.80 ++	;;
   13.81 ++	bsw.1
   13.82 ++	;;
   13.83 ++#else // CONFIG_VTI
   13.84 + 	mov IA64_KR(CURRENT)=r2		// virtual address
   13.85 + 	mov IA64_KR(CURRENT_STACK)=r16
   13.86 ++#endif // CONFIG_VTI
   13.87 + 	mov r13=r2
   13.88 + 	/*
   13.89 + 	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
   13.90 +@@ -227,7 +247,11 @@
   13.91   	;;
   13.92   	mov ar.rsc=0x3		// place RSE in eager mode
   13.93   
   13.94 @@ -24,7 +96,7 @@
   13.95   (isBP)	movl r2=ia64_boot_param
   13.96   	;;
   13.97   (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
   13.98 -@@ -254,7 +262,9 @@
   13.99 +@@ -254,7 +278,9 @@
  13.100   	br.call.sptk.many b0=console_print
  13.101   
  13.102   self:	hint @pause
  13.103 @@ -34,7 +106,7 @@
  13.104   END(_start)
  13.105   
  13.106   GLOBAL_ENTRY(ia64_save_debug_regs)
  13.107 -@@ -850,7 +860,11 @@
  13.108 +@@ -850,7 +876,11 @@
  13.109    * intermediate precision so that we can produce a full 64-bit result.
  13.110    */
  13.111   GLOBAL_ENTRY(sched_clock)
    14.1 --- a/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h	Mon May 23 15:22:15 2005 +0000
    14.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h	Mon May 23 15:29:59 2005 +0000
    14.3 @@ -1,10 +1,5 @@
    14.4 - hpsim_ssc.h |   19 +++++++++++++++++++
    14.5 - 1 files changed, 19 insertions(+)
    14.6 -
    14.7 -Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h
    14.8 -===================================================================
    14.9 ---- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-02 01:38:17.000000000 -0600
   14.10 -+++ linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-19 13:34:01.705520375 -0600
   14.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-01 23:38:17.000000000 -0800
   14.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/hpsim_ssc.h	2005-05-18 12:40:19.000000000 -0700
   14.13  @@ -33,4 +33,23 @@
   14.14    */
   14.15   extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ia64regs.h	Mon May 23 15:29:59 2005 +0000
    15.3 @@ -0,0 +1,38 @@
    15.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ia64regs.h	2005-03-01 23:38:07.000000000 -0800
    15.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ia64regs.h	2005-05-18 14:00:53.000000000 -0700
    15.6 +@@ -87,6 +87,35 @@
    15.7 + #define _IA64_REG_CR_LRR0	4176
    15.8 + #define _IA64_REG_CR_LRR1	4177
    15.9 + 
   15.10 ++#ifdef  CONFIG_VTI
   15.11 ++#define IA64_REG_CR_DCR   0
   15.12 ++#define IA64_REG_CR_ITM   1
   15.13 ++#define IA64_REG_CR_IVA   2
   15.14 ++#define IA64_REG_CR_PTA   8
   15.15 ++#define IA64_REG_CR_IPSR  16
   15.16 ++#define IA64_REG_CR_ISR   17
   15.17 ++#define IA64_REG_CR_IIP   19
   15.18 ++#define IA64_REG_CR_IFA   20
   15.19 ++#define IA64_REG_CR_ITIR  21
   15.20 ++#define IA64_REG_CR_IIPA  22
   15.21 ++#define IA64_REG_CR_IFS   23
   15.22 ++#define IA64_REG_CR_IIM   24
   15.23 ++#define IA64_REG_CR_IHA   25
   15.24 ++#define IA64_REG_CR_LID   64
   15.25 ++#define IA64_REG_CR_IVR   65
   15.26 ++#define IA64_REG_CR_TPR   66
   15.27 ++#define IA64_REG_CR_EOI   67
   15.28 ++#define IA64_REG_CR_IRR0  68
   15.29 ++#define IA64_REG_CR_IRR1  69
   15.30 ++#define IA64_REG_CR_IRR2  70
   15.31 ++#define IA64_REG_CR_IRR3  71
   15.32 ++#define IA64_REG_CR_ITV   72
   15.33 ++#define IA64_REG_CR_PMV   73
   15.34 ++#define IA64_REG_CR_CMCV  74
   15.35 ++#define IA64_REG_CR_LRR0  80
   15.36 ++#define IA64_REG_CR_LRR1  81
   15.37 ++#endif  //  CONFIG_VTI
   15.38 ++
   15.39 + /* Indirect Registers for getindreg() and setindreg() */
   15.40 + 
   15.41 + #define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
    16.1 --- a/xen/arch/ia64/patch/linux-2.6.11/interrupt.h	Mon May 23 15:22:15 2005 +0000
    16.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/interrupt.h	Mon May 23 15:29:59 2005 +0000
    16.3 @@ -1,11 +1,6 @@
    16.4 - interrupt.h |    2 ++
    16.5 - 1 files changed, 2 insertions(+)
    16.6 -
    16.7 -Index: linux-2.6.11/include/linux/interrupt.h
    16.8 -===================================================================
    16.9 ---- linux-2.6.11.orig/include/linux/interrupt.h	2005-03-02 01:38:09.000000000 -0600
   16.10 -+++ linux-2.6.11/include/linux/interrupt.h	2005-03-19 13:41:00.739901125 -0600
   16.11 -@@ -33,6 +33,7 @@ typedef int irqreturn_t;
   16.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/linux/interrupt.h	2005-03-01 23:38:09.000000000 -0800
   16.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/linux/interrupt.h	2005-05-18 12:40:50.000000000 -0700
   16.14 +@@ -33,6 +33,7 @@
   16.15   #define IRQ_HANDLED	(1)
   16.16   #define IRQ_RETVAL(x)	((x) != 0)
   16.17   
   16.18 @@ -13,7 +8,7 @@ Index: linux-2.6.11/include/linux/interr
   16.19   struct irqaction {
   16.20   	irqreturn_t (*handler)(int, void *, struct pt_regs *);
   16.21   	unsigned long flags;
   16.22 -@@ -49,6 +50,7 @@ extern int request_irq(unsigned int,
   16.23 +@@ -49,6 +50,7 @@
   16.24   		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
   16.25   		       unsigned long, const char *, void *);
   16.26   extern void free_irq(unsigned int, void *);
   16.27 @@ -21,3 +16,12 @@ Index: linux-2.6.11/include/linux/interr
   16.28   
   16.29   
   16.30   #ifdef CONFIG_GENERIC_HARDIRQS
   16.31 +@@ -121,7 +123,7 @@
   16.32 + };
   16.33 + 
   16.34 + asmlinkage void do_softirq(void);
   16.35 +-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   16.36 ++//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   16.37 + extern void softirq_init(void);
   16.38 + #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
   16.39 + extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
    17.1 --- a/xen/arch/ia64/patch/linux-2.6.11/io.h	Mon May 23 15:22:15 2005 +0000
    17.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/io.h	Mon May 23 15:29:59 2005 +0000
    17.3 @@ -1,16 +1,11 @@
    17.4 - io.h |    4 ++++
    17.5 - 1 files changed, 4 insertions(+)
    17.6 -
    17.7 -Index: linux-2.6.11/include/asm-ia64/io.h
    17.8 -===================================================================
    17.9 ---- linux-2.6.11.orig/include/asm-ia64/io.h	2005-03-02 01:38:34.000000000 -0600
   17.10 -+++ linux-2.6.11/include/asm-ia64/io.h	2005-03-19 13:42:06.541900818 -0600
   17.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/io.h	2005-03-01 23:38:34.000000000 -0800
   17.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/io.h	2005-05-18 12:40:50.000000000 -0700
   17.13  @@ -23,7 +23,11 @@
   17.14   #define __SLOW_DOWN_IO	do { } while (0)
   17.15   #define SLOW_DOWN_IO	do { } while (0)
   17.16   
   17.17  +#ifdef XEN
   17.18 -+#define __IA64_UNCACHED_OFFSET	0xdffc000000000000UL	/* region 6 */
   17.19 ++#define __IA64_UNCACHED_OFFSET	0xd000000000000000UL	/* region 6 */
   17.20  +#else
   17.21   #define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
   17.22  +#endif
    18.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Mon May 23 15:22:15 2005 +0000
    18.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Mon May 23 15:29:59 2005 +0000
    18.3 @@ -1,5 +1,5 @@
    18.4 ---- ../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c	2005-03-02 00:38:07.000000000 -0700
    18.5 -+++ arch/ia64/irq_ia64.c	2005-04-29 16:05:30.000000000 -0600
    18.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c	2005-03-01 23:38:07.000000000 -0800
    18.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/irq_ia64.c	2005-05-18 12:40:51.000000000 -0700
    18.8  @@ -106,6 +106,9 @@
    18.9   	unsigned long saved_tpr;
   18.10   
   18.11 @@ -20,3 +20,99 @@
   18.12   			__do_IRQ(local_vector_to_irq(vector), regs);
   18.13   
   18.14   			/*
   18.15 +@@ -167,6 +173,95 @@
   18.16 + 	irq_exit();
   18.17 + }
   18.18 + 
   18.19 ++#ifdef  CONFIG_VTI
   18.20 ++/*
   18.21 ++ * That's where the IVT branches when we get an external
   18.22 ++ * interrupt. This branches to the correct hardware IRQ handler via
   18.23 ++ * function ptr.
   18.24 ++ */
   18.25 ++void
   18.26 ++vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
   18.27 ++{
   18.28 ++	unsigned long saved_tpr;
   18.29 ++	int	wake_dom0 = 0;
   18.30 ++
   18.31 ++
   18.32 ++#if IRQ_DEBUG
   18.33 ++	{
   18.34 ++		unsigned long bsp, sp;
   18.35 ++
   18.36 ++		/*
   18.37 ++		 * Note: if the interrupt happened while executing in
   18.38 ++		 * the context switch routine (ia64_switch_to), we may
   18.39 ++		 * get a spurious stack overflow here.  This is
   18.40 ++		 * because the register and the memory stack are not
   18.41 ++		 * switched atomically.
   18.42 ++		 */
   18.43 ++		bsp = ia64_getreg(_IA64_REG_AR_BSP);
   18.44 ++		sp = ia64_getreg(_IA64_REG_AR_SP);
   18.45 ++
   18.46 ++		if ((sp - bsp) < 1024) {
   18.47 ++			static unsigned char count;
   18.48 ++			static long last_time;
   18.49 ++
   18.50 ++			if (jiffies - last_time > 5*HZ)
   18.51 ++				count = 0;
   18.52 ++			if (++count < 5) {
   18.53 ++				last_time = jiffies;
   18.54 ++				printk("ia64_handle_irq: DANGER: less than "
   18.55 ++				       "1KB of free stack space!!\n"
   18.56 ++				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
   18.57 ++			}
   18.58 ++		}
   18.59 ++	}
   18.60 ++#endif /* IRQ_DEBUG */
   18.61 ++
   18.62 ++	/*
   18.63 ++	 * Always set TPR to limit maximum interrupt nesting depth to
   18.64 ++	 * 16 (without this, it would be ~240, which could easily lead
   18.65 ++	 * to kernel stack overflows).
   18.66 ++	 */
   18.67 ++	irq_enter();
   18.68 ++	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
   18.69 ++	ia64_srlz_d();
   18.70 ++	while (vector != IA64_SPURIOUS_INT_VECTOR) {
   18.71 ++	    if (!IS_RESCHEDULE(vector)) {
   18.72 ++		ia64_setreg(_IA64_REG_CR_TPR, vector);
   18.73 ++		ia64_srlz_d();
   18.74 ++
   18.75 ++		if (vector != IA64_TIMER_VECTOR) {
    18.76 ++			/* FIXME: leave IRQ re-routing for later */
   18.77 ++			vmx_vcpu_pend_interrupt(dom0->exec_domain[0],vector);
   18.78 ++			wake_dom0 = 1;
   18.79 ++		}
    18.80 ++		else {	// FIXME: only the timer is handled here for now
   18.81 ++			__do_IRQ(local_vector_to_irq(vector), regs);
   18.82 ++		}
   18.83 ++		
   18.84 ++		/*
   18.85 ++		 * Disable interrupts and send EOI:
   18.86 ++		 */
   18.87 ++		local_irq_disable();
   18.88 ++		ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
   18.89 ++	    }
   18.90 ++	    else {
   18.91 ++                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
   18.92 ++            }
   18.93 ++	    ia64_eoi();
   18.94 ++	    vector = ia64_get_ivr();
   18.95 ++	}
   18.96 ++	/*
   18.97 ++	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
   18.98 ++	 * handler needs to be able to wait for further keyboard interrupts, which can't
   18.99 ++	 * come through until ia64_eoi() has been done.
  18.100 ++	 */
  18.101 ++	irq_exit();
  18.102 ++	if ( wake_dom0 && current != dom0 ) 
  18.103 ++		domain_wake(dom0->exec_domain[0]);
  18.104 ++}
  18.105 ++#endif
  18.106 ++
  18.107 ++
  18.108 + #ifdef CONFIG_HOTPLUG_CPU
  18.109 + /*
  18.110 +  * This function emulates a interrupt processing when a cpu is about to be
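Condensed, the VTI handler added above differs from the stock ia64_handle_irq() in one step: non-timer vectors are not serviced inside Xen but pended into dom0's virtual IRR, and dom0 is woken once interrupt processing unwinds. A minimal restatement of that branch, using the same helpers the patch calls (TPR and EOI bookkeeping omitted):

    /* Core of vmx_ia64_handle_irq(): forward device interrupts to dom0,
     * handle only the timer inside Xen.  wake_dom0 defers domain_wake()
     * until after irq_exit(), as in the patch above.
     */
    if (vector != IA64_TIMER_VECTOR) {
    	vmx_vcpu_pend_interrupt(dom0->exec_domain[0], vector);
    	wake_dom0 = 1;
    } else {
    	__do_IRQ(local_vector_to_irq(vector), regs);
    }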
    19.1 --- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Mon May 23 15:22:15 2005 +0000
    19.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Mon May 23 15:29:59 2005 +0000
    19.3 @@ -1,18 +1,65 @@
    19.4 - kregs.h |    4 ++++
    19.5 - 1 files changed, 4 insertions(+)
    19.6 -
    19.7 -Index: linux-2.6.11/include/asm-ia64/kregs.h
    19.8 -===================================================================
    19.9 ---- linux-2.6.11.orig/include/asm-ia64/kregs.h	2005-03-02 01:37:49.000000000 -0600
   19.10 -+++ linux-2.6.11/include/asm-ia64/kregs.h	2005-03-19 13:44:24.362628092 -0600
   19.11 -@@ -31,6 +31,10 @@
   19.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h	2005-03-01 23:37:49.000000000 -0800
   19.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h	2005-05-18 12:40:50.000000000 -0700
   19.14 +@@ -29,8 +29,20 @@
   19.15 +  */
   19.16 + #define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
   19.17   #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
   19.18 ++#ifdef CONFIG_VTI
   19.19 ++#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
   19.20 ++#endif // CONFIG_VTI
   19.21   #define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
   19.22   #define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
   19.23  +#ifdef XEN
   19.24  +#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   19.25  +#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   19.26 ++#ifdef CONFIG_VTI
   19.27 ++#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
   19.28 ++#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
   19.29 ++#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
   19.30 ++#endif // CONFIG_VTI
   19.31  +#endif
   19.32   
   19.33   /* Processor status register bits: */
   19.34   #define IA64_PSR_BE_BIT		1
   19.35 +@@ -66,6 +78,9 @@
   19.36 + #define IA64_PSR_ED_BIT		43
   19.37 + #define IA64_PSR_BN_BIT		44
   19.38 + #define IA64_PSR_IA_BIT		45
   19.39 ++#ifdef CONFIG_VTI
   19.40 ++#define IA64_PSR_VM_BIT		46
   19.41 ++#endif // CONFIG_VTI
   19.42 + 
   19.43 + /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
   19.44 +    execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
   19.45 +@@ -107,6 +122,9 @@
   19.46 + #define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
   19.47 + #define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
   19.48 + #define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
   19.49 ++#ifdef CONFIG_VTI
   19.50 ++#define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
   19.51 ++#endif // CONFIG_VTI
   19.52 + 
   19.53 + /* User mask bits: */
   19.54 + #define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
   19.55 +@@ -160,4 +178,21 @@
   19.56 + #define IA64_ISR_CODE_LFETCH	4
   19.57 + #define IA64_ISR_CODE_PROBEF	5
   19.58 + 
   19.59 ++#ifdef CONFIG_VTI
   19.60 ++/* Interruption Function State */
   19.61 ++#define IA64_IFS_V_BIT		63
   19.62 ++#define IA64_IFS_V	(__IA64_UL(1) << IA64_IFS_V_BIT)
   19.63 ++
   19.64 ++/* Page Table Address */
   19.65 ++#define IA64_PTA_VE_BIT 0
   19.66 ++#define IA64_PTA_SIZE_BIT 2
   19.67 ++#define IA64_PTA_VF_BIT 8
   19.68 ++#define IA64_PTA_BASE_BIT 15
   19.69 ++
   19.70 ++#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
   19.71 ++#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
   19.72 ++#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
   19.73 ++#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
   19.74 ++#endif // CONFIG_VTI
   19.75 ++
   19.76 + #endif /* _ASM_IA64_kREGS_H */
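The IA64_PTA_* masks added above describe the cr.pta layout: ve (bit 0) enables the VHPT walker, size (bits 2-7) is log2 of the VHPT size in bytes, vf (bit 8) selects the long format, and base (bits 15-63) holds the table base. A standalone sketch of composing a value from such masks (constants re-derived locally so it builds as a user program; the example base and size are hypothetical):

    #include <stdio.h>

    #define PTA_VE_BIT	0
    #define PTA_SIZE_BIT	2
    #define PTA_VF_BIT	8
    #define PTA_BASE_BIT	15

    #define PTA_VE		(1UL << PTA_VE_BIT)
    #define PTA_SIZE	(0x3fUL << PTA_SIZE_BIT)
    #define PTA_VF		(1UL << PTA_VF_BIT)
    #define PTA_BASE	(0UL - (1UL << PTA_BASE_BIT))	/* bits 63..15 */

    /* base must be aligned to the table size; size_log2 = log2(bytes) */
    static unsigned long make_pta(unsigned long base, unsigned size_log2,
    			      int long_format)
    {
    	return (base & PTA_BASE)
    	     | (((unsigned long)size_log2 << PTA_SIZE_BIT) & PTA_SIZE)
    	     | (long_format ? PTA_VF : 0)
    	     | PTA_VE;
    }

    int main(void)
    {
    	/* hypothetical: a 64KB long-format VHPT at 0xf200000000000000 */
    	printf("pta=%#lx\n", make_pta(0xf200000000000000UL, 16, 1));
    	return 0;
    }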
    20.1 --- a/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h	Mon May 23 15:22:15 2005 +0000
    20.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h	Mon May 23 15:29:59 2005 +0000
    20.3 @@ -1,10 +1,5 @@
    20.4 - mca_asm.h |   11 +++++++++++
    20.5 - 1 files changed, 11 insertions(+)
    20.6 -
    20.7 -Index: linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h
    20.8 -===================================================================
    20.9 ---- linux-2.6.11-xendiffs.orig/include/asm-ia64/mca_asm.h	2005-03-02 01:38:38.000000000 -0600
   20.10 -+++ linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h	2005-04-06 22:41:57.392411032 -0500
   20.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/mca_asm.h	2005-03-01 23:38:38.000000000 -0800
   20.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/mca_asm.h	2005-05-18 12:40:19.000000000 -0700
   20.13  @@ -26,8 +26,13 @@
   20.14    * direct mapped to physical addresses.
   20.15    *	1. Lop off bits 61 thru 63 in the virtual address
    21.1 --- a/xen/arch/ia64/patch/linux-2.6.11/page.h	Mon May 23 15:22:15 2005 +0000
    21.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/page.h	Mon May 23 15:29:59 2005 +0000
    21.3 @@ -1,6 +1,14 @@
    21.4 ---- ../../linux-2.6.11/include/asm-ia64/page.h	2005-03-02 00:37:48.000000000 -0700
    21.5 -+++ include/asm-ia64/page.h	2005-05-02 11:25:33.000000000 -0600
    21.6 -@@ -95,9 +95,15 @@
    21.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/page.h	2005-03-01 23:37:48.000000000 -0800
    21.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/page.h	2005-05-20 09:36:02.000000000 -0700
    21.9 +@@ -32,6 +32,7 @@
   21.10 + #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
   21.11 + 
   21.12 + #define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
   21.13 ++
   21.14 + #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
   21.15 + 
   21.16 + #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
   21.17 +@@ -95,9 +96,15 @@
   21.18   #endif
   21.19   
   21.20   #ifndef CONFIG_DISCONTIGMEM
   21.21 @@ -16,7 +24,7 @@
   21.22   #else
   21.23   extern struct page *vmem_map;
   21.24   extern unsigned long max_low_pfn;
   21.25 -@@ -109,6 +115,11 @@
   21.26 +@@ -109,6 +116,11 @@
   21.27   #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   21.28   #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   21.29   
   21.30 @@ -28,7 +36,7 @@
   21.31   typedef union ia64_va {
   21.32   	struct {
   21.33   		unsigned long off : 61;		/* intra-region offset */
   21.34 -@@ -124,8 +135,23 @@
   21.35 +@@ -124,8 +136,23 @@
   21.36    * expressed in this way to ensure they result in a single "dep"
   21.37    * instruction.
   21.38    */
   21.39 @@ -52,7 +60,7 @@
   21.40   
   21.41   #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
   21.42   #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
   21.43 -@@ -197,7 +223,11 @@
   21.44 +@@ -197,7 +224,11 @@
   21.45   # define __pgprot(x)	(x)
   21.46   #endif /* !STRICT_MM_TYPECHECKS */
   21.47   
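The ia64_va union in the context above is the key structure here: a 64-bit VA splits into a 3-bit region number and a 61-bit intra-region offset, and REGION_NUMBER()/REGION_OFFSET() are written so the compiler emits a single "dep" instruction. A standalone sketch of the same split (illustrative; mirrors the union shown in the patch context):

    #include <stdio.h>

    /* Mirror of the ia64_va union from page.h: region number in the top
     * three bits, intra-region offset in the low 61 bits.
     */
    typedef union {
    	struct {
    		unsigned long off : 61;	/* intra-region offset */
    		unsigned long reg : 3;	/* region number */
    	} f;
    	unsigned long l;
    } ia64_va_demo;

    int main(void)
    {
    	ia64_va_demo v;
    	v.l = 0xd000000000001000UL;	/* a region-6 (uncached) address */
    	printf("region=%lu off=%#lx\n",
    	       (unsigned long)v.f.reg, (unsigned long)v.f.off);
    	return 0;
    }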
    22.1 --- a/xen/arch/ia64/patch/linux-2.6.11/pal.S	Mon May 23 15:22:15 2005 +0000
    22.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.S	Mon May 23 15:29:59 2005 +0000
    22.3 @@ -1,11 +1,6 @@
    22.4 - pal.S |    8 ++++++++
    22.5 - 1 files changed, 8 insertions(+)
    22.6 -
    22.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S
    22.8 -===================================================================
    22.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/pal.S	2005-03-02 01:38:33.000000000 -0600
   22.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S	2005-04-06 22:43:53.817885390 -0500
   22.11 -@@ -166,7 +166,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
   22.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/pal.S	2005-03-01 23:38:33.000000000 -0800
   22.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/pal.S	2005-05-18 12:40:19.000000000 -0700
   22.14 +@@ -166,7 +166,11 @@
   22.15   	adds r8  = 1f-1b,r8		// calculate return address for call
   22.16   	;;
   22.17   	mov loc4=ar.rsc			// save RSE configuration
   22.18 @@ -17,7 +12,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k
   22.19   	tpa r8=r8			// convert rp to physical
   22.20   	;;
   22.21   	mov b7 = loc2			// install target to branch reg
   22.22 -@@ -225,7 +229,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
   22.23 +@@ -225,7 +229,11 @@
   22.24   	mov loc3 = psr		// save psr
   22.25   	;;
   22.26   	mov loc4=ar.rsc			// save RSE configuration
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.h	Mon May 23 15:29:59 2005 +0000
    23.3 @@ -0,0 +1,12 @@
    23.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/pal.h	2005-03-01 23:38:13.000000000 -0800
    23.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/pal.h	2005-05-18 14:00:53.000000000 -0700
    23.6 +@@ -1559,6 +1559,9 @@
    23.7 + 	return iprv.status;
    23.8 + }
    23.9 + 
   23.10 ++#ifdef CONFIG_VTI
   23.11 ++#include <asm/vmx_pal.h>
   23.12 ++#endif // CONFIG_VTI
   23.13 + #endif /* __ASSEMBLY__ */
   23.14 + 
   23.15 + #endif /* _ASM_IA64_PAL_H */
    24.1 --- a/xen/arch/ia64/patch/linux-2.6.11/processor.h	Mon May 23 15:22:15 2005 +0000
    24.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/processor.h	Mon May 23 15:29:59 2005 +0000
    24.3 @@ -1,17 +1,30 @@
    24.4 - processor.h |    4 ++++
    24.5 - 1 files changed, 4 insertions(+)
    24.6 -
    24.7 -Index: linux-2.6.11/include/asm-ia64/processor.h
    24.8 -===================================================================
    24.9 ---- linux-2.6.11.orig/include/asm-ia64/processor.h	2005-03-02 01:37:58.000000000 -0600
   24.10 -+++ linux-2.6.11/include/asm-ia64/processor.h	2005-03-19 14:26:01.062135543 -0600
   24.11 -@@ -408,12 +408,16 @@ extern void ia64_setreg_unknown_kr (void
   24.12 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/processor.h	2005-03-01 23:37:58.000000000 -0800
   24.13 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/processor.h	2005-05-20 09:36:02.000000000 -0700
   24.14 +@@ -94,7 +94,11 @@
   24.15 + #ifdef CONFIG_NUMA
   24.16 + #include <asm/nodedata.h>
   24.17 + #endif
   24.18 ++#ifdef XEN
   24.19 ++#include <asm/xenprocessor.h>
   24.20 ++#endif
   24.21 + 
   24.22 ++#ifndef XEN
   24.23 + /* like above but expressed as bitfields for more efficient access: */
   24.24 + struct ia64_psr {
   24.25 + 	__u64 reserved0 : 1;
   24.26 +@@ -133,6 +137,7 @@
   24.27 + 	__u64 bn : 1;
   24.28 + 	__u64 reserved4 : 19;
   24.29 + };
   24.30 ++#endif
   24.31 + 
   24.32 + /*
   24.33 +  * CPU type, hardware bug flags, and per-CPU state.  Frequently used
   24.34 +@@ -408,12 +413,14 @@
   24.35    */
   24.36   
   24.37   /* Return TRUE if task T owns the fph partition of the CPU we're running on. */
   24.38 -+#ifdef XEN
   24.39 -+#define ia64_is_local_fpu_owner(t) 0
   24.40 -+#else
   24.41 ++#ifndef XEN
   24.42   #define ia64_is_local_fpu_owner(t)								\
   24.43   ({												\
   24.44   	struct task_struct *__ia64_islfo_task = (t);						\
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h	Mon May 23 15:29:59 2005 +0000
    25.3 @@ -0,0 +1,20 @@
    25.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ptrace.h	2005-03-01 23:38:38.000000000 -0800
    25.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ptrace.h	2005-05-18 14:00:53.000000000 -0700
    25.6 +@@ -95,6 +95,9 @@
    25.7 +  * (because the memory stack pointer MUST ALWAYS be aligned this way)
    25.8 +  *
    25.9 +  */
   25.10 ++#ifdef CONFIG_VTI
   25.11 ++#include "vmx_ptrace.h"
   25.12 ++#else  //CONFIG_VTI
   25.13 + struct pt_regs {
   25.14 + 	/* The following registers are saved by SAVE_MIN: */
   25.15 + 	unsigned long b6;		/* scratch */
   25.16 +@@ -170,6 +173,7 @@
   25.17 + 	struct ia64_fpreg f10;		/* scratch */
   25.18 + 	struct ia64_fpreg f11;		/* scratch */
   25.19 + };
   25.20 ++#endif // CONFIG_VTI
   25.21 + 
   25.22 + /*
   25.23 +  * This structure contains the addition registers that need to
    26.1 --- a/xen/arch/ia64/patch/linux-2.6.11/setup.c	Mon May 23 15:22:15 2005 +0000
    26.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/setup.c	Mon May 23 15:29:59 2005 +0000
    26.3 @@ -1,6 +1,16 @@
    26.4 ---- ../../linux-2.6.11/arch/ia64/kernel/setup.c	2005-03-02 00:37:49.000000000 -0700
    26.5 -+++ arch/ia64/setup.c	2005-05-02 10:04:03.000000000 -0600
    26.6 -@@ -127,7 +127,16 @@
    26.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/setup.c	2005-03-01 23:37:49.000000000 -0800
    26.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/setup.c	2005-05-18 12:40:50.000000000 -0700
    26.9 +@@ -51,6 +51,9 @@
   26.10 + #include <asm/smp.h>
   26.11 + #include <asm/system.h>
   26.12 + #include <asm/unistd.h>
   26.13 ++#ifdef CONFIG_VTI
   26.14 ++#include <asm/vmx.h>
   26.15 ++#endif // CONFIG_VTI
   26.16 + 
   26.17 + #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
   26.18 + # error "struct cpuinfo_ia64 too big!"
   26.19 +@@ -127,7 +130,16 @@
   26.20   		range_end   = min(end, rsvd_region[i].start);
   26.21   
   26.22   		if (range_start < range_end)
   26.23 @@ -17,7 +27,7 @@
   26.24   
   26.25   		/* nothing more available in this segment */
   26.26   		if (range_end == end) return 0;
   26.27 -@@ -185,7 +194,12 @@
   26.28 +@@ -185,7 +197,12 @@
   26.29   	n++;
   26.30   
   26.31   	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
   26.32 @@ -30,7 +40,7 @@
   26.33   	n++;
   26.34   
   26.35   #ifdef CONFIG_BLK_DEV_INITRD
   26.36 -@@ -299,7 +313,11 @@
   26.37 +@@ -299,7 +316,11 @@
   26.38   }
   26.39   
   26.40   void __init
   26.41 @@ -42,7 +52,7 @@
   26.42   {
   26.43   	unw_init();
   26.44   
   26.45 -@@ -308,8 +326,14 @@
   26.46 +@@ -308,8 +329,14 @@
   26.47   	*cmdline_p = __va(ia64_boot_param->command_line);
   26.48   	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
   26.49   
   26.50 @@ -57,7 +67,7 @@
   26.51   
   26.52   #ifdef CONFIG_IA64_GENERIC
   26.53   	{
   26.54 -@@ -351,8 +375,17 @@
   26.55 +@@ -351,8 +378,18 @@
   26.56   # endif
   26.57   #endif /* CONFIG_APCI_BOOT */
   26.58   
   26.59 @@ -71,11 +81,23 @@
   26.60  +late_setup_arch (char **cmdline_p)
   26.61  +{
   26.62  +#undef CONFIG_ACPI_BOOT
   26.63 ++	acpi_table_init();
   26.64  +#endif
   26.65   	/* process SAL system table: */
   26.66   	ia64_sal_init(efi.sal_systab);
   26.67   
   26.68 -@@ -492,12 +525,14 @@
   26.69 +@@ -360,6 +397,10 @@
   26.70 + 	cpu_physical_id(0) = hard_smp_processor_id();
   26.71 + #endif
   26.72 + 
   26.73 ++#ifdef CONFIG_VTI
   26.74 ++	identify_vmx_feature();
   26.75 ++#endif // CONFIG_VTI
   26.76 ++
   26.77 + 	cpu_init();	/* initialize the bootstrap CPU */
   26.78 + 
   26.79 + #ifdef CONFIG_ACPI_BOOT
   26.80 +@@ -492,12 +533,14 @@
   26.81   {
   26.82   }
   26.83   
   26.84 @@ -90,7 +112,20 @@
   26.85   
   26.86   void
   26.87   identify_cpu (struct cpuinfo_ia64 *c)
   26.88 -@@ -659,7 +694,11 @@
   26.89 +@@ -551,6 +594,12 @@
   26.90 + 	}
   26.91 + 	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
   26.92 + 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
   26.93 ++
   26.94 ++#ifdef CONFIG_VTI
   26.95 ++	/* If vmx feature is on, do necessary initialization for vmx */
   26.96 ++	if (vmx_enabled)
   26.97 ++		vmx_init_env();
   26.98 ++#endif
   26.99 + }
  26.100 + 
  26.101 + void
  26.102 +@@ -659,7 +708,11 @@
  26.103   					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
  26.104   	atomic_inc(&init_mm.mm_count);
  26.105   	current->active_mm = &init_mm;
    27.1 --- a/xen/arch/ia64/patch/linux-2.6.11/swiotlb.c	Mon May 23 15:22:15 2005 +0000
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,29 +0,0 @@
    27.4 ---- ../../linux-2.6.11/arch/ia64/lib/swiotlb.c	2005-03-02 00:38:17.000000000 -0700
    27.5 -+++ arch/ia64/lib/swiotlb.c	2005-05-02 13:04:15.000000000 -0600
    27.6 -@@ -49,6 +49,15 @@
    27.7 -  */
    27.8 - #define IO_TLB_SHIFT 11
    27.9 - 
   27.10 -+#ifdef XEN
   27.11 -+#define __order_to_size(_order) (1 << (_order+PAGE_SHIFT))
   27.12 -+#define alloc_bootmem_low_pages(_x) alloc_xenheap_pages(get_order(_x))
   27.13 -+#define alloc_bootmem_low(_x) alloc_xenheap_pages(get_order(_x))
   27.14 -+#define alloc_bootmem(_x) alloc_xenheap_pages(get_order(_x))
   27.15 -+#define __get_free_pages(_x,_y) alloc_xenheap_pages(__order_to_size(_y))
   27.16 -+#define free_pages(_x,_y) free_xenheap_pages(_x,_y)
   27.17 -+#endif
   27.18 -+
   27.19 - int swiotlb_force;
   27.20 - 
   27.21 - /*
   27.22 -@@ -388,8 +397,10 @@
   27.23 - 	 * When the mapping is small enough return a static buffer to limit
   27.24 - 	 * the damage, or panic when the transfer is too big.
   27.25 - 	 */
   27.26 -+#ifndef XEN
   27.27 - 	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
   27.28 - 	       "device %s\n", size, dev ? dev->bus_id : "?");
   27.29 -+#endif
   27.30 - 
   27.31 - 	if (size > io_tlb_overflow && do_panic) {
   27.32 - 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
    28.1 --- a/xen/arch/ia64/patch/linux-2.6.11/system.h	Mon May 23 15:22:15 2005 +0000
    28.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/system.h	Mon May 23 15:29:59 2005 +0000
    28.3 @@ -1,32 +1,38 @@
    28.4 ---- ../../linux-2.6.11/include/asm-ia64/system.h	2005-03-02 00:38:07.000000000 -0700
    28.5 -+++ include/asm-ia64/system.h	2005-05-02 10:18:30.000000000 -0600
    28.6 -@@ -24,8 +24,15 @@
    28.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/system.h	2005-03-01 23:38:07.000000000 -0800
    28.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/system.h	2005-05-20 09:36:02.000000000 -0700
    28.9 +@@ -18,14 +18,19 @@
   28.10 + #include <asm/page.h>
   28.11 + #include <asm/pal.h>
   28.12 + #include <asm/percpu.h>
   28.13 ++#ifdef XEN
   28.14 ++#include <asm/xensystem.h>
   28.15 ++#endif
   28.16 + 
   28.17 + #define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
   28.18 + /*
   28.19    * 0xa000000000000000+2*PERCPU_PAGE_SIZE
   28.20    * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
   28.21    */
   28.22 -+#ifdef XEN
   28.23 -+#define KERNEL_START		 0xf000000004000000
   28.24 -+#define PERCPU_ADDR		 0xf100000000000000-PERCPU_PAGE_SIZE
   28.25 -+#define SHAREDINFO_ADDR		 0xf100000000000000
   28.26 -+#define VHPT_ADDR		 0xf200000000000000
   28.27 -+#else
   28.28 ++#ifndef XEN
   28.29   #define KERNEL_START		 __IA64_UL_CONST(0xa000000100000000)
   28.30   #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
   28.31  +#endif
   28.32   
   28.33   #ifndef __ASSEMBLY__
   28.34   
   28.35 -@@ -218,9 +225,13 @@
   28.36 +@@ -218,6 +223,7 @@
   28.37   # define PERFMON_IS_SYSWIDE() (0)
   28.38   #endif
   28.39   
   28.40 -+#ifdef XEN
   28.41 -+#define IA64_HAS_EXTRA_STATE(t) 0
   28.42 -+#else
   28.43 ++#ifndef XEN
   28.44   #define IA64_HAS_EXTRA_STATE(t)							\
   28.45   	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
   28.46   	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
   28.47 -+#endif
   28.48 +@@ -230,6 +236,7 @@
   28.49 + 	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
   28.50 + 	(last) = ia64_switch_to((next));							 \
   28.51 + } while (0)
   28.52 ++#endif 
   28.53   
   28.54 - #define __switch_to(prev,next,last) do {							 \
   28.55 - 	if (IA64_HAS_EXTRA_STATE(prev))								 \
   28.56 + #ifdef CONFIG_SMP
   28.57 + /*
    29.1 --- a/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Mon May 23 15:22:15 2005 +0000
    29.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Mon May 23 15:29:59 2005 +0000
    29.3 @@ -1,8 +1,145 @@
    29.4 ---- ../../linux-2.6.11/arch/ia64/kernel/unaligned.c	2005-03-02 00:38:25.000000000 -0700
    29.5 -+++ arch/ia64/unaligned.c	2005-05-10 15:46:09.000000000 -0600
    29.6 -@@ -437,7 +437,11 @@
    29.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/unaligned.c	2005-03-01 23:38:25.000000000 -0800
    29.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/unaligned.c	2005-05-18 12:40:50.000000000 -0700
    29.9 +@@ -201,7 +201,11 @@
   29.10 + 
   29.11 + 	RPT(r1), RPT(r2), RPT(r3),
   29.12 + 
   29.13 ++#ifdef  CONFIG_VTI
   29.14 ++	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
   29.15 ++#else   //CONFIG_VTI
   29.16 + 	RSW(r4), RSW(r5), RSW(r6), RSW(r7),
   29.17 ++#endif  //CONFIG_VTI
   29.18 + 
   29.19 + 	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
   29.20 + 	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
   29.21 +@@ -291,6 +295,121 @@
   29.22 + 	return reg;
   29.23   }
   29.24   
   29.25 ++#ifdef CONFIG_VTI
   29.26 ++static void
   29.27 ++set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
   29.28 ++{
   29.29 ++	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   29.30 ++	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
   29.31 ++	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   29.32 ++	unsigned long rnats, nat_mask;
   29.33 ++    unsigned long old_rsc,new_rsc;
   29.34 ++	unsigned long on_kbs,rnat;
   29.35 ++	long sof = (regs->cr_ifs) & 0x7f;
   29.36 ++	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
   29.37 ++	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
   29.38 ++	long ridx = r1 - 32;
   29.39 ++
   29.40 ++	if (ridx >= sof) {
   29.41 ++		/* this should never happen, as the "rsvd register fault" has higher priority */
   29.42 ++		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
   29.43 ++		return;
   29.44 ++	}
   29.45 ++
   29.46 ++	if (ridx < sor)
   29.47 ++		ridx = rotate_reg(sor, rrb_gr, ridx);
   29.48 ++
   29.49 ++    old_rsc=ia64_get_rsc();
   29.50 ++    new_rsc=old_rsc&(~0x3);
   29.51 ++    ia64_set_rsc(new_rsc);
   29.52 ++
   29.53 ++    bspstore = ia64_get_bspstore();
   29.54 ++    bsp =kbs + (regs->loadrs >> 19);//16+3
   29.55 ++
   29.56 ++	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
   29.57 ++    nat_mask = 1UL << ia64_rse_slot_num(addr);
   29.58 ++	rnat_addr = ia64_rse_rnat_addr(addr);
   29.59 ++
   29.60 ++    if(addr >= bspstore){
   29.61 ++
   29.62 ++        ia64_flushrs ();
   29.63 ++        ia64_mf ();
   29.64 ++		*addr = val;
   29.65 ++        bspstore = ia64_get_bspstore();
   29.66 ++    	rnat = ia64_get_rnat ();
   29.67 ++        if(bspstore < rnat_addr){
   29.68 ++            rnat=rnat&(~nat_mask);
   29.69 ++        }else{
   29.70 ++            *rnat_addr = (*rnat_addr)&(~nat_mask);
   29.71 ++        }
   29.72 ++        ia64_mf();
   29.73 ++        ia64_loadrs();
   29.74 ++        ia64_set_rnat(rnat);
   29.75 ++    }else{
   29.76 ++
   29.77 ++    	rnat = ia64_get_rnat ();
   29.78 ++		*addr = val;
   29.79 ++        if(bspstore < rnat_addr){
   29.80 ++            rnat=rnat&(~nat_mask);
   29.81 ++        }else{
   29.82 ++            *rnat_addr = (*rnat_addr)&(~nat_mask);
   29.83 ++        }
   29.84 ++        ia64_set_bspstore (bspstore);
   29.85 ++        ia64_set_rnat(rnat);
   29.86 ++    }
   29.87 ++    ia64_set_rsc(old_rsc);
   29.88 ++}
   29.89 ++
   29.90 ++
   29.91 ++static void
   29.92 ++get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
   29.93 ++{
   29.94 ++	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   29.95 ++	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
   29.96 ++	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   29.97 ++	unsigned long rnats, nat_mask;
   29.98 ++	unsigned long on_kbs;
   29.99 ++    unsigned long old_rsc, new_rsc;
  29.100 ++	long sof = (regs->cr_ifs) & 0x7f;
  29.101 ++	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
  29.102 ++	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  29.103 ++	long ridx = r1 - 32;
  29.104 ++
  29.105 ++	if (ridx >= sof) {
   29.106 ++		/* this should never happen; treat an out-of-frame read as fatal */
  29.107 ++		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
  29.108 ++		panic("wrong stack register number");
  29.109 ++	}
  29.110 ++
  29.111 ++	if (ridx < sor)
  29.112 ++		ridx = rotate_reg(sor, rrb_gr, ridx);
  29.113 ++
  29.114 ++    old_rsc=ia64_get_rsc();
  29.115 ++    new_rsc=old_rsc&(~(0x3));
  29.116 ++    ia64_set_rsc(new_rsc);
  29.117 ++
  29.118 ++    bspstore = ia64_get_bspstore();
  29.119 ++    bsp =kbs + (regs->loadrs >> 19); //16+3;
  29.120 ++
  29.121 ++	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
  29.122 ++    nat_mask = 1UL << ia64_rse_slot_num(addr);
  29.123 ++	rnat_addr = ia64_rse_rnat_addr(addr);
  29.124 ++
  29.125 ++    if(addr >= bspstore){
  29.126 ++
  29.127 ++        ia64_flushrs ();
  29.128 ++        ia64_mf ();
  29.129 ++        bspstore = ia64_get_bspstore();
  29.130 ++    }
  29.131 ++	*val=*addr;
  29.132 ++    if(bspstore < rnat_addr){
  29.133 ++        *nat=!!(ia64_get_rnat()&nat_mask);
  29.134 ++    }else{
  29.135 ++        *nat = !!((*rnat_addr)&nat_mask);
  29.136 ++    }
  29.137 ++    ia64_set_rsc(old_rsc);
  29.138 ++}
  29.139 ++#else // CONFIG_VTI
  29.140 + static void
  29.141 + set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
  29.142 + {
  29.143 +@@ -435,9 +554,14 @@
  29.144 + 		*nat = 0;
  29.145 + 	return;
  29.146 + }
  29.147 ++#endif // CONFIG_VTI
  29.148 + 
  29.149   
  29.150  +#ifdef XEN
  29.151  +void
  29.152 @@ -12,7 +149,19 @@
  29.153   setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
  29.154   {
  29.155   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
  29.156 -@@ -522,7 +526,11 @@
  29.157 +@@ -466,7 +590,11 @@
  29.158 + 		unat = &sw->ar_unat;
  29.159 + 	} else {
  29.160 + 		addr = (unsigned long)regs;
  29.161 ++#ifdef CONFIG_VTI
  29.162 ++		unat = &regs->eml_unat;
  29.163 ++#else //CONFIG_VTI
  29.164 + 		unat = &sw->caller_unat;
  29.165 ++#endif  //CONFIG_VTI
  29.166 + 	}
  29.167 + 	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
  29.168 + 	       addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
  29.169 +@@ -522,7 +650,11 @@
  29.170   	 */
  29.171   	if (regnum >= IA64_FIRST_ROTATING_FR) {
  29.172   		ia64_sync_fph(current);
  29.173 @@ -24,7 +173,7 @@
  29.174   	} else {
  29.175   		/*
  29.176   		 * pt_regs or switch_stack ?
  29.177 -@@ -581,7 +589,11 @@
  29.178 +@@ -581,7 +713,11 @@
  29.179   	 */
  29.180   	if (regnum >= IA64_FIRST_ROTATING_FR) {
  29.181   		ia64_flush_fph(current);
  29.182 @@ -36,7 +185,7 @@
  29.183   	} else {
  29.184   		/*
  29.185   		 * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
  29.186 -@@ -611,7 +623,11 @@
  29.187 +@@ -611,7 +747,11 @@
  29.188   }
  29.189   
  29.190   
  29.191 @@ -48,7 +197,19 @@
  29.192   getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
  29.193   {
  29.194   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
  29.195 -@@ -1294,6 +1310,9 @@
  29.196 +@@ -640,7 +780,11 @@
  29.197 + 		unat = &sw->ar_unat;
  29.198 + 	} else {
  29.199 + 		addr = (unsigned long)regs;
  29.200 ++#ifdef  CONFIG_VTI
   29.201 ++		unat = &regs->eml_unat;
  29.202 ++#else   //CONFIG_VTI
  29.203 + 		unat = &sw->caller_unat;
  29.204 ++#endif  //CONFIG_VTI
  29.205 + 	}
  29.206 + 
  29.207 + 	DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
  29.208 +@@ -1294,6 +1438,9 @@
  29.209   void
  29.210   ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
  29.211   {
  29.212 @@ -58,7 +219,7 @@
  29.213   	struct ia64_psr *ipsr = ia64_psr(regs);
  29.214   	mm_segment_t old_fs = get_fs();
  29.215   	unsigned long bundle[2];
  29.216 -@@ -1502,4 +1521,5 @@
  29.217 +@@ -1502,4 +1649,5 @@
  29.218   	si.si_imm = 0;
  29.219   	force_sig_info(SIGBUS, &si, current);
  29.220   	goto done;
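In the set_rse_reg()/get_rse_reg() additions above, ridx = r1 - 32 indexes the current register frame: sof (size of frame) and sor (size of rotating region) come from cr.ifs, and indices inside the rotating region are renamed by the rotating base rrb.gr. The rotate_reg() helper those functions call does the renaming; restated as a minimal sketch for illustration:

    /* Rotating-register renaming: a virtual index below sor is shifted
     * by the rotating base (rrb) modulo sor.  Mirrors rotate_reg() in
     * the surrounding unaligned.c.
     */
    static unsigned long rotate_reg(unsigned long sor, unsigned long rrb,
    				unsigned long reg)
    {
    	reg += rrb;
    	if (reg >= sor)
    		reg -= sor;
    	return reg;
    }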
    30.1 --- a/xen/arch/ia64/process.c	Mon May 23 15:22:15 2005 +0000
    30.2 +++ b/xen/arch/ia64/process.c	Mon May 23 15:29:59 2005 +0000
    30.3 @@ -64,11 +64,16 @@ long do_iopl(domid_t domain, unsigned in
    30.4  void schedule_tail(struct exec_domain *next)
    30.5  {
    30.6  	unsigned long rr7;
    30.7 -	printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
    30.8 -	printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
    30.9 +	//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
   30.10 +	//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
   30.11 +#ifdef CONFIG_VTI
   30.12 +	/* rr7 will be postponed to last point when resuming back to guest */
   30.13 +	vmx_load_all_rr(current);
   30.14 +#else // CONFIG_VTI
   30.15  	if (rr7 = load_region_regs(current)) {
   30.16  		printk("schedule_tail: change to rr7 not yet implemented\n");
   30.17  	}
   30.18 +#endif // CONFIG_VTI
   30.19  }
   30.20  
   30.21  extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
   30.22 @@ -346,8 +351,8 @@ void ia64_do_page_fault (unsigned long a
   30.23  		// FIXME should validate mpaddr here
   30.24  		if (d == dom0) {
   30.25  			if (address < dom0_start || address >= dom0_start + dom0_size) {
   30.26 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   30.27 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
   30.28 +				//printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   30.29 +				//printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
   30.30  				tdpfoo();
   30.31  			}
   30.32  		}
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xen/arch/ia64/tools/README.RunVT	Mon May 23 15:29:59 2005 +0000
    31.3 @@ -0,0 +1,59 @@
     31.4 +INSTRUCTIONS FOR Running IPF/Xen on a VT-enabled Tiger4 platform
    31.5 +
     31.6 +Note: Domain0 must be an unmodified Linux kernel
    31.7 +
    31.8 +1) Perform operations in README.xenia64 to get a flattened Xen IPF source tree
    31.9 +
   31.10 +2) Build an unmodified Linux 2.6 kernel
   31.11 +	a) tar xvfz  linux-2.6.11.tar.gz
   31.12 +        b) cp arch/ia64/configs/tiger_defconfig .config
   31.13 +	c) Build linux.
   31.14 +   		1) yes "" | make oldconfig
   31.15 +   		2) make
   31.16 +
   31.17 +3) Build IPF VT-enabled Xen image
   31.18 +	edit xen/arch/ia64/Rules.mk for
   31.19 +		CONFIG_VTI	?= y 	to enable VT-enable build
   31.20 +4) Setup ELILO.CONF
   31.21 +	image=xen
   31.22 +        	label=xen
   31.23 +        	initrd=vmlinux2.6.11		// unmodified Linux kernel image
   31.24 +        	read-only
   31.25 +        	append="nomca root=/dev/sda3"
   31.26 +
    31.27 +STATUS as of 4/28/05 - Features implemented for Domain0
   31.28 +
   31.29 +0. Runs unmodified Linux kernel as Domain0
   31.30 +    Validated with Linux 2.6.11 to run Xwindow and NIC on UP logical processor
   31.31 +
   31.32 +1. Take advantage of VT-enabled processor
    31.33 +   a. Processor intercepts guest privileged instructions and delivers Opcode/Cause to the Hypervisor
   31.34 +   b. One VPD (Virtual Processor Descriptor) per Virtual Processor
    31.35 +   c. Domains run in a different virtual address space from the hypervisor. Domains have one less VA bit than the hypervisor; the hypervisor runs at 0xF00000... addresses, which the processor protects from Domains.
   31.36 +
   31.37 +2. vTLB and guest_VHPT
    31.38 +   a. vTLB extends machine TLB entries through a hypervisor-internal data structure.
    31.39 +      vTLB caches the TR's and TC's a Domain installs, and installs TC's on the Domain's behalf instead.
    31.40 +      vTLB implements collision chains.
    31.41 +   b. The processor walks the hypervisor-internal VHPT, not the domain VHPT.  On a TLB miss, the vTLB is consulted first so a hypervisor-cached entry can be placed into the VHPT without injecting a TLB miss into the domain.
   31.42 +
    31.43 +3. Region ID fixed partitioning (a worked sketch follows this README)
    31.44 +   a. Currently hard-partitions the 24-bit RID space into 16 partitions using the top 4 bits.
    31.45 +   b. The Hypervisor uses the very last partition of RIDs, i.e., the 0xFxxxxx RIDs.
    31.46 +   c. Effectively supports Domain0 and 14 other DomainN.
   31.47 +
    31.48 +4. The Hypervisor is mapped with 2 sets of RIDs at runtime: its own RIDs and the active Domain's RIDs
    31.49 +   a. Domain RIDs are used by the processor to access the guest_VHPT while the Domain runs
    31.50 +   b. Hypervisor RIDs are used while the Hypervisor itself is running
    31.51 +   c. This implies some region register transitions on entering/exiting the hypervisor
   31.52 +
    31.53 +5. Linux-styled pt_regs with minor modifications for VT and instruction emulation
    31.54 +   a. Some Domain registers are saved/restored from the VPD
    31.55 +   b. pt_regs is extended to include r4~r7 and the Domain's iipa & isr for possible instruction emulation, so there is no need to save a complete switch_stack on IVT entry
   31.56 +
    31.57 +6. Linux-styled per-virtual-processor memory/RSE stacks, the same as for a non-VT domain0
   31.58 +
    31.59 +7. Handles split I/D-cache designs
    31.60 +   Newer IPF processors have split I/D-caches.  The design takes this into consideration when Xen copies Domain0 to its target address for execution.
   31.61 +
   31.62 +
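A standalone sketch of the fixed RID partitioning described in item 3 of the README above: 24-bit RIDs, top 4 bits select one of 16 partitions, and the hypervisor takes partition 0xF. Which partition a given domain receives is illustrative here, not taken from this changeset:

    #include <stdio.h>

    #define RID_BITS	24
    #define PART_SHIFT	(RID_BITS - 4)		/* top 4 bits */
    #define RIDS_PER_PART	(1UL << PART_SHIFT)	/* 2^20 RIDs each */
    #define XEN_PARTITION	0xFUL			/* the 0xFxxxxx RIDs */

    static unsigned long first_rid(unsigned long partition)
    {
    	return partition << PART_SHIFT;
    }

    int main(void)
    {
    	printf("RIDs per partition: %lu\n", RIDS_PER_PART);
    	printf("hypervisor RIDs start at %#lx\n", first_rid(XEN_PARTITION));
    	printf("a domain in partition 1 starts at %#lx\n", first_rid(1));
    	return 0;
    }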
    32.1 --- a/xen/arch/ia64/tools/mkbuildtree	Mon May 23 15:22:15 2005 +0000
    32.2 +++ b/xen/arch/ia64/tools/mkbuildtree	Mon May 23 15:29:59 2005 +0000
    32.3 @@ -46,13 +46,13 @@ mkdir include/asm-generic
    32.4  mkdir include/asm-ia64/linux
    32.5  mkdir include/asm-ia64/linux/byteorder
    32.6  # use "gcc -Iinclude/asm-ia64" to find these linux includes
    32.7 -ln -s $XEN/include/xen $XEN/include/linux
    32.8 -ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 
    32.9 +#ln -s $XEN/include/xen $XEN/include/linux
   32.10 +#ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 
   32.11  
   32.12  # prepare for building asm-offsets (circular dependency)
   32.13 -echo '#define IA64_TASK_SIZE 0' > include/asm-ia64/asm-offsets.h
   32.14 -sleep 2
   32.15 -touch arch/ia64/asm-offsets.c
   32.16 +#echo '#define IA64_TASK_SIZE 0' > include/asm-ia64/asm-offsets.h
   32.17 +#sleep 2
   32.18 +#touch arch/ia64/asm-offsets.c
   32.19  
   32.20  # patches to xen/common files
   32.21  #xen_patch common/domain.c domain.c
   32.22 @@ -107,7 +107,7 @@ cp_patch arch/ia64/mm/tlb.c arch/ia64/tl
   32.23  #cp_patch arch/ia64/hp/sim/hpsim_irq.c arch/ia64/hpsim_irq.c hpsim_irq.c
   32.24  
   32.25  softlink arch/ia64/kernel/efi_stub.S arch/ia64/efi_stub.S
   32.26 -softlink arch/ia64/kernel/entry.h arch/ia64/entry.h
   32.27 +cp_patch arch/ia64/kernel/entry.h arch/ia64/entry.h entry.h
   32.28  softlink arch/ia64/kernel/ia64_ksyms.c arch/ia64/ia64_ksyms.c
   32.29  softlink arch/ia64/kernel/irq_lsapic.c arch/ia64/irq_lsapic.c
   32.30  softlink arch/ia64/kernel/machvec.c arch/ia64/machvec.c
   32.31 @@ -141,8 +141,6 @@ softlink arch/ia64/lib/strlen.S arch/ia6
   32.32  softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S
   32.33  softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S
   32.34  softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S
   32.35 -#softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
   32.36 -cp_patch arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c swiotlb.c
   32.37  softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S
   32.38  
   32.39  softlink lib/cmdline.c arch/ia64/cmdline.c
   32.40 @@ -172,8 +170,8 @@ cp_patch arch/ia64/hp/sim/hpsim_ssc.h in
   32.41  
   32.42  #cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h
   32.43  softlink include/asm-ia64/current.h include/asm-ia64/current.h
   32.44 -#cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
   32.45 -softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h
   32.46 +cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
   32.47 +#softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h
   32.48  #cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h
   32.49  softlink include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h
   32.50  #cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
   32.51 @@ -217,7 +215,7 @@ softlink include/asm-ia64/errno.h includ
   32.52  softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h
   32.53  softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h
   32.54  #softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h
   32.55 -softlink include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h
   32.56 +cp_patch include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h ia64regs.h
   32.57  softlink include/asm-ia64/intrinsics.h include/asm-ia64/intrinsics.h
   32.58  softlink include/asm-ia64/ioctl.h include/asm-ia64/ioctl.h
   32.59  softlink include/asm-ia64/linkage.h include/asm-ia64/linkage.h
   32.60 @@ -229,7 +227,7 @@ softlink include/asm-ia64/mca.h include/
   32.61  softlink include/asm-ia64/meminit.h include/asm-ia64/meminit.h
   32.62  softlink include/asm-ia64/mman.h include/asm-ia64/mman.h
   32.63  softlink include/asm-ia64/numa.h include/asm-ia64/numa.h
   32.64 -softlink include/asm-ia64/pal.h include/asm-ia64/pal.h
   32.65 +cp_patch include/asm-ia64/pal.h include/asm-ia64/pal.h pal.h
   32.66  softlink include/asm-ia64/param.h include/asm-ia64/param.h
   32.67  softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
   32.68  softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
   32.69 @@ -237,7 +235,7 @@ softlink include/asm-ia64/percpu.h inclu
   32.70  #softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
   32.71  cp_patch include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h pgalloc.h
   32.72  softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
   32.73 -softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
   32.74 +cp_patch include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h ptrace.h
   32.75  softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
   32.76  softlink include/asm-ia64/rse.h include/asm-ia64/rse.h
   32.77  softlink include/asm-ia64/rwsem.h include/asm-ia64/rwsem.h
    33.1 --- a/xen/arch/ia64/vcpu.c	Mon May 23 15:22:15 2005 +0000
    33.2 +++ b/xen/arch/ia64/vcpu.c	Mon May 23 15:29:59 2005 +0000
    33.3 @@ -14,6 +14,9 @@
    33.4  #include <asm/tlb.h>
    33.5  #include <asm/processor.h>
    33.6  #include <asm/delay.h>
    33.7 +#ifdef CONFIG_VTI
    33.8 +#include <asm/vmx_vcpu.h>
    33.9 +#endif // CONFIG_VTI
   33.10  
   33.11  typedef	union {
   33.12  	struct ia64_psr ia64_psr;
   33.13 @@ -523,12 +526,19 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   33.14  		printf("vcpu_pend_interrupt: bad vector\n");
   33.15  		return;
   33.16  	}
   33.17 +#ifdef CONFIG_VTI
   33.18 +    if ( VMX_DOMAIN(vcpu) ) {
   33.19 + 	    set_bit(vector,VPD_CR(vcpu,irr));
   33.20 +    } else
   33.21 +#endif // CONFIG_VTI
   33.22 +    {
   33.23  	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
   33.24  	if (test_bit(vector,PSCBX(vcpu,irr))) {
   33.25  //printf("vcpu_pend_interrupt: overrun\n");
   33.26  	}
   33.27  	set_bit(vector,PSCBX(vcpu,irr));
   33.28  	PSCB(vcpu,pending_interruption) = 1;
   33.29 +    }
   33.30  }
   33.31  
   33.32  void early_tick(VCPU *vcpu)
   33.33 @@ -619,7 +629,8 @@ extern unsigned long privop_trace;
   33.34  //privop_trace=1;
   33.35  	//TODO: Implement this
   33.36  	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
   33.37 -	*pval = 0;
   33.38 +	//*pval = 0;
   33.39 +	*pval = ia64_getreg(_IA64_REG_CR_LID);
   33.40  	return IA64_NO_FAULT;
   33.41  }
   33.42  
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xen/arch/ia64/vlsapic.c	Mon May 23 15:29:59 2005 +0000
    34.3 @@ -0,0 +1,504 @@
    34.4 +
    34.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    34.6 +/*
    34.7 + * vlsapic.c: virtual lsapic model including ITC timer.
    34.8 + * Copyright (c) 2005, Intel Corporation.
    34.9 + *
   34.10 + * This program is free software; you can redistribute it and/or modify it
   34.11 + * under the terms and conditions of the GNU General Public License,
   34.12 + * version 2, as published by the Free Software Foundation.
   34.13 + *
   34.14 + * This program is distributed in the hope it will be useful, but WITHOUT
   34.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   34.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   34.17 + * more details.
   34.18 + *
   34.19 + * You should have received a copy of the GNU General Public License along with
   34.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   34.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   34.22 + *
   34.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   34.24 + */
   34.25 +
   34.26 +#include <linux/sched.h>
   34.27 +#include <public/arch-ia64.h>
   34.28 +#include <asm/ia64_int.h>
   34.29 +#include <asm/vcpu.h>
   34.30 +#include <asm/regionreg.h>
   34.31 +#include <asm/tlb.h>
   34.32 +#include <asm/processor.h>
   34.33 +#include <asm/delay.h>
   34.34 +#include <asm/vmx_vcpu.h>
   34.35 +#include <asm/vmx_vcpu.h>
   34.36 +#include <asm/regs.h>
   34.37 +#include <asm/gcc_intrin.h>
   34.38 +#include <asm/vmx_mm_def.h>
   34.39 +#include <asm/vmx.h>
   34.40 +#include <asm/hw_irq.h>
   34.41 +#include <asm/vmx_pal_vsa.h>
   34.42 +#include <asm/kregs.h>
   34.43 +
   34.44 +//u64  fire_itc;
   34.45 +//u64  fire_itc2;
   34.46 +//u64  fire_itm;
   34.47 +//u64  fire_itm2;
   34.48 +/*
   34.49 + * Update the checked last_itc.
   34.50 + */
   34.51 +static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
   34.52 +{
   34.53 +    vtm->last_itc = cur_itc;
   34.54 +}
   34.55 +
   34.56 +/*
    34.57 + * ITC value as seen by the guest (host + offset + drift).
   34.58 + */
   34.59 +static uint64_t now_itc(vtime_t *vtm)
   34.60 +{
   34.61 +        uint64_t guest_itc=vtm->vtm_offset+ia64_get_itc();
   34.62 +        
   34.63 +        if ( vtm->vtm_local_drift ) {
   34.64 +//          guest_itc -= vtm->vtm_local_drift;
   34.65 +        }       
   34.66 +        if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
   34.67 +            return guest_itc;
   34.68 +
   34.69 +        }
   34.70 +        else {
    34.71 +            /* guest ITC went backward, e.g. after an LP switch */
   34.72 +            return vtm->last_itc;
   34.73 +        }
   34.74 +}
   34.75 +
   34.76 +/*
   34.77 + * Interval time components reset.
   34.78 + */
   34.79 +static void vtm_reset(VCPU *vcpu)
   34.80 +{
   34.81 +    uint64_t    cur_itc;
   34.82 +    vtime_t     *vtm;
   34.83 +    
   34.84 +    vtm=&(vcpu->arch.arch_vmx.vtm);
   34.85 +    vtm->vtm_offset = 0;
   34.86 +    vtm->vtm_local_drift = 0;
   34.87 +    VPD_CR(vcpu, itm) = 0;
   34.88 +    VPD_CR(vcpu, itv) = 0x10000;
   34.89 +    cur_itc = ia64_get_itc();
   34.90 +    vtm->last_itc = vtm->vtm_offset + cur_itc;
   34.91 +}
   34.92 +
   34.93 +/* callback function when vtm_timer expires */
   34.94 +static void vtm_timer_fn(unsigned long data)
   34.95 +{
   34.96 +    vtime_t *vtm;
   34.97 +    VCPU    *vcpu = (VCPU*)data;
   34.98 +    u64	    cur_itc,vitm;
   34.99 +
  34.100 +    UINT64  vec;
  34.101 +    
  34.102 +    vec = VPD_CR(vcpu, itv) & 0xff;
  34.103 +    vmx_vcpu_pend_interrupt(vcpu, vec);
  34.104 +
  34.105 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.106 +    cur_itc = now_itc(vtm);
  34.107 +    vitm =VPD_CR(vcpu, itm);
  34.108 + //fire_itc2 = cur_itc;
  34.109 + //fire_itm2 = vitm;
  34.110 +    update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
  34.111 +    vtm->timer_hooked = 0;
  34.112 +}
  34.113 +
  34.114 +void vtm_init(VCPU *vcpu)
  34.115 +{
  34.116 +    vtime_t     *vtm;
  34.117 +    uint64_t    itc_freq;
  34.118 +    
  34.119 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.120 +
  34.121 +    itc_freq = local_cpu_data->itc_freq;
  34.122 +    vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
  34.123 +    vtm->cfg_min_grun=itc_freq*MIN_GUEST_RUNNING_TIME/1000;
  34.124 +    /* set up the actimer */
  34.125 +    init_ac_timer(&(vtm->vtm_timer));
  34.126 +    vtm->timer_hooked = 0;
  34.127 +    vtm->vtm_timer.cpu = 0;     /* Init value for SMP case */
  34.128 +    vtm->vtm_timer.data = (unsigned long)vcpu;
  34.129 +    vtm->vtm_timer.function = vtm_timer_fn;
  34.130 +    vtm_reset(vcpu);
  34.131 +}
  34.132 +
  34.133 +/*
   34.134 + * Action when the guest reads the ITC.
  34.135 + */
  34.136 +uint64_t vtm_get_itc(VCPU *vcpu)
  34.137 +{
  34.138 +    uint64_t    guest_itc, spsr;
  34.139 +    vtime_t    *vtm;
  34.140 +
  34.141 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.142 +    // FIXME: should use local_irq_disable & local_irq_enable ??
  34.143 +    local_irq_save(spsr);
  34.144 +    guest_itc = now_itc(vtm);
  34.145 +    update_last_itc(vtm, guest_itc);
  34.146 +
  34.147 +    local_irq_restore(spsr);
  34.148 +    return guest_itc;
  34.149 +}
  34.150 +
  34.151 +void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
  34.152 +{
  34.153 +    uint64_t    spsr;
  34.154 +    vtime_t     *vtm;
  34.155 +
  34.156 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.157 +    local_irq_save(spsr);
  34.158 +    vtm->vtm_offset = new_itc - ia64_get_itc();
  34.159 +    vtm->last_itc = new_itc;
  34.160 +    vtm_interruption_update(vcpu, vtm);
  34.161 +    local_irq_restore(spsr);
  34.162 +}
  34.163 +
  34.164 +void vtm_set_itv(VCPU *vcpu)
  34.165 +{
  34.166 +    uint64_t    spsr,itv;
  34.167 +    vtime_t     *vtm;
  34.168 +
  34.169 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.170 +    local_irq_save(spsr);
  34.171 +    itv = VPD_CR(vcpu, itv);
  34.172 +    if ( ITV_IRQ_MASK(itv) && vtm->timer_hooked ) {
  34.173 +        rem_ac_timer(&(vtm->vtm_timer));
  34.174 +        vtm->timer_hooked = 0;
  34.175 +    }
  34.176 +    vtm_interruption_update(vcpu, vtm);
  34.177 +    local_irq_restore(spsr);
  34.178 +}
  34.179 +
  34.180 +
  34.181 +/*
   34.182 + * Update the interrupt state or hook the vtm ac_timer to fire.
  34.183 + * At this point vtm_timer should be removed if itv is masked.
  34.184 + */
  34.185 +/* Interrupt must be disabled at this point */
  34.186 +
  34.187 +extern u64 tick_to_ns(u64 tick);
  34.188 +#define TIMER_SLOP (50*1000) /* ns */	/* copy from ac_timer.c */
  34.189 +void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
  34.190 +{
  34.191 +    uint64_t    cur_itc,vitm,vitv;
  34.192 +    uint64_t    expires;
  34.193 +    long     	diff_now, diff_last;
  34.194 +    uint64_t    spsr;
  34.195 +    
  34.196 +    vitv = VPD_CR(vcpu, itv);
  34.197 +    if ( ITV_IRQ_MASK(vitv) ) {
  34.198 +        return;
  34.199 +    }
  34.200 +    
  34.201 +    vitm =VPD_CR(vcpu, itm);
  34.202 +    local_irq_save(spsr);
  34.203 +    cur_itc =now_itc(vtm);
  34.204 +    diff_last = vtm->last_itc - vitm;
  34.205 +    diff_now = cur_itc - vitm;
  34.206 +    update_last_itc (vtm,cur_itc);
  34.207 +    
  34.208 +    if ( diff_last >= 0 ) {
  34.209 +        // interrupt already fired.
  34.210 +        if ( vtm->timer_hooked ) {
  34.211 +            rem_ac_timer(&(vtm->vtm_timer));
  34.212 +            vtm->timer_hooked = 0;          
  34.213 +        }
  34.214 +    }
  34.215 +    else if ( diff_now >= 0 ) {
  34.216 +        // ITV is fired.
  34.217 +        vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
  34.218 +    }
  34.219 +    /* Both last_itc & cur_itc < itm, wait for fire condition */
  34.220 +    else if ( vtm->timer_hooked ) {
  34.221 +        expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
  34.222 +        mod_ac_timer (&(vtm->vtm_timer), expires);
  34.223 +	printf("mod vtm_timer\n");
  34.224 +//fire_itc = cur_itc;
  34.225 +//fire_itm = vitm;
  34.226 +    }
  34.227 +    else {
  34.228 +        vtm->vtm_timer.expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
  34.229 +        vtm->vtm_timer.cpu = vcpu->processor;
  34.230 +            add_ac_timer(&(vtm->vtm_timer));
  34.231 +            vtm->timer_hooked = 1;
  34.232 +//fire_itc = cur_itc;
  34.233 +//fire_itm = vitm;
  34.234 +    }
  34.235 +    local_irq_restore(spsr);
  34.236 +}
  34.237 +
  34.238 +/*
  34.239 + * Action for vtm when the domain is scheduled out.
  34.240 + * Remove the ac_timer for vtm.
  34.241 + */
  34.242 +void vtm_domain_out(VCPU *vcpu)
  34.243 +{
  34.244 +    vtime_t     *vtm;
  34.245 +    uint64_t    spsr;
  34.246 +    
  34.247 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.248 +    local_irq_save(spsr);
  34.249 +    if ( vtm->timer_hooked ) {
  34.250 +        rem_ac_timer(&(vtm->vtm_timer));
  34.251 +        vtm->timer_hooked = 0;
  34.252 +    }
  34.253 +    local_irq_restore(spsr);
  34.254 +}
  34.255 +
  34.256 +/*
  34.257 + * Action for vtm when the domain is scheduled in.
  34.258 + * Fire vtm IRQ or add the ac_timer for vtm.
  34.259 + */
  34.260 +void vtm_domain_in(VCPU *vcpu)
  34.261 +{
  34.262 +    vtime_t     *vtm;
  34.263 +    
  34.264 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  34.265 +    vtm_interruption_update(vcpu, vtm);
  34.266 +}
  34.267 +
  34.268 +
  34.269 +
  34.270 +/*
  34.271 + * Next for vLSapic
  34.272 + */
  34.273 +
  34.274 +#define  NMI_VECTOR         2
  34.275 +#define  ExtINT_VECTOR      0
  34.276 +
  34.277 +#define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
  34.278 +/*
  34.279 + * LID-CR64: Keep in vpd.
  34.280 + * IVR-CR65: (RO) see guest_read_ivr().
  34.281 + * TPR-CR66: Keep in vpd, acceleration enabled.
  34.282 + * EOI-CR67: see guest_write_eoi().
  34.283 + * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[]
  34.284 + *          can move to vpd for optimization.
  34.285 + * ITV: in time virtualization.
  34.286 + * PMV: Keep in vpd initialized as 0x10000.
  34.287 + * CMCV: Keep in vpd initialized as 0x10000.
  34.288 + * LRR0-1: Keep in vpd, initialized as 0x10000.
  34.289 + *
  34.290 + */
  34.291 +
  34.292 +void vlsapic_reset(VCPU *vcpu)
  34.293 +{
  34.294 +    int     i;
  34.295 +    VPD_CR(vcpu, lid) = 0;
  34.296 +    VPD_CR(vcpu, ivr) = 0;
  34.297 +    VPD_CR(vcpu,tpr) = 0x10000;
  34.298 +    VPD_CR(vcpu, eoi) = 0;
  34.299 +    VPD_CR(vcpu, irr[0]) = 0;
  34.300 +    VPD_CR(vcpu, irr[1]) = 0;
  34.301 +    VPD_CR(vcpu, irr[2]) = 0;
  34.302 +    VPD_CR(vcpu, irr[3]) = 0;
  34.303 +    VPD_CR(vcpu, pmv) = 0x10000;
  34.304 +    VPD_CR(vcpu, cmcv) = 0x10000;
  34.305 +    VPD_CR(vcpu, lrr0) = 0x10000;   // default reset value?
  34.306 +    VPD_CR(vcpu, lrr1) = 0x10000;   // default reset value?
  34.307 +    for ( i=0; i<4; i++) {
  34.308 +        VLSAPIC_INSVC(vcpu,i) = 0;
  34.309 +    }
  34.310 +}
  34.311 +
  34.312 +/*
   34.313 + *  Find the highest set bit in 4 words (256 bits total).
   34.314 + *
   34.315 + *  return 0-255: index of the highest set bit.
   34.316 + *          -1 : no bit set.
  34.317 + */
  34.318 +static __inline__ int highest_bits(uint64_t *dat)
  34.319 +{
  34.320 +    uint64_t  bits, bitnum=-1;
  34.321 +    int i;
  34.322 +    
  34.323 +    /* loop for all 256 bits */
  34.324 +    for ( i=3; i >= 0 ; i -- ) {
  34.325 +        bits = dat[i];
  34.326 +        if ( bits ) {
  34.327 +            bitnum = ia64_fls(bits);
  34.328 +            return i*64+bitnum;
  34.329 +        }
  34.330 +    }
  34.331 +   return -1;
  34.332 +}
  34.333 +
  34.334 +/*
  34.335 + * Return 0-255 for pending irq.
  34.336 + *        -1 when no pending.
  34.337 + */
  34.338 +static int highest_pending_irq(VCPU *vcpu)
  34.339 +{
  34.340 +    if ( VPD_CR(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
  34.341 +    if ( VPD_CR(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
  34.342 +    return highest_bits(&VPD_CR(vcpu, irr[0]));
  34.343 +}
  34.344 +
  34.345 +static int highest_inservice_irq(VCPU *vcpu)
  34.346 +{
  34.347 +    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
  34.348 +    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
  34.349 +    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
  34.350 +}
  34.351 +
  34.352 +/*
   34.353 + * Whether the pending irq has higher priority than the in-service one.
  34.354 + *
  34.355 + */
  34.356 +static int is_higher_irq(int pending, int inservice)
  34.357 +{
  34.358 +    return ( (pending >> 4) > (inservice>>4) || 
  34.359 +                ((pending != -1) && (inservice == -1)) );
  34.360 +}
  34.361 +
  34.362 +static int is_higher_class(int pending, int mic)
  34.363 +{
  34.364 +    return ( (pending >> 4) > mic );
  34.365 +}
  34.366 +
  34.367 +static int is_invalid_irq(int vec)
  34.368 +{
  34.369 +    return (vec == 1 || ((vec <= 14 && vec >= 3)));
  34.370 +}
  34.371 +
  34.372 +/* See Table 5-8 in SDM vol2 for the definition */
  34.373 +static int
  34.374 +irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
  34.375 +{
  34.376 +    uint64_t    vtpr;
  34.377 +    
  34.378 +    vtpr = VPD_CR(vcpu, tpr);
  34.379 +
  34.380 +    if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR )
  34.381 +        // Non Maskable Interrupt
  34.382 +        return 0;
  34.383 +
  34.384 +    if ( h_pending == ExtINT_VECTOR && h_inservice >= 16)
  34.385 +        return (vtpr>>16)&1;    // vtpr.mmi
  34.386 +
  34.387 +    if ( !(vtpr&(1UL<<16)) &&
  34.388 +          is_higher_irq(h_pending, h_inservice) &&
  34.389 +          is_higher_class(h_pending, (vtpr>>4)&0xf) )
  34.390 +        return 0;
  34.391 +
  34.392 +    return 1;
  34.393 +}
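         +
         +/*
         + * Worked example for the masking rule above: with vtpr = 0x10000
         + * (mmi=1, mic=0) every interrupt except NMI is masked; with
         + * vtpr = 0x00050 (mmi=0, mic=5) a pending vector is delivered only
         + * if its class beats both the in-service class and mic, i.e. only
         + * vectors >= 0x60 can get through.
         + */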
  34.394 +
  34.395 +void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
  34.396 +{
  34.397 +    uint64_t    spsr;
  34.398 +
  34.399 +    if (vector & ~0xff) {
  34.400 +        printf("vmx_vcpu_pend_interrupt: bad vector\n");
  34.401 +        return;
  34.402 +    }
  34.403 +    local_irq_save(spsr);
  34.404 +    VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
  34.405 +    local_irq_restore(spsr);
  34.406 +}
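         +
         +/*
         + * For illustration: the 256-bit IRR is kept as four 64-bit words, so
         + * vector v lives in word (v >> 6) at bit (v & 63). For example:
         + *
         + *   vmx_vcpu_pend_interrupt(vcpu, 0x45);  // sets bit 5 of irr[1]
         + */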
  34.407 +
  34.408 +/*
  34.409 + * If the new pending interrupt is enabled and not masked, we directly inject 
  34.410 + * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when 
  34.411 + * the interrupt becomes unmasked, it gets injected.
  34.412 + * RETURN:
  34.413 + *  TRUE:   Interrupt is injected.
  34.414 + *  FALSE:  Not injected but may be in VHPI when vac.a_int=1
  34.415 + *
  34.416 + * Optimization: We defer setting the VHPI until the EOI time, if a higher 
  34.417 + *               priority interrupt is in-service. The idea is to reduce the 
  34.418 + *               number of unnecessary calls to inject_vhpi.
  34.419 + */
  34.420 +int vmx_check_pending_irq(VCPU *vcpu)
  34.421 +{
  34.422 +    uint64_t  spsr;
  34.423 +    int     h_pending, h_inservice;
  34.424 +    int injected=0;
  34.425 +    uint64_t    isr;
  34.426 +    IA64_PSR    vpsr;
  34.427 +
  34.428 +    local_irq_save(spsr);
  34.429 +    h_pending = highest_pending_irq(vcpu);
  34.430 +    if ( h_pending == -1 ) goto chk_irq_exit;
  34.431 +    h_inservice = highest_inservice_irq(vcpu);
  34.432 +
  34.433 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  34.434 +    if (  vpsr.i &&
  34.435 +        !irq_masked(vcpu, h_pending, h_inservice) ) {
  34.436 +        //inject_guest_irq(v);
  34.437 +        isr = vpsr.val & IA64_PSR_RI;
  34.438 +        if ( !vpsr.ic )
  34.439 +            panic("Interrupt when IC=0\n");
  34.440 +        vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
  34.441 +        injected = 1;
  34.442 +    }
  34.443 +    else if ( VMX_VPD(vcpu,vac).a_int && 
  34.444 +            is_higher_irq(h_pending,h_inservice) ) {
  34.445 +        vmx_inject_vhpi(vcpu,h_pending);
  34.446 +    }
  34.447 +
  34.448 +chk_irq_exit:
  34.449 +    local_irq_restore(spsr);
  34.450 +    return injected;
  34.451 +}
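         +
         +/*
         + * Typical call sequence (a sketch of how the two routines combine):
         + *
         + *   vmx_vcpu_pend_interrupt(vcpu, vec); // mark vector pending in IRR
         + *   vmx_check_pending_irq(vcpu);        // inject now, or defer via VHPI
         + *
         + * A deferred vector is picked up later by guest_read_vivr() when the
         + * guest reads cr.ivr, which also moves it from the IRR to the
         + * in-service set.
         + */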
  34.452 +
  34.453 +void guest_write_eoi(VCPU *vcpu)
  34.454 +{
  34.455 +    int vec;
  34.456 +    uint64_t  spsr;
  34.457 +
  34.458 +    vec = highest_inservice_irq(vcpu);
  34.459 +    if ( vec < 0 ) panic("Wrong vector to EOI\n");
  34.460 +    local_irq_save(spsr);
  34.461 +    VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
  34.462 +    local_irq_restore(spsr);
  34.463 +    VPD_CR(vcpu, eoi)=0;    // overwrite the data
  34.464 +}
  34.465 +
  34.466 +uint64_t guest_read_vivr(VCPU *vcpu)
  34.467 +{
  34.468 +    int vec, next, h_inservice;
  34.469 +    uint64_t  spsr;
  34.470 +
  34.471 +    local_irq_save(spsr);
  34.472 +    vec = highest_pending_irq(vcpu);
  34.473 +    h_inservice = highest_inservice_irq(vcpu);
  34.474 +    if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) {
  34.475 +        local_irq_restore(spsr);
  34.476 +        return IA64_SPURIOUS_INT_VECTOR;
  34.477 +    }
  34.478 + 
  34.479 +    VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
  34.480 +    VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
  34.481 +
  34.482 +    h_inservice = highest_inservice_irq(vcpu);
  34.483 +    next = highest_pending_irq(vcpu);
  34.484 +    if ( VMX_VPD(vcpu,vac).a_int &&
  34.485 +        (is_higher_irq(next, h_inservice) || (next == -1)) )
  34.486 +        vmx_inject_vhpi(vcpu, next);
  34.487 +    local_irq_restore(spsr);
  34.488 +    return (uint64_t)vec;
  34.489 +}
  34.490 +
  34.491 +void vmx_inject_vhpi(VCPU *vcpu, u8 vec)
  34.492 +{
  34.493 +        VMX_VPD(vcpu,vhpi) = vec / 16;    /* priority class */
  34.494 +
  34.496 +        // NMI and ExtINT are non-maskable
  34.497 +        if ( vec == NMI_VECTOR ) // NMI
  34.498 +                VMX_VPD(vcpu,vhpi) = 32;
  34.499 +        else if (vec == ExtINT_VECTOR) //ExtINT
  34.500 +                VMX_VPD(vcpu,vhpi) = 16;
  34.501 +        else if (vec == (u8)-1)   /* vec is u8, so -1 arrives as 255 */
  34.502 +                VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */
  34.503 +
  34.504 +        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT, 
  34.505 +            (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
  34.506 +}
  34.507 +
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xen/arch/ia64/vmmu.c	Mon May 23 15:29:59 2005 +0000
    35.3 @@ -0,0 +1,801 @@
    35.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    35.5 +/*
    35.6 + * vmmu.c: virtual memory management unit components.
    35.7 + * Copyright (c) 2005, Intel Corporation.
    35.8 + *
    35.9 + * This program is free software; you can redistribute it and/or modify it
   35.10 + * under the terms and conditions of the GNU General Public License,
   35.11 + * version 2, as published by the Free Software Foundation.
   35.12 + *
   35.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   35.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   35.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   35.16 + * more details.
   35.17 + *
   35.18 + * You should have received a copy of the GNU General Public License along with
   35.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   35.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   35.21 + *
   35.22 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   35.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   35.24 + */
   35.25 +#include <linux/sched.h>
   35.26 +#include <asm/tlb.h>
   35.27 +#include <asm/gcc_intrin.h>
   35.28 +#include <asm/vcpu.h>
   35.29 +#include <xen/interrupt.h>
   35.30 +#include <asm/vmx_vcpu.h>
   35.31 +#include <asm/vmx_mm_def.h>
   35.32 +#include <asm/vmx.h>
   35.33 +#include <asm/hw_irq.h>
   35.34 +#include <asm/vmx_pal_vsa.h>
   35.35 +#include <asm/kregs.h>
   35.36 +
   35.37 +/*
   35.38 + * An architected ppn is in 4KB units, while a Xen
   35.39 + * page may be a different size (1 << PAGE_SHIFT).
   35.40 + */
   35.41 +static inline u64 arch_ppn_to_xen_ppn(u64 appn)
   35.42 +{
   35.43 +    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
   35.44 +}
   35.45 +
   35.46 +static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
   35.47 +{
   35.48 +    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
   35.49 +}
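         +
         +/*
         + * Worked example, assuming PAGE_SHIFT = 14 (16KB Xen pages): arch ppn
         + * 0x8 (byte address 0x8000) converts to xen ppn 0x2, and xen ppn 0x2
         + * converts back to arch ppn 0x8.
         + */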
   35.50 +
   35.51 +
   35.52 +/*
   35.53 + * Get the machine page frame number, in architected 4KB units.
   35.54 + * Input:
   35.55 + *  domid: domain id (DOMID_SELF for the current domain); gpfn: guest pfn.
   35.56 + */
   35.57 +u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
   35.58 +{
   35.59 +    struct domain *d;
   35.60 +    u64    i, xen_gppn, xen_mppn, mpfn;
   35.61 +    
   35.62 +    if ( domid == DOMID_SELF ) {
   35.63 +        d = current->domain;
   35.64 +    }
   35.65 +    else {
   35.66 +        d = find_domain_by_id(domid);
   35.67 +    }
   35.68 +    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
   35.69 +    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
   35.70 +/*
   35.71 +    for (i=0; i<pages; i++) {
   35.72 +        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
   35.73 +            return INVALID_MFN;
   35.74 +        }
   35.75 +    }
   35.76 +*/
   35.77 +    mpfn= xen_ppn_to_arch_ppn(xen_mppn);
   35.78 +    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn);
   35.79 +    return mpfn;
   35.80 +    
   35.81 +}
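         +
         +/*
         + * Worked example, again assuming PAGE_SHIFT = 14: gpfn 0x403 belongs to
         + * xen gppn 0x100; if that maps to xen mfn 0x2a0, the result is
         + * (0x2a0 << 2) | (0x403 & 3) = 0xa83, i.e. a 4KB-unit mpfn with the
         + * sub-page offset of the original gpfn merged back in.
         + */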
   35.82 +
   35.83 +/*
   35.84 + * The VRN bits of va stand for which rr to get.
   35.85 + */
   35.86 +rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
   35.87 +{
   35.88 +    rr_t   vrr;
   35.89 +    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
   35.90 +    return vrr;
   35.91 +}
   35.92 +
   35.93 +
   35.94 +void recycle_message(thash_cb_t *hcb, u64 para)
   35.95 +{
   35.96 +    printk("hcb=%p recycled with %lx\n",hcb,para);
   35.97 +}
   35.98 +
   35.99 +
  35.100 +/*
  35.101 + * Purge all guest TCs on the logical processor.
  35.102 + * Instead of purging all LP TCs, we should only purge   
  35.103 + * TCs that belong to this guest.
  35.104 + */
  35.105 +void
  35.106 +purge_machine_tc_by_domid(domid_t domid)
  35.107 +{
  35.108 +#ifndef PURGE_GUEST_TC_ONLY
  35.109 +    // purge all TCs
  35.110 +    struct ia64_pal_retval  result;
  35.111 +    u64 addr;
  35.112 +    u32 count1,count2;
  35.113 +    u32 stride1,stride2;
  35.114 +    u32 i,j;
  35.115 +    u64 psr;
  35.116 +    
  35.117 +
  35.118 +    result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
  35.119 +    if ( result.status != 0 ) {
  35.120 +        panic ("PAL_PTCE_INFO failed\n");
  35.121 +    }
  35.122 +    addr = result.v0;
  35.123 +    count1 = HIGH_32BITS(result.v1);
  35.124 +    count2 = LOW_32BITS (result.v1);
  35.125 +    stride1 = HIGH_32BITS(result.v2);
  35.126 +    stride2 = LOW_32BITS (result.v2);
  35.127 +    
  35.128 +    local_irq_save(psr);
  35.129 +    for (i=0; i<count1; i++) {
  35.130 +        for (j=0; j<count2; j++) {
  35.131 +            ia64_ptce(addr);
  35.132 +            addr += stride2;
  35.133 +        }
  35.134 +        addr += stride1;
  35.135 +    }
  35.136 +    local_irq_restore(psr);
  35.137 +#else
  35.138 +    // purge only the TCs that belong to this guest.
  35.139 +#endif
  35.140 +}
  35.141 +
  35.142 +static thash_cb_t *init_domain_vhpt(struct exec_domain *d)
  35.143 +{
  35.144 +    struct pfn_info *page;
  35.145 +    void   *vbase,*vcur;
  35.146 +    vhpt_special *vs;
  35.147 +    thash_cb_t  *vhpt;
  35.148 +    PTA pta_value;
  35.149 +    
  35.150 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
  35.151 +    if ( page == NULL ) {
  35.152 +        panic("Not enough contiguous memory for init_domain_mm\n");
  35.153 +    }
  35.154 +    vbase = page_to_virt(page);
  35.155 +    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
  35.156 +    memset(vbase, 0, VCPU_TLB_SIZE);
  35.157 +    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
  35.158 +    vhpt = --((thash_cb_t*)vcur);
  35.159 +    vhpt->ht = THASH_VHPT;
  35.160 +    vhpt->vcpu = d;
  35.161 +    vhpt->hash_func = machine_thash;
  35.162 +    vs = --((vhpt_special *)vcur);
  35.163 +
  35.164 +    /* Setup guest pta */
  35.165 +    pta_value.val = 0;
  35.166 +    pta_value.ve = 1;
  35.167 +    pta_value.vf = 1;
  35.168 +    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
  35.169 +    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
  35.170 +    d->arch.arch_vmx.mpta = pta_value.val;
  35.171 +   
  35.172 +    vhpt->vs = vs;
  35.173 +    vhpt->vs->get_mfn = get_mfn;
  35.174 +    vhpt->vs->tag_func = machine_ttag;
  35.175 +    vhpt->hash = vbase;
  35.176 +    vhpt->hash_sz = VCPU_TLB_SIZE/2;
  35.177 +    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
  35.178 +    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
  35.179 +    vhpt->recycle_notifier = recycle_message;
  35.180 +    thash_init(vhpt,VCPU_TLB_SHIFT-1);
  35.181 +    return vhpt;
  35.182 +}
  35.183 +
  35.184 +
  35.185 +thash_cb_t *init_domain_tlb(struct exec_domain *d)
  35.186 +{
  35.187 +    struct pfn_info *page;
  35.188 +    void    *vbase,*vcur;
  35.189 +    tlb_special_t  *ts;
  35.190 +    thash_cb_t  *tlb;
  35.191 +    
  35.192 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
  35.193 +    if ( page == NULL ) {
  35.194 +        panic("Not enough contiguous memory for init_domain_mm\n");
  35.195 +    }
  35.196 +    vbase = page_to_virt(page);
  35.197 +    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
  35.198 +    memset(vbase, 0, VCPU_TLB_SIZE);
  35.199 +    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
  35.200 +    tlb = --((thash_cb_t*)vcur);
  35.201 +    tlb->ht = THASH_TLB;
  35.202 +    tlb->vcpu = d;
  35.203 +    ts = --((tlb_special_t *)vcur);
  35.204 +    tlb->ts = ts;
  35.205 +    tlb->ts->vhpt = init_domain_vhpt(d);
  35.206 +    tlb->hash_func = machine_thash;
  35.207 +    tlb->hash = vbase;
  35.208 +    tlb->hash_sz = VCPU_TLB_SIZE/2;
  35.209 +    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
  35.210 +    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
  35.211 +    tlb->recycle_notifier = recycle_message;
  35.212 +    thash_init(tlb,VCPU_TLB_SHIFT-1);
  35.213 +    return tlb;
  35.214 +}
  35.215 +
  35.216 +/* Allocate physical to machine mapping table for domN
  35.217 + * FIXME: Later this interface may be removed, if that table is provided
  35.218 + * by control panel. Dom0 has gpfn identical to mfn, which doesn't need
  35.219 + * this interface at all.
  35.220 + */
  35.221 +void
  35.222 +alloc_pmt(struct domain *d)
  35.223 +{
  35.224 +    struct pfn_info *page;
  35.225 +
  35.226 +    /* Only called once, so the table must not already exist */
  35.227 +    ASSERT(!d->arch.pmt);
  35.228 +
  35.229 +    page = alloc_domheap_pages(NULL, get_order(d->max_pages));
  35.230 +    ASSERT(page);
  35.231 +
  35.232 +    d->arch.pmt = page_to_virt(page);
  35.233 +    memset(d->arch.pmt, 0x55, d->max_pages * 8);
  35.234 +}
  35.235 +
  35.236 +/*
  35.237 + * Insert guest TLB to machine TLB.
  35.238 + *  tlb:    entry, in guest TLB format.
  35.239 + */
  35.240 +void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb)
  35.241 +{
  35.242 +    u64     saved_itir, saved_ifa, saved_rr;
  35.243 +    u64     pages;
  35.244 +    thash_data_t    mtlb;
  35.245 +    rr_t    vrr;
  35.246 +    unsigned int    cl = tlb->cl;
  35.247 +
  35.248 +    mtlb.ifa = tlb->vadr;
  35.249 +    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
  35.250 +    vrr = vmmu_get_rr(d,mtlb.ifa);
  35.251 +    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
  35.252 +    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
  35.253 +    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
  35.254 +    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, pages);
  35.255 +    if (mtlb.ppn == INVALID_MFN)
  35.256 +        panic("Machine tlb insert with invalid mfn number.\n");
  35.257 +
  35.258 +    __asm __volatile("rsm   psr.ic|psr.i;; srlz.i" );
  35.259 +    
  35.260 +    saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
  35.261 +    saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
  35.262 +    saved_rr = ia64_get_rr(mtlb.ifa);
  35.263 +
  35.264 +    ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
  35.265 +    ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
  35.266 +    /* Only access the memory stack, which is mapped by a TR,
  35.267 +     * after rr is switched.
  35.268 +     */
  35.269 +    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
  35.270 +    ia64_srlz_d();
  35.271 +    if ( cl == ISIDE_TLB ) {
  35.272 +        ia64_itci(mtlb.page_flags);
  35.273 +        ia64_srlz_i();
  35.274 +    }
  35.275 +    else {
  35.276 +        ia64_itcd(mtlb.page_flags);
  35.277 +        ia64_srlz_d();
  35.278 +    }
  35.279 +    ia64_set_rr(mtlb.ifa,saved_rr);
  35.280 +    ia64_srlz_d();
  35.281 +    ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
  35.282 +    ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
  35.283 +    __asm __volatile("ssm   psr.ic|psr.i;; srlz.i" );
  35.284 +}
  35.285 +
  35.286 +u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
  35.287 +{
  35.288 +    u64     saved_pta, saved_rr0;
  35.289 +    u64     hash_addr;
  35.290 +    unsigned long psr;
  35.291 +    struct exec_domain *ed = current;
  35.292 +    rr_t    vrr;
  35.293 +
  35.294 +    
  35.295 +    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  35.296 +    saved_rr0 = ia64_get_rr(0);
  35.297 +    vrr.value = saved_rr0;
  35.298 +    vrr.rid = rid;
  35.299 +    vrr.ps = ps;
  35.300 +
  35.301 +    va = (va << 3) >> 3;    // set VRN to 0.
  35.302 +    // TODO: Set to enforce lazy mode
  35.303 +    local_irq_save(psr);
  35.304 +    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  35.305 +    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
  35.306 +    ia64_srlz_d();
  35.307 +
  35.308 +    hash_addr = ia64_thash(va);
  35.309 +    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  35.310 +
  35.311 +    ia64_set_rr(0, saved_rr0);
  35.312 +    ia64_srlz_d();
  35.313 +    local_irq_restore(psr);
  35.314 +    return hash_addr;
  35.315 +}
  35.316 +
  35.317 +u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
  35.318 +{
  35.319 +    u64     saved_pta, saved_rr0;
  35.320 +    u64     tag;
  35.321 +    u64     psr;
  35.322 +    struct exec_domain *ed = current;
  35.323 +    rr_t    vrr;
  35.324 +
  35.325 +    // TODO: Set to enforce lazy mode    
  35.326 +    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  35.327 +    saved_rr0 = ia64_get_rr(0);
  35.328 +    vrr.value = saved_rr0;
  35.329 +    vrr.rid = rid;
  35.330 +    vrr.ps = ps;
  35.331 +
  35.332 +    va = (va << 3) >> 3;    // set VRN to 0.
  35.333 +    local_irq_save(psr);
  35.334 +    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  35.335 +    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
  35.336 +    ia64_srlz_d();
  35.337 +
  35.338 +    tag = ia64_ttag(va);
  35.339 +    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  35.340 +
  35.341 +    ia64_set_rr(0, saved_rr0);
  35.342 +    ia64_srlz_d();
  35.343 +    local_irq_restore(psr);
  35.344 +    return tag;
  35.345 +}
  35.346 +
  35.347 +/*
  35.348 + *  Purge machine tlb.
  35.349 + *  INPUT
  35.350 + *      rid:    guest rid.
  35.351 + *      va:     only bits 0:60 are valid.
  35.352 + *      ps:     page size; the purged range is (1<<ps) bytes.
  35.353 + *
  35.354 + */
  35.355 +void machine_tlb_purge(u64 rid, u64 va, u64 ps)
  35.356 +{
  35.357 +    u64       saved_rr0;
  35.358 +    u64       psr;
  35.359 +    rr_t      vrr;
  35.360 +
  35.361 +    va = (va << 3) >> 3;    // set VRN to 0.
  35.362 +    saved_rr0 = ia64_get_rr(0);
  35.363 +    vrr.value = saved_rr0;
  35.364 +    vrr.rid = rid;
  35.365 +    vrr.ps = ps;
  35.366 +    local_irq_save(psr);
  35.367 +    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.value) );
  35.368 +    ia64_srlz_d();
  35.369 +    ia64_ptcl(va, ps << 2);
  35.370 +    ia64_set_rr( 0, saved_rr0 );
  35.371 +    ia64_srlz_d();
  35.372 +    local_irq_restore(psr);
  35.373 +}
  35.374 +
  35.375 +
  35.376 +int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
  35.377 +{
  35.378 +    ia64_rr  vrr;
  35.379 +    PTA   vpta;
  35.380 +    IA64_PSR  vpsr; 
  35.381 +
  35.382 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  35.383 +    vrr = vmx_vcpu_rr(vcpu, vadr);
  35.384 +    vmx_vcpu_get_pta(vcpu,&vpta.val);
  35.385 +
  35.386 +    if ( vrr.ve & vpta.ve ) {
  35.387 +        switch ( ref ) {
  35.388 +        case DATA_REF:
  35.389 +        case NA_REF:
  35.390 +            return vpsr.dt;
  35.391 +        case INST_REF:
  35.392 +            return vpsr.dt && vpsr.it && vpsr.ic;
  35.393 +        case RSE_REF:
  35.394 +            return vpsr.dt && vpsr.rt;
  35.395 +
  35.396 +        }
  35.397 +    }
  35.398 +    return 0;
  35.399 +}
  35.400 +
  35.401 +
  35.402 +int unimplemented_gva(VCPU *vcpu,u64 vadr)
  35.403 +{
  35.404 +    int bit = vcpu->domain->arch.imp_va_msb;
  35.405 +    u64 ladr = (vadr << 3) >> (3 + bit);
  35.406 +    if ( !ladr || ladr == (1UL << (61 - bit)) - 1 ) {
  35.407 +        return 0;
  35.408 +    } else {
  35.409 +        return 1;
  35.410 +    }
  35.411 +}
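         +
         +/*
         + * Worked example: with imp_va_msb = 50, an address is implemented when
         + * bits 60:50 (after stripping the VRN) are all zeros or all ones, so
         + * 0x2000000000000000 (VRN=1, offset 0) is implemented while
         + * 0x2008000000000000 (bit 51 set) is not.
         + */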
  35.412 +
  35.413 +
  35.414 +/*
  35.415 + * Prefetch guest bundle code.
  35.416 + * INPUT:
  35.417 + *  gip:  guest IP to fetch from.
  35.418 + *  code: buffer pointer to hold one dword (8 bytes) of code.
  35.419 + */
  35.420 +int
  35.421 +fetch_code(VCPU *vcpu, u64 gip, u64 *code)
  35.422 +{
  35.423 +    u64     gpip;   // guest physical IP
  35.424 +    u64     mpa;
  35.425 +    thash_data_t    *tlb;
  35.426 +    rr_t    vrr;
  35.427 +    u64     mfn;
  35.428 +    
  35.429 +    if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
  35.430 +        gpip = gip;
  35.431 +    }
  35.432 +    else {
  35.433 +        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
  35.434 +        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 
  35.435 +                vrr.rid, gip, ISIDE_TLB );
  35.436 +        if ( tlb == NULL ) panic("No entry found in ITLB\n");
  35.437 +        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
  35.438 +    }
  35.439 +    mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
  35.440 +    if ( mfn == INVALID_MFN ) return 0;
  35.441 +    
  35.442 +    mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
  35.443 +    *code = *(u64*)__va(mpa);
  35.444 +    return 1;
  35.445 +}
  35.446 +
  35.447 +IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  35.448 +{
  35.449 +
  35.450 +    thash_data_t data, *ovl;
  35.451 +    thash_cb_t  *hcb;
  35.452 +    search_section_t sections;
  35.453 +    rr_t    vrr;
  35.454 +
  35.455 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.456 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  35.457 +    data.itir=itir;
  35.458 +    data.vadr=PAGEALIGN(ifa,data.ps);
  35.459 +    data.section=THASH_TLB_TC;
  35.460 +    data.cl=ISIDE_TLB;
  35.461 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  35.462 +    data.rid = vrr.rid;
  35.463 +    
  35.464 +    sections.v = THASH_SECTION_TR;
  35.465 +
  35.466 +    ovl = thash_find_overlap(hcb, &data, sections);
  35.467 +    if (ovl) {
  35.468 +        // generate MCA.
  35.469 +        panic("Tlb conflict!!");
  35.470 +        return IA64_FAULT;
  35.471 +    }
  35.472 +    sections.v = THASH_SECTION_TC;
  35.473 +    thash_purge_entries(hcb, &data, sections);
  35.474 +    thash_insert(hcb, &data, ifa);
  35.475 +    return IA64_NO_FAULT;
  35.476 +}
  35.477 +
  35.478 +
  35.479 +
  35.480 +
  35.481 +IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  35.482 +{
  35.483 +
  35.484 +    thash_data_t data, *ovl;
  35.485 +    thash_cb_t  *hcb;
  35.486 +    search_section_t sections;
  35.487 +    rr_t    vrr;
  35.488 +
  35.489 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.490 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  35.491 +    data.itir=itir;
  35.492 +    data.vadr=PAGEALIGN(ifa,data.ps);
  35.493 +    data.section=THASH_TLB_TC;
  35.494 +    data.cl=DSIDE_TLB;
  35.495 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  35.496 +    data.rid = vrr.rid;
  35.497 +    sections.v = THASH_SECTION_TR;
  35.498 +
  35.499 +    ovl = thash_find_overlap(hcb, &data, sections);
  35.500 +    if (ovl) {
  35.501 +          // generate MCA.
  35.502 +        panic("Tlb conflict!!");
  35.503 +        return IA64_FAULT;
  35.504 +    }
  35.505 +    sections.v = THASH_SECTION_TC;
  35.506 +    thash_purge_entries(hcb, &data, sections);
  35.507 +    thash_insert(hcb, &data, ifa);
  35.508 +    return IA64_NO_FAULT;
  35.509 +}
  35.510 +
  35.511 +IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
  35.512 +{
  35.513 +
  35.514 +    thash_data_t data, *ovl;
  35.515 +    thash_cb_t  *hcb;
  35.516 +    search_section_t sections;
  35.517 +    rr_t    vrr;
  35.518 +
  35.519 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.520 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  35.521 +    data.itir=0;
  35.522 +    data.ps = ps;
  35.523 +    data.vadr=PAGEALIGN(va,ps);
  35.524 +    data.section=THASH_TLB_FM;
  35.525 +    data.cl=DSIDE_TLB;
  35.526 +    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
  35.527 +    data.rid = vrr.rid;
  35.528 +    sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
  35.529 +
  35.530 +    ovl = thash_find_overlap(hcb, &data, sections);
  35.531 +    if (ovl) {
  35.532 +          // generate MCA.
  35.533 +        panic("Foreignmap Tlb conflict!!");
  35.534 +        return IA64_FAULT;
  35.535 +    }
  35.536 +    thash_insert(hcb, &data, va);
  35.537 +    return IA64_NO_FAULT;
  35.538 +}
  35.539 +
  35.540 +
  35.541 +IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
  35.542 +{
  35.543 +
  35.544 +    thash_data_t data, *ovl;
  35.545 +    thash_cb_t  *hcb;
  35.546 +    search_section_t sections;
  35.547 +    rr_t    vrr;
  35.548 +
  35.549 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.550 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  35.551 +    data.itir=itir;
  35.552 +    data.vadr=PAGEALIGN(ifa,data.ps);
  35.553 +    data.section=THASH_TLB_TR;
  35.554 +    data.cl=ISIDE_TLB;
  35.555 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  35.556 +    data.rid = vrr.rid;
  35.557 +    sections.v = THASH_SECTION_TR;
  35.558 +
  35.559 +    ovl = thash_find_overlap(hcb, &data, sections);
  35.560 +    if (ovl) {
  35.561 +        // generate MCA.
  35.562 +        panic("Tlb conflict!!");
  35.563 +        return IA64_FAULT;
  35.564 +    }
  35.565 +    sections.v=THASH_SECTION_TC;
  35.566 +    thash_purge_entries(hcb, &data, sections);
  35.567 +    thash_tr_insert(hcb, &data, ifa, idx);
  35.568 +    return IA64_NO_FAULT;
  35.569 +}
  35.570 +
  35.571 +IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
  35.572 +{
  35.573 +
  35.574 +    thash_data_t data, *ovl;
  35.575 +    thash_cb_t  *hcb;
  35.576 +    search_section_t sections;
  35.577 +    rr_t    vrr;
  35.578 +
  35.579 +
  35.580 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.581 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  35.582 +    data.itir=itir;
  35.583 +    data.vadr=PAGEALIGN(ifa,data.ps);
  35.584 +    data.section=THASH_TLB_TR;
  35.585 +    data.cl=DSIDE_TLB;
  35.586 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  35.587 +    data.rid = vrr.rid;
  35.588 +    sections.v = THASH_SECTION_TR;
  35.589 +
  35.590 +    ovl = thash_find_overlap(hcb, &data, sections);
  35.591 +    if (ovl) {
  35.592 +        // generate MCA.
  35.593 +        panic("Tlb conflict!!");
  35.594 +        return IA64_FAULT;
  35.595 +    }
  35.596 +    sections.v=THASH_SECTION_TC;
  35.597 +    thash_purge_entries(hcb, &data, sections);
  35.598 +    thash_tr_insert(hcb, &data, ifa, idx);
  35.599 +    return IA64_NO_FAULT;
  35.600 +}
  35.601 +
  35.602 +
  35.603 +
  35.604 +IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  35.605 +{
  35.606 +    thash_cb_t  *hcb;
  35.607 +    ia64_rr rr;
  35.608 +    search_section_t sections;
  35.609 +
  35.610 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.611 +    rr=vmx_vcpu_rr(vcpu,vadr);
  35.612 +    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  35.613 +    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
  35.614 +    return IA64_NO_FAULT;
  35.615 +}
  35.616 +
  35.617 +IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  35.618 +{
  35.619 +    thash_cb_t  *hcb;
  35.620 +    ia64_rr rr;
  35.621 +    search_section_t sections;
  35.622 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.623 +    rr=vmx_vcpu_rr(vcpu,vadr);
  35.624 +    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  35.625 +    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
  35.626 +    return IA64_NO_FAULT;
  35.627 +}
  35.628 +
  35.629 +IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
  35.630 +{
  35.631 +    thash_cb_t  *hcb;
  35.632 +    ia64_rr vrr;
  35.633 +    search_section_t sections;
  35.634 +    thash_data_t data, *ovl;
  35.635 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.636 +    vrr=vmx_vcpu_rr(vcpu,vadr);
  35.637 +    sections.v = THASH_SECTION_TC;
  35.638 +    vadr = PAGEALIGN(vadr, ps);
  35.639 +
  35.640 +    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
  35.641 +    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,ISIDE_TLB);
  35.642 +    return IA64_NO_FAULT;
  35.643 +}
  35.644 +
  35.645 +
  35.646 +IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  35.647 +{
  35.648 +    thash_cb_t  *hcb;
  35.649 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.650 +    thash_purge_all(hcb);
  35.651 +    return IA64_NO_FAULT;
  35.652 +}
  35.653 +
  35.654 +IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
  35.655 +{
  35.656 +    vmx_vcpu_ptc_l(vcpu, vadr, ps);
  35.657 +    return IA64_ILLOP_FAULT;
  35.658 +}
  35.659 +
  35.660 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  35.661 +{
  35.662 +    vmx_vcpu_ptc_l(vcpu, vadr, ps);
  35.663 +    return IA64_NO_FAULT;
  35.664 +}
  35.665 +
  35.666 +
  35.667 +IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  35.668 +{
  35.669 +    PTA vpta;
  35.670 +    ia64_rr vrr;
  35.671 +    u64 vhpt_offset;
  35.672 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  35.673 +    vrr=vmx_vcpu_rr(vcpu, vadr);
  35.674 +    if(vpta.vf){
  35.675 +        panic("THASH: long format VHPT is not supported");
  35.676 +        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
  35.677 +    }else{
  35.678 +        vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
  35.679 +        *pval = (vadr&VRN_MASK)|
  35.680 +            (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
  35.681 +            vhpt_offset;
  35.682 +    }
  35.683 +    return  IA64_NO_FAULT;
  35.684 +}
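         +
         +/*
         + * Worked example for the short-format path: with vpta.size = 15 (a 32KB
         + * table) and vrr.ps = 14 (16KB pages), vadr 0x2000000000004000 gives
         + *   vhpt_offset = ((vadr >> 14) << 3) & 0x7fff = 0x8
         + * and *pval = VRN bits | table base | 0x8.
         + */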
  35.685 +
  35.686 +
  35.687 +IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  35.688 +{
  35.689 +    ia64_rr vrr;
  35.690 +    PTA vpta;
  35.691 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  35.692 +    vrr=vmx_vcpu_rr(vcpu, vadr);
  35.693 +    if(vpta.vf){
  35.694 +        panic("TTAG: long format VHPT is not supported");
  35.695 +        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
  35.696 +    }else{
  35.697 +        *pval = 1;
  35.698 +    }
  35.699 +    return  IA64_NO_FAULT;
  35.700 +}
  35.701 +
  35.702 +
  35.703 +
  35.704 +IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  35.705 +{
  35.706 +    thash_data_t *data;
  35.707 +    thash_cb_t  *hcb;
  35.708 +    ia64_rr vrr;
  35.709 +    ISR visr,pt_isr;
  35.710 +    REGS *regs;
  35.711 +    u64 vhpt_adr;
  35.712 +    IA64_PSR vpsr;
  35.713 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.714 +    vrr=vmx_vcpu_rr(vcpu,vadr);
  35.715 +    regs=vcpu_regs(vcpu);
  35.716 +    pt_isr.val=regs->cr_isr;
  35.717 +    visr.val=0;
  35.718 +    visr.ei=pt_isr.ei;
  35.719 +    visr.ir=pt_isr.ir;
  35.720 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  35.721 +    if(vpsr.ic==0){
  35.722 +        visr.ni=1;
  35.723 +    }
  35.724 +    visr.na=1;
  35.725 +    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
  35.726 +    if(data){
  35.727 +        if(data->p==0){
  35.728 +            visr.na=1;
  35.729 +            vmx_vcpu_set_isr(vcpu,visr.val);
  35.730 +            page_not_present(vcpu, vadr);
  35.731 +            return IA64_FAULT;
  35.732 +        }else if(data->ma == VA_MATTR_NATPAGE){
  35.733 +            visr.na = 1;
  35.734 +            vmx_vcpu_set_isr(vcpu, visr.val);
  35.735 +            dnat_page_consumption(vcpu, vadr);
  35.736 +            return IA64_FAULT;
  35.737 +        }else{
  35.738 +            *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
  35.739 +            return IA64_NO_FAULT;
  35.740 +        }
  35.741 +    }else{
  35.742 +        if(!vhpt_enabled(vcpu, vadr, NA_REF)){
  35.743 +            if(vpsr.ic){
  35.744 +                vmx_vcpu_set_isr(vcpu, visr.val);
  35.745 +                alt_dtlb(vcpu, vadr);
  35.746 +                return IA64_FAULT;
  35.747 +            }
  35.748 +            else{
  35.749 +                nested_dtlb(vcpu);
  35.750 +                return IA64_FAULT;
  35.751 +            }
  35.752 +        }
  35.753 +        else{
  35.754 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  35.755 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  35.756 +            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
  35.757 +            if(data){
  35.758 +                if(vpsr.ic){
  35.759 +                    vmx_vcpu_set_isr(vcpu, visr.val);
  35.760 +                    dtlb_fault(vcpu, vadr);
  35.761 +                    return IA64_FAULT;
  35.762 +                }
  35.763 +                else{
  35.764 +                    nested_dtlb(vcpu);
  35.765 +                    return IA64_FAULT;
  35.766 +                }
  35.767 +            }
  35.768 +            else{
  35.769 +                if(vpsr.ic){
  35.770 +                    vmx_vcpu_set_isr(vcpu, visr.val);
  35.771 +                    dvhpt_fault(vcpu, vadr);
  35.772 +                    return IA64_FAULT;
  35.773 +                }
  35.774 +                else{
  35.775 +                    nested_dtlb(vcpu);
  35.776 +                    return IA64_FAULT;
  35.777 +                }
  35.778 +            }
  35.779 +        }
  35.780 +    }
  35.781 +}
  35.782 +
  35.783 +IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
  35.784 +{
  35.785 +    thash_data_t *data;
  35.786 +    thash_cb_t  *hcb;
  35.787 +    ia64_rr rr;
  35.788 +    PTA vpta;
  35.789 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  35.790 +    if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
  35.791 +        *key=1;
  35.792 +        return IA64_NO_FAULT;
  35.793 +    }
  35.794 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  35.795 +    rr=vmx_vcpu_rr(vcpu,vadr);
  35.796 +    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
  35.797 +    if(!data||!data->p){
  35.798 +        *key=1;
  35.799 +    }else{
  35.800 +        *key=data->key;
  35.801 +    }
  35.802 +    return IA64_NO_FAULT;
  35.803 +}
  35.804 +
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/xen/arch/ia64/vmx_entry.S	Mon May 23 15:29:59 2005 +0000
    36.3 @@ -0,0 +1,611 @@
    36.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    36.5 +/*
    36.6 + * vmx_entry.S:
    36.7 + * Copyright (c) 2005, Intel Corporation.
    36.8 + *
    36.9 + * This program is free software; you can redistribute it and/or modify it
   36.10 + * under the terms and conditions of the GNU General Public License,
   36.11 + * version 2, as published by the Free Software Foundation.
   36.12 + *
   36.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   36.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   36.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   36.16 + * more details.
   36.17 + *
   36.18 + * You should have received a copy of the GNU General Public License along with
   36.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   36.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   36.21 + *
   36.22 + *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
   36.23 + *  Kun Tian (Kevin Tian) (kevin.tian@intel.com)
   36.24 + */
   36.25 +
   36.26 +#ifndef VCPU_TLB_SHIFT
   36.27 +#define VCPU_TLB_SHIFT	22
   36.28 +#endif
   36.29 +#include <linux/config.h>
   36.30 +#include <asm/asmmacro.h>
   36.31 +#include <asm/cache.h>
   36.32 +#include <asm/kregs.h>
   36.33 +#include <asm/offsets.h>
   36.34 +#include <asm/pgtable.h>
   36.35 +#include <asm/percpu.h>
   36.36 +#include <asm/processor.h>
   36.37 +#include <asm/thread_info.h>
   36.38 +#include <asm/unistd.h>
   36.39 +
   36.40 +#include "vmx_minstate.h"
   36.41 +
   36.42 +/*
   36.43 + * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
   36.44 + *	With Ingo's new scheduler, interrupts are disabled when this routine gets
   36.45 + *	called.  The code starting at .map relies on this.  The rest of the code
   36.46 + *	doesn't care about the interrupt masking status.
   36.47 + *
   36.48 + * Since we allocate domain stack in xenheap, there's no need to map new
   36.49 + * domain's stack since all xenheap is mapped by TR. Another different task
   36.50 + * for vmx_ia64_switch_to is to switch to bank0 and change current pointer.
   36.51 + */
   36.52 +GLOBAL_ENTRY(vmx_ia64_switch_to)
   36.53 +	.prologue
   36.54 +	alloc r16=ar.pfs,1,0,0,0
   36.55 +	DO_SAVE_SWITCH_STACK
   36.56 +	.body
   36.57 +
   36.58 +	bsw.0	// Switch to bank0, because bank0 r21 is current pointer
   36.59 +	;;
   36.60 +	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
   36.61 +	movl r25=init_task
   36.62 +	adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
   36.63 +	;;
   36.64 +	st8 [r22]=sp			// save kernel stack pointer of old task
   36.65 +	;;
   36.66 +	/*
   36.67 +	 * TR always mapped this task's page, we can skip doing it again.
   36.68 +	 */
   36.69 +	ld8 sp=[r26]			// load kernel stack pointer of new task
   36.70 +	mov r21=in0			// update "current" application register
   36.71 +	mov r8=r13			// return pointer to previously running task
   36.72 +	mov r13=in0			// set "current" pointer
   36.73 +	;;
   36.74 +	bsw.1
   36.75 +	;;
   36.76 +	DO_LOAD_SWITCH_STACK
   36.77 +
   36.78 +#ifdef CONFIG_SMP
   36.79 +	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
   36.80 +#endif
   36.81 +	br.ret.sptk.many rp		// boogie on out in new context
   36.82 +END(vmx_ia64_switch_to)
   36.83 +
   36.84 +GLOBAL_ENTRY(ia64_leave_nested)
   36.85 +	rsm psr.i
   36.86 +	;;
   36.87 +	adds r21=PT(PR)+16,r12
   36.88 +	;;
   36.89 +
   36.90 +	lfetch [r21],PT(CR_IPSR)-PT(PR)
   36.91 +	adds r2=PT(B6)+16,r12
   36.92 +	adds r3=PT(R16)+16,r12
   36.93 +	;;
   36.94 +	lfetch [r21]
   36.95 +	ld8 r28=[r2],8		// load b6
   36.96 +	adds r29=PT(R24)+16,r12
   36.97 +
   36.98 +	ld8.fill r16=[r3]
   36.99 +	adds r3=PT(AR_CSD)-PT(R16),r3
  36.100 +	adds r30=PT(AR_CCV)+16,r12
  36.101 +	;;
  36.102 +	ld8.fill r24=[r29]
  36.103 +	ld8 r15=[r30]		// load ar.ccv
  36.104 +	;;
  36.105 +	ld8 r29=[r2],16		// load b7
  36.106 +	ld8 r30=[r3],16		// load ar.csd
  36.107 +	;;
  36.108 +	ld8 r31=[r2],16		// load ar.ssd
  36.109 +	ld8.fill r8=[r3],16
  36.110 +	;;
  36.111 +	ld8.fill r9=[r2],16
  36.112 +	ld8.fill r10=[r3],PT(R17)-PT(R10)
  36.113 +	;;
  36.114 +	ld8.fill r11=[r2],PT(R18)-PT(R11)
  36.115 +	ld8.fill r17=[r3],16
  36.116 +	;;
  36.117 +	ld8.fill r18=[r2],16
  36.118 +	ld8.fill r19=[r3],16
  36.119 +	;;
  36.120 +	ld8.fill r20=[r2],16
  36.121 +	ld8.fill r21=[r3],16
  36.122 +	mov ar.csd=r30
  36.123 +	mov ar.ssd=r31
  36.124 +	;;
  36.125 +	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
  36.126 +	invala			// invalidate ALAT
  36.127 +	;;
  36.128 +	ld8.fill r22=[r2],24
  36.129 +	ld8.fill r23=[r3],24
  36.130 +	mov b6=r28
  36.131 +	;;
  36.132 +	ld8.fill r25=[r2],16
  36.133 +	ld8.fill r26=[r3],16
  36.134 +	mov b7=r29
  36.135 +	;;
  36.136 +	ld8.fill r27=[r2],16
  36.137 +	ld8.fill r28=[r3],16
  36.138 +	;;
  36.139 +	ld8.fill r29=[r2],16
  36.140 +	ld8.fill r30=[r3],24
  36.141 +	;;
  36.142 +	ld8.fill r31=[r2],PT(F9)-PT(R31)
  36.143 +	adds r3=PT(F10)-PT(F6),r3
  36.144 +	;;
  36.145 +	ldf.fill f9=[r2],PT(F6)-PT(F9)
  36.146 +	ldf.fill f10=[r3],PT(F8)-PT(F10)
  36.147 +	;;
  36.148 +	ldf.fill f6=[r2],PT(F7)-PT(F6)
  36.149 +	;;
  36.150 +	ldf.fill f7=[r2],PT(F11)-PT(F7)
  36.151 +	ldf.fill f8=[r3],32
  36.152 +	;;
  36.153 +	srlz.i			// ensure interruption collection is off
  36.154 +	mov ar.ccv=r15
  36.155 +	;;
  36.156 +	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
  36.157 +	;;
  36.158 +	ldf.fill f11=[r2]
  36.159 +//	mov r18=r13
  36.160 +//    mov r21=r13
  36.161 +	adds r16=PT(CR_IPSR)+16,r12
  36.162 +	adds r17=PT(CR_IIP)+16,r12
  36.163 +	;;
  36.164 +	ld8 r29=[r16],16	// load cr.ipsr
  36.165 +	ld8 r28=[r17],16	// load cr.iip
  36.166 +	;;
  36.167 +	ld8 r30=[r16],16	// load cr.ifs
  36.168 +	ld8 r25=[r17],16	// load ar.unat
  36.169 +	;;
  36.170 +	ld8 r26=[r16],16	// load ar.pfs
  36.171 +	ld8 r27=[r17],16	// load ar.rsc
  36.172 +	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
  36.173 +	;;
  36.174 +	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
  36.175 +	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
  36.176 +	;;
  36.177 +	ld8 r31=[r16],16	// load predicates
  36.178 +	ld8 r22=[r17],16	// load b0
  36.179 +	;;
  36.180 +	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
  36.181 +	ld8.fill r1=[r17],16	// load r1
  36.182 +	;;
  36.183 +	ld8.fill r12=[r16],16
  36.184 +	ld8.fill r13=[r17],16
  36.185 +	;;
  36.186 +	ld8 r20=[r16],16	// ar.fpsr
  36.187 +	ld8.fill r15=[r17],16
  36.188 +	;;
  36.189 +	ld8.fill r14=[r16],16
  36.190 +	ld8.fill r2=[r17]
  36.191 +	;;
  36.192 +	ld8.fill r3=[r16]
  36.193 +	;;
  36.194 +	mov r16=ar.bsp		// get existing backing store pointer
  36.195 +	;;
  36.196 +	mov b0=r22
  36.197 +	mov ar.pfs=r26
  36.198 +	mov cr.ifs=r30
  36.199 +	mov cr.ipsr=r29
  36.200 +	mov ar.fpsr=r20
  36.201 +	mov cr.iip=r28
  36.202 +	;;
  36.203 +	mov ar.rsc=r27
  36.204 +	mov ar.unat=r25
  36.205 +	mov pr=r31,-1
  36.206 +	rfi
  36.207 +END(ia64_leave_nested)
  36.208 +
  36.209 +
  36.210 +
  36.211 +GLOBAL_ENTRY(ia64_leave_hypervisor)
  36.212 +    PT_REGS_UNWIND_INFO(0)
  36.213 +    /*
  36.214 +     * work.need_resched etc. mustn't get changed by this CPU before it returns to
  36.216 +     * user- or fsys-mode, hence we disable interrupts early on:
  36.217 +     */
  36.218 +    rsm psr.i
  36.219 +    ;;
  36.220 +    alloc loc0=ar.pfs,0,1,1,0
  36.221 +    adds out0=16,r12
  36.222 +    ;;
  36.223 +    br.call.sptk.many b0=vmx_deliver_pending_interrupt
  36.224 +    mov ar.pfs=loc0
  36.225 +    adds r8=IA64_VPD_BASE_OFFSET,r13
  36.226 +    ;;
  36.227 +    ld8 r8=[r8]
  36.228 +    ;;
  36.229 +    adds r9=VPD(VPSR),r8
  36.230 +    ;;
  36.231 +    ld8 r9=[r9]
  36.232 +    ;;
  36.233 +    tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT
  36.234 +    ;;
  36.235 +(pBN0) add r7=VPD(VBNAT),r8;
  36.236 +(pBN1) add r7=VPD(VNAT),r8;
  36.237 +    ;;
  36.238 +    ld8 r7=[r7]
  36.239 +    ;;
  36.240 +    mov ar.unat=r7
  36.241 +(pBN0) add r4=VPD(VBGR),r8;
  36.242 +(pBN1) add r4=VPD(VGR),r8;
  36.243 +(pBN0) add r5=VPD(VBGR)+0x8,r8;
  36.244 +(pBN1) add r5=VPD(VGR)+0x8,r8;
  36.245 +    ;;
  36.246 +    ld8.fill r16=[r4],16
  36.247 +    ld8.fill r17=[r5],16
  36.248 +    ;;
  36.249 +    ld8.fill r18=[r4],16
  36.250 +    ld8.fill r19=[r5],16
  36.251 +    ;;
  36.252 +    ld8.fill r20=[r4],16
  36.253 +    ld8.fill r21=[r5],16
  36.254 +    ;;
  36.255 +    ld8.fill r22=[r4],16
  36.256 +    ld8.fill r23=[r5],16
  36.257 +    ;;
  36.258 +    ld8.fill r24=[r4],16
  36.259 +    ld8.fill r25=[r5],16
  36.260 +    ;;
  36.261 +    ld8.fill r26=[r4],16
  36.262 +    ld8.fill r27=[r5],16
  36.263 +    ;;
  36.264 +    ld8.fill r28=[r4],16
  36.265 +    ld8.fill r29=[r5],16
  36.266 +    ;;
  36.267 +    ld8.fill r30=[r4],16
  36.268 +    ld8.fill r31=[r5],16
  36.269 +    ;;
  36.270 +    bsw.0
  36.271 +    ;;
  36.272 +    mov r18=r8      //vpd
  36.273 +    mov r19=r9      //vpsr
  36.274 +    adds r20=PT(PR)+16,r12
  36.275 +    ;;
  36.276 +    lfetch [r20],PT(CR_IPSR)-PT(PR)
  36.277 +    adds r16=PT(B6)+16,r12
  36.278 +    adds r17=PT(B7)+16,r12
  36.279 +    ;;
  36.280 +    lfetch [r20]
  36.281 +    mov r21=r13		// get current
  36.282 +    ;;
  36.283 +    ld8 r30=[r16],16      // load b6
  36.284 +    ld8 r31=[r17],16      // load b7
  36.285 +    add r20=PT(EML_UNAT)+16,r12
  36.286 +    ;;
  36.287 +    ld8 r29=[r20]       //load ar_unat
  36.288 +    mov b6=r30
  36.289 +    mov b7=r31
  36.290 +    ld8 r30=[r16],16    //load ar_csd
  36.291 +    ld8 r31=[r17],16    //load ar_ssd
  36.292 +    ;;
  36.293 +    mov ar.unat=r29
  36.294 +    mov ar.csd=r30
  36.295 +    mov ar.ssd=r31
  36.296 +    ;;
  36.297 +    ld8.fill r8=[r16],16    //load r8
  36.298 +    ld8.fill r9=[r17],16    //load r9
  36.299 +    ;;
  36.300 +    ld8.fill r10=[r16],PT(R1)-PT(R10)    //load r10
  36.301 +    ld8.fill r11=[r17],PT(R12)-PT(R11)    //load r11
  36.302 +    ;;
  36.303 +    ld8.fill r1=[r16],16    //load r1
  36.304 +    ld8.fill r12=[r17],16    //load r12
  36.305 +    ;;
  36.306 +    ld8.fill r13=[r16],16    //load r13
  36.307 +    ld8 r30=[r17],16    //load ar_fpsr
  36.308 +    ;;
  36.309 +    ld8.fill r15=[r16],16    //load r15
  36.310 +    ld8.fill r14=[r17],16    //load r14
  36.311 +    mov ar.fpsr=r30
  36.312 +    ;;
  36.313 +    ld8.fill r2=[r16],16    //load r2
  36.314 +    ld8.fill r3=[r17],16    //load r3
  36.315 +    ;;
  36.316 +/*
  36.317 +(pEml) ld8.fill r4=[r16],16    //load r4
  36.318 +(pEml) ld8.fill r5=[r17],16    //load r5
  36.319 +    ;;
  36.320 +(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
  36.321 +(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
  36.322 +    ;;
  36.323 +(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16
  36.324 +(pNonEml) adds r17=PT(F7)-PT(R5),r17
  36.325 +    ;;
  36.326 +*/
  36.327 +    ld8.fill r4=[r16],16    //load r4
  36.328 +    ld8.fill r5=[r17],16    //load r5
  36.329 +     ;;
  36.330 +    ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
  36.331 +    ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
  36.332 +    ;;
  36.333 +
  36.334 +    ld8 r30=[r16],PT(F6)-PT(AR_CCV)
  36.335 +    rsm psr.i | psr.ic  // initiate turning off of interrupt and interruption collection
  36.336 +    ;;
  36.337 +    srlz.i          // ensure interruption collection is off
  36.338 +    ;;
  36.339 +    invala          // invalidate ALAT
  36.340 +    ;;
  36.341 +    ldf.fill f6=[r16],32
  36.342 +    ldf.fill f7=[r17],32
  36.343 +    ;;
  36.344 +    ldf.fill f8=[r16],32
  36.345 +    ldf.fill f9=[r17],32
  36.346 +    ;;
  36.347 +    ldf.fill f10=[r16]
  36.348 +    ldf.fill f11=[r17]
  36.349 +    ;;
  36.350 +    mov ar.ccv=r30
  36.351 +    adds r16=PT(CR_IPSR)-PT(F10),r16
  36.352 +    adds r17=PT(CR_IIP)-PT(F11),r17
  36.353 +    ;;
  36.354 +    ld8 r31=[r16],16    // load cr.ipsr
  36.355 +    ld8 r30=[r17],16    // load cr.iip
  36.356 +    ;;
  36.357 +    ld8 r29=[r16],16    // load cr.ifs
  36.358 +    ld8 r28=[r17],16    // load ar.unat
  36.359 +    ;;
  36.360 +    ld8 r27=[r16],16    // load ar.pfs
  36.361 +    ld8 r26=[r17],16    // load ar.rsc
  36.362 +    ;;
  36.363 +    ld8 r25=[r16],16    // load ar.rnat (may be garbage)
  36.364 +    ld8 r24=[r17],16    // load ar.bspstore (may be garbage)
  36.365 +    ;;
  36.366 +    ld8 r23=[r16],16    // load predicates
  36.367 +    ld8 r22=[r17],PT(RFI_PFS)-PT(B0)    // load b0
  36.368 +    ;;
  36.369 +    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
  36.370 +    ;;
  36.371 +//rbs_switch
  36.372 +    // loadrs has already been shifted
  36.373 +    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
  36.374 +    ;;
  36.375 +    mov ar.rsc=r20
  36.376 +    ;;
  36.377 +    loadrs
  36.378 +    ;;
  36.379 +    mov ar.bspstore=r24
  36.380 +    ;;
  36.381 +    ld8 r24=[r17]       //load rfi_pfs
  36.382 +    mov ar.unat=r28
  36.383 +    mov ar.rnat=r25
  36.384 +    mov ar.rsc=r26
  36.385 +    ;;
  36.386 +    mov cr.ipsr=r31
  36.387 +    mov cr.iip=r30
  36.388 +    mov cr.ifs=r29
  36.389 +    cmp.ne p6,p0=r24,r0
  36.390 +(p6)br.sptk vmx_dorfirfi
  36.391 +    ;;
  36.392 +vmx_dorfirfi_back:
  36.393 +    mov ar.pfs=r27
  36.394 +
  36.395 +//vsa_sync_write_start
  36.396 +    movl r20=__vsa_base
  36.397 +    ;;
  36.398 +    ld8 r20=[r20]       // read entry point
  36.399 +    mov r25=r18
  36.400 +    ;;
  36.401 +    add r16=PAL_VPS_SYNC_WRITE,r20
  36.402 +    movl r24=switch_rr7  // calculate return address
  36.403 +    ;;
  36.404 +    mov b0=r16
  36.405 +    br.cond.sptk b0         // call the service
  36.406 +    ;;
  36.407 +// switch rr7 and rr5
  36.408 +switch_rr7:
  36.409 +    adds r24=SWITCH_MRR5_OFFSET, r21
  36.410 +    adds r26=SWITCH_MRR6_OFFSET, r21
  36.411 +    adds r16=SWITCH_MRR7_OFFSET ,r21
  36.412 +    movl r25=(5<<61)
  36.413 +    movl r27=(6<<61)
  36.414 +    movl r17=(7<<61)
  36.415 +    ;;
  36.416 +    ld8 r24=[r24]
  36.417 +    ld8 r26=[r26]
  36.418 +    ld8 r16=[r16]
  36.419 +    ;;
  36.420 +    mov rr[r25]=r24
  36.421 +    mov rr[r27]=r26
  36.422 +    mov rr[r17]=r16
  36.423 +    ;;
  36.424 +    srlz.i
  36.425 +    ;;
  36.426 +    add r24=SWITCH_MPTA_OFFSET, r21
  36.427 +    ;;
  36.428 +    ld8 r24=[r24]
  36.429 +    ;;
  36.430 +    mov cr.pta=r24
  36.431 +    ;;
  36.432 +    srlz.i
  36.433 +    ;;
  36.434 +// fall through
  36.435 +GLOBAL_ENTRY(ia64_vmm_entry)
  36.436 +/*
  36.437 + *  must be at bank 0
  36.438 + *  parameter:
  36.439 + *  r18:vpd
  36.440 + *  r19:vpsr
  36.441 + *  r20:__vsa_base
  36.442 + *  r22:b0
  36.443 + *  r23:predicate
  36.444 + */
  36.445 +    mov r24=r22
  36.446 +    mov r25=r18
  36.447 +    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
  36.448 +    ;;
  36.449 +    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
  36.450 +    (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
  36.451 +    ;;
  36.452 +    mov pr=r23,-2
  36.453 +    mov b0=r29
  36.454 +    ;;
  36.455 +    br.cond.sptk b0             // call pal service
  36.456 +END(ia64_leave_hypervisor)
  36.457 +
  36.458 +//r24 rfi_pfs
  36.459 +//r17 address of rfi_pfs
  36.460 +GLOBAL_ENTRY(vmx_dorfirfi)
  36.461 +    mov r16=ar.ec
  36.462 +    movl r20 = vmx_dorfirfi_back
  36.463 +	;;
  36.464 +// clean rfi_pfs
  36.465 +    st8 [r17]=r0
  36.466 +    mov b0=r20
  36.467 +// pfs.pec=ar.ec
  36.468 +    dep r24 = r16, r24, 52, 6
  36.469 +    ;;
  36.470 +    mov ar.pfs=r24
  36.471 +	;;
  36.472 +    br.ret.sptk b0
  36.473 +	;;
  36.474 +END(vmx_dorfirfi)
  36.475 +
  36.476 +
  36.477 +#define VMX_PURGE_RR7	0
  36.478 +#define VMX_INSERT_RR7	1
  36.479 +/*
  36.480 + * in0: old rr7
  36.481 + * in1: virtual address of xen image
  36.482 + * in2: virtual address of vhpt table
  36.483 + */
  36.484 +GLOBAL_ENTRY(vmx_purge_double_mapping)
  36.485 +    alloc loc1 = ar.pfs,5,9,0,0
  36.486 +    mov loc0 = rp
  36.487 +    movl r8 = 1f
  36.488 +    ;;
  36.489 +    movl loc4 = KERNEL_TR_PAGE_SHIFT
  36.490 +    movl loc5 = VCPU_TLB_SHIFT
  36.491 +    mov loc6 = psr
  36.492 +    movl loc7 = XEN_RR7_SWITCH_STUB
  36.493 +    mov loc8 = (1<<VMX_PURGE_RR7)
  36.494 +    ;;
  36.495 +    srlz.i
  36.496 +    ;;
  36.497 +    rsm psr.i | psr.ic
  36.498 +    ;;
  36.499 +    srlz.i
  36.500 +    ;;
  36.501 +    mov ar.rsc = 0
  36.502 +    mov b6 = loc7
  36.503 +    mov rp = r8
  36.504 +    ;;
  36.505 +    br.sptk b6
  36.506 +1:
  36.507 +    mov ar.rsc = 3
  36.508 +    mov rp = loc0
  36.509 +    ;;
  36.510 +    mov psr.l = loc6
  36.511 +    ;;
  36.512 +    srlz.i
  36.513 +    ;;
  36.514 +    br.ret.sptk rp
  36.515 +END(vmx_purge_double_mapping)
  36.516 +
  36.517 +/*
  36.518 + * in0: new rr7
  36.519 + * in1: virtual address of xen image
  36.520 + * in2: virtual address of vhpt table
  36.521 + * in3: pte entry of xen image
  36.522 + * in4: pte entry of vhpt table
  36.523 + */
  36.524 +GLOBAL_ENTRY(vmx_insert_double_mapping)
  36.525 +    alloc loc1 = ar.pfs,5,9,0,0
  36.526 +    mov loc0 = rp
  36.527 +    movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image
  36.528 +    ;;
  36.529 +    movl loc3 = IA64_TR_VHPT_IN_DOM	// TR number for vhpt table
  36.530 +    movl r8 = 1f
  36.531 +    movl loc4 = KERNEL_TR_PAGE_SHIFT
  36.532 +    ;;
  36.533 +    movl loc5 = VCPU_TLB_SHIFT
  36.534 +    mov loc6 = psr
  36.535 +    movl loc7 = XEN_RR7_SWITCH_STUB
  36.536 +    ;;
  36.537 +    srlz.i
  36.538 +    ;;
  36.539 +    rsm psr.i | psr.ic
  36.540 +    mov loc8 = (1<<VMX_INSERT_RR7)
  36.541 +    ;;
  36.542 +    srlz.i
  36.543 +    ;;
  36.544 +    mov ar.rsc = 0
  36.545 +    mov b6 = loc7
  36.546 +    mov rp = r8
  36.547 +    ;;
  36.548 +    br.sptk b6
  36.549 +1:
  36.550 +    mov ar.rsc = 3
  36.551 +    mov rp = loc0
  36.552 +    ;;
  36.553 +    mov psr.l = loc6
  36.554 +    ;;
  36.555 +    srlz.i
  36.556 +    ;;
  36.557 +    br.ret.sptk rp
  36.558 +END(vmx_insert_double_mapping)
  36.559 +
  36.560 +    .align PAGE_SIZE
  36.561 +/*
  36.562 + * Stub to add double mapping for new domain, which shouldn't
  36.563 + * access any memory when active. Before reaching this point,
  36.564 + * both psr.i/ic is cleared and rse is set in lazy mode.
  36.565 + *
  36.566 + * in0: new rr7
  36.567 + * in1: virtual address of xen image
  36.568 + * in2: virtual address of vhpt table
  36.569 + * in3: pte entry of xen image
  36.570 + * in4: pte entry of vhpt table
  36.571 + * loc2: TR number for xen image
  36.572 + * loc3: TR number for vhpt table
  36.573 + * loc4: page size for xen image
  36.574 + * loc5: page size of vhpt table
  36.575 + * loc7: free to use
  36.576 + * loc8: purge or insert
  36.577 + * r8: will contain old rid value
  36.578 + */
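         +// Illustrative flow: loc8 = (1<<VMX_PURGE_RR7) sets p7, so the ptr.i/ptr.d
         +// paths drop the xen image and VHPT translations from rr7, while
         +// loc8 = (1<<VMX_INSERT_RR7) sets p6, so the itr.i/itr.d paths pin them.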
  36.579 +GLOBAL_ENTRY(vmx_switch_rr7)
  36.580 +    movl loc7 = (7<<61)
  36.581 +    dep.z loc4 = loc4, 2, 6
  36.582 +    dep.z loc5 = loc5, 2, 6
  36.583 +    ;;
  36.584 +    tbit.nz p6,p7=loc8, VMX_INSERT_RR7
  36.585 +    mov r8 = rr[loc7]
  36.586 +    ;;
  36.587 +    mov rr[loc7] = in0
  36.588 +(p6)mov cr.ifa = in1
  36.589 +(p6)mov cr.itir = loc4
  36.590 +    ;;
  36.591 +    srlz.i
  36.592 +    ;;
  36.593 +(p6)itr.i itr[loc2] = in3
  36.594 +(p7)ptr.i in1, loc4
  36.595 +    ;;
  36.596 +(p6)itr.d dtr[loc2] = in3
  36.597 +(p7)ptr.d in1, loc4
  36.598 +    ;;
  36.599 +    srlz.i
  36.600 +    ;;
  36.601 +(p6)mov cr.ifa = in2
  36.602 +(p6)mov cr.itir = loc5
  36.603 +    ;;
  36.604 +(p6)itr.d dtr[loc3] = in4
  36.605 +(p7)ptr.d in2, loc5
  36.606 +    ;;
  36.607 +    srlz.i
  36.608 +    ;;
  36.609 +    mov rr[loc7] = r8
  36.610 +    ;;
  36.611 +    srlz.i
  36.612 +    br.sptk rp
  36.613 +END(vmx_switch_rr7)
  36.614 +    .align PAGE_SIZE
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/arch/ia64/vmx_init.c	Mon May 23 15:29:59 2005 +0000
    37.3 @@ -0,0 +1,296 @@
    37.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    37.5 +/*
    37.6 + * vmx_init.c: initialization work for vt specific domain
    37.7 + * Copyright (c) 2005, Intel Corporation.
    37.8 + *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
    37.9 + *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
   37.10 + *	Fred Yang <fred.yang@intel.com>
   37.11 + *
   37.12 + * This program is free software; you can redistribute it and/or modify it
   37.13 + * under the terms and conditions of the GNU General Public License,
   37.14 + * version 2, as published by the Free Software Foundation.
   37.15 + *
   37.16 + * This program is distributed in the hope it will be useful, but WITHOUT
   37.17 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   37.18 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   37.19 + * more details.
   37.20 + *
   37.21 + * You should have received a copy of the GNU General Public License along with
   37.22 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   37.23 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   37.24 + *
   37.25 + */
   37.26 +
   37.27 +/*
   37.28 + * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
   37.29 + * Simplified design in the first step:
   37.30 + *	- One virtual environment
   37.31 + *	- Domain is bound to one LP
   37.32 + * Later to support guest SMP:
   37.33 + *	- Need interface to handle VP scheduled to different LP
   37.34 + */
   37.35 +#include <xen/config.h>
   37.36 +#include <xen/types.h>
   37.37 +#include <xen/sched.h>
   37.38 +#include <asm/pal.h>
   37.39 +#include <asm/page.h>
   37.40 +#include <asm/processor.h>
   37.41 +#include <asm/vmx_vcpu.h>
   37.42 +#include <xen/lib.h>
   37.43 +#include <asm/vmmu.h>
   37.44 +#include <public/arch-ia64.h>
   37.45 +#include <asm/vmx_phy_mode.h>
   37.46 +#include <asm/vmx.h>
   37.47 +
   37.48 +/* Global flag to identify whether Intel vmx feature is on */
   37.49 +u32 vmx_enabled = 0;
   37.50 +static u32 vm_order;
   37.51 +static u64 buffer_size;
   37.52 +static u64 vp_env_info;
   37.53 +static u64 vm_buffer = 0;	/* Buffer required to bring up VMX feature */
   37.54 +u64 __vsa_base = 0;	/* Run-time service base of VMX */
   37.55 +
   37.56 +/* Check whether vt feature is enabled or not. */
   37.57 +void
   37.58 +identify_vmx_feature(void)
   37.59 +{
   37.60 +	pal_status_t ret;
   37.61 +	u64 avail = 1, status = 1, control = 1;
   37.62 +
   37.63 +	vmx_enabled = 0;
   37.64 +	/* Check VT-i feature */
   37.65 +	ret = ia64_pal_proc_get_features(&avail, &status, &control);
   37.66 +	if (ret != PAL_STATUS_SUCCESS) {
   37.67 +		printk("Get proc features failed.\n");
   37.68 +		goto no_vti;
   37.69 +	}
   37.70 +
   37.71 +	/* FIXME: do we need to check status field, to see whether
   37.72 +	 * PSR.vm is actually enabled? If yes, another call to
   37.73 +	 * ia64_pal_proc_set_features may be required then.
   37.74 +	 */
   37.75 +	printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
   37.76 +		avail, status, control, avail & PAL_PROC_VM_BIT);
   37.77 +	if (!(avail & PAL_PROC_VM_BIT)) {
   37.78 +		printk("No VT feature supported.\n");
   37.79 +		goto no_vti;
   37.80 +	}
   37.81 +
   37.82 +	ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
   37.83 +	if (ret != PAL_STATUS_SUCCESS) {
   37.84 +		printk("Get vp environment info failed.\n");
   37.85 +		goto no_vti;
   37.86 +	}
   37.87 +
    37.88 +	/* Does the hardware provide the opcode, or must Xen decode it itself? */
   37.89 +	if (!(vp_env_info & VP_OPCODE))
   37.90 +		printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
   37.91 +	vm_order = get_order(buffer_size);
    37.92 +	printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);
   37.93 +
   37.94 +	vmx_enabled = 1;
   37.95 +no_vti:
   37.96 +	return;
   37.97 +}
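get_order() above maps the PAL-reported byte count to the page-allocation
order that alloc_xenheap_pages() takes in vmx_init_env() below. A minimal
sketch of that relationship, illustrative only and not code from this patch:

	/* Smallest n such that (PAGE_SIZE << n) >= size. */
	static unsigned int order_of(unsigned long size)
	{
		unsigned int n = 0;
		while ((PAGE_SIZE << n) < size)
			n++;
		return n;	/* e.g. a 3-page buffer gives n = 2 (4 pages) */
	}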
   37.98 +
   37.99 +/*
   37.100 + * Init the virtual environment on the current LP.
   37.101 + * __vsa_base indicates whether this is the first LP to be
   37.102 + * initialized for the current domain.
   37.103 + */
  37.104 +void
  37.105 +vmx_init_env(void)
  37.106 +{
  37.107 +	u64 status, tmp_base;
  37.108 +
  37.109 +	if (!vm_buffer) {
  37.110 +		vm_buffer = alloc_xenheap_pages(vm_order);
  37.111 +		ASSERT(vm_buffer);
  37.112 +		printk("vm_buffer: 0x%lx\n", vm_buffer);
  37.113 +	}
  37.114 +
   37.115 +	status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
  37.116 +				    __pa(vm_buffer),
  37.117 +				    vm_buffer,
  37.118 +				    &tmp_base);
  37.119 +
  37.120 +	if (status != PAL_STATUS_SUCCESS) {
  37.121 +		printk("ia64_pal_vp_init_env failed.\n");
   37.122 +		return;
  37.123 +	}
  37.124 +
  37.125 +	if (!__vsa_base)
  37.126 +		__vsa_base = tmp_base;
  37.127 +	else
   37.128 +		ASSERT(tmp_base == __vsa_base);
  37.129 +
  37.130 +	/* Init stub for rr7 switch */
  37.131 +	vmx_init_double_mapping_stub();
  37.132 +}
  37.133 +
  37.134 +typedef union {
  37.135 +	u64 value;
  37.136 +	struct {
  37.137 +		u64 number : 8;
  37.138 +		u64 revision : 8;
  37.139 +		u64 model : 8;
  37.140 +		u64 family : 8;
  37.141 +		u64 archrev : 8;
  37.142 +		u64 rv : 24;
  37.143 +	};
  37.144 +} cpuid3_t;
  37.145 +
  37.146 +/* Allocate vpd from xenheap */
  37.147 +static vpd_t *alloc_vpd(void)
  37.148 +{
  37.149 +	int i;
  37.150 +	cpuid3_t cpuid3;
  37.151 +	vpd_t *vpd;
  37.152 +
  37.153 +	vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
  37.154 +	if (!vpd) {
  37.155 +		printk("VPD allocation failed.\n");
  37.156 +		return NULL;
  37.157 +	}
  37.158 +
   37.159 +	printk("vpd base: 0x%lx, vpd size: %ld\n", (u64)vpd, sizeof(vpd_t));
  37.160 +	memset(vpd, 0, VPD_SIZE);
  37.161 +	/* CPUID init */
  37.162 +	for (i = 0; i < 5; i++)
  37.163 +		vpd->vcpuid[i] = ia64_get_cpuid(i);
  37.164 +
  37.165 +	/* Limit the CPUID number to 5 */
  37.166 +	cpuid3.value = vpd->vcpuid[3];
  37.167 +	cpuid3.number = 4;	/* 5 - 1 */
  37.168 +	vpd->vcpuid[3] = cpuid3.value;
  37.169 +
  37.170 +	vpd->vdc.d_vmsw = 1;
  37.171 +	return vpd;
  37.172 +}
  37.173 +
  37.174 +
  37.175 +
  37.176 +/*
   37.177 + * Create a VP on an initialized VMX environment.
  37.178 + */
  37.179 +static void
  37.180 +vmx_create_vp(struct exec_domain *ed)
  37.181 +{
  37.182 +	u64 ret;
  37.183 +	vpd_t *vpd = ed->arch.arch_vmx.vpd;
  37.184 +	u64 ivt_base;
   37.185 +	extern char vmx_ia64_ivt;
   37.186 +	/* vmx_ia64_ivt is a symbol, not a function pointer, so this translation is needed */
  37.187 +	ivt_base = (u64) &vmx_ia64_ivt;
  37.188 +	printk("ivt_base: 0x%lx\n", ivt_base);
  37.189 +	ret = ia64_pal_vp_create(vpd, ivt_base, 0);
  37.190 +	if (ret != PAL_STATUS_SUCCESS)
   37.191 +		panic("ia64_pal_vp_create failed.\n");
  37.192 +}
  37.193 +
  37.194 +void vmx_init_double_mapping_stub(void)
  37.195 +{
  37.196 +	u64 base, psr;
  37.197 +	extern void vmx_switch_rr7(void);
  37.198 +
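          +	/* On ia64, &vmx_switch_rr7 yields a function descriptor;
          +	 * its first word holds the actual entry address, which is
          +	 * what the dereference below retrieves. */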
  37.199 +	base = (u64) &vmx_switch_rr7;
  37.200 +	base = *((u64*)base);
  37.201 +
  37.202 +	psr = ia64_clear_ic();
  37.203 +	ia64_itr(0x1, IA64_TR_RR7_SWITCH_STUB, XEN_RR7_SWITCH_STUB,
  37.204 +		 pte_val(pfn_pte(__pa(base) >> PAGE_SHIFT, PAGE_KERNEL)),
  37.205 +		 RR7_SWITCH_SHIFT);
  37.206 +	ia64_set_psr(psr);
  37.207 +	ia64_srlz_i();
  37.208 +	printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base)));
  37.209 +}
  37.210 +
   37.211 +/* Other, non-context-related tasks can also be done at context-switch time */
  37.212 +void
  37.213 +vmx_save_state(struct exec_domain *ed)
  37.214 +{
   37.215 +	u64 status;
   37.216 +	u64 dom_rr7;
  37.217 +
  37.218 +	/* FIXME: about setting of pal_proc_vector... time consuming */
  37.219 +	status = ia64_pal_vp_save(ed->arch.arch_vmx.vpd, 0);
  37.220 +	if (status != PAL_STATUS_SUCCESS)
  37.221 +		panic("Save vp status failed\n");
  37.222 +
   37.223 +	/* FIXME: Do we really need to purge the double mapping for the old ed?
   37.224 +	 * Since the rid is completely different between prev and next,
   37.225 +	 * they don't overlap and thus no MCA is possible... */
   37.226 +	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
   37.227 +	vmx_purge_double_mapping(dom_rr7, KERNEL_START,
  37.228 +				 (u64)ed->arch.vtlb->ts->vhpt->hash);
  37.229 +
  37.230 +}
  37.231 +
   37.232 +/* Even when the guest is in physical mode, we still need such a double mapping */
  37.233 +void
  37.234 +vmx_load_state(struct exec_domain *ed)
  37.235 +{
   37.236 +	u64 status;
   37.237 +	u64 dom_rr7;
   37.238 +	u64 pte_xen, pte_vhpt;
  37.239 +
  37.240 +	status = ia64_pal_vp_restore(ed->arch.arch_vmx.vpd, 0);
  37.241 +	if (status != PAL_STATUS_SUCCESS)
  37.242 +		panic("Restore vp status failed\n");
  37.243 +
  37.244 +	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
  37.245 +	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
  37.246 +	pte_vhpt = pte_val(pfn_pte((__pa(ed->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
  37.247 +	vmx_insert_double_mapping(dom_rr7, KERNEL_START,
  37.248 +				  (u64)ed->arch.vtlb->ts->vhpt->hash,
  37.249 +				  pte_xen, pte_vhpt);
  37.250 +
   37.251 +	/* The guest vTLB does not need to be switched explicitly, since
   37.252 +	 * it is anchored in the exec_domain */
  37.253 +}
  37.254 +
  37.255 +/* Purge old double mapping and insert new one, due to rr7 change */
  37.256 +void
  37.257 +vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7)
  37.258 +{
  37.259 +	u64 pte_xen, pte_vhpt, vhpt_base;
  37.260 +
   37.261 +	vhpt_base = (u64)ed->arch.vtlb->ts->vhpt->hash;
   37.262 +	vmx_purge_double_mapping(oldrr7, KERNEL_START,
   37.263 +				 vhpt_base);
  37.264 +
  37.265 +	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
  37.266 +	pte_vhpt = pte_val(pfn_pte((__pa(vhpt_base) >> PAGE_SHIFT), PAGE_KERNEL));
  37.267 +	vmx_insert_double_mapping(newrr7, KERNEL_START,
  37.268 +				  vhpt_base,
  37.269 +				  pte_xen, pte_vhpt);
  37.270 +}
  37.271 +
  37.272 +/*
   37.273 + * Initialize the VMX environment for a guest. Only the 1st vp/exec_domain
  37.274 + * is registered here.
  37.275 + */
  37.276 +void
  37.277 +vmx_final_setup_domain(struct domain *d)
  37.278 +{
  37.279 +	struct exec_domain *ed = d->exec_domain[0];
  37.280 +	vpd_t *vpd;
  37.281 +
  37.282 +	/* Allocate resources for exec_domain 0 */
  37.283 +	//memset(&ed->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
  37.284 +
  37.285 +	vpd = alloc_vpd();
  37.286 +	ASSERT(vpd);
  37.287 +
  37.288 +	ed->arch.arch_vmx.vpd = vpd;
  37.289 +	vpd->virt_env_vaddr = vm_buffer;
  37.290 +
  37.291 +	/* ed->arch.schedule_tail = arch_vmx_do_launch; */
  37.292 +	vmx_create_vp(ed);
  37.293 +
  37.294 +	/* Set this ed to be vmx */
  37.295 +	ed->arch.arch_vmx.flags = 1;
  37.296 +
  37.297 +	/* Other vmx specific initialization work */
  37.298 +}
  37.299 +
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/xen/arch/ia64/vmx_interrupt.c	Mon May 23 15:29:59 2005 +0000
    38.3 @@ -0,0 +1,388 @@
    38.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    38.5 +/*
     38.6 + * vmx_interrupt.c: handle interruption injection.
    38.7 + * Copyright (c) 2005, Intel Corporation.
    38.8 + *
    38.9 + * This program is free software; you can redistribute it and/or modify it
   38.10 + * under the terms and conditions of the GNU General Public License,
   38.11 + * version 2, as published by the Free Software Foundation.
   38.12 + *
   38.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   38.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   38.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   38.16 + * more details.
   38.17 + *
   38.18 + * You should have received a copy of the GNU General Public License along with
   38.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   38.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   38.21 + *
   38.22 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   38.23 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   38.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   38.25 + */
   38.26 +
   38.27 +
   38.28 +#include <xen/types.h>
   38.29 +#include <asm/vmx_vcpu.h>
   38.30 +#include <asm/vmx_mm_def.h>
   38.31 +#include <asm/vmx_pal_vsa.h>
   38.32 +/* SDM vol2 5.5 - IVA based interruption handling */
   38.33 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
   38.34 +void
   38.35 +collect_interruption(VCPU *vcpu)
   38.36 +{
   38.37 +    u64 ipsr;
   38.38 +    u64 vdcr;
   38.39 +    u64 vifs;
   38.40 +    IA64_PSR vpsr;
   38.41 +    REGS * regs = vcpu_regs(vcpu);
   38.42 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   38.43 +
    38.44 +    if (vpsr.ic) {
    38.45 +        extern void vmx_dorfirfi(void);
    38.46 +        if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
    38.47 +            panic("COLLECT interruption for vmx_dorfirfi\n");
   38.48 +
   38.49 +        /* Sync mpsr id/da/dd/ss/ed bits to vipsr
    38.50 +         * since after the guest does rfi, we still want these bits on in
   38.51 +         * mpsr
   38.52 +         */
   38.53 +
   38.54 +        ipsr = regs->cr_ipsr;
   38.55 +        vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
   38.56 +             | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
   38.57 +        vmx_vcpu_set_ipsr(vcpu, vpsr.val);
   38.58 +
   38.59 +        /* Currently, for trap, we do not advance IIP to next
    38.60 +         * instruction. That's because we assume the caller has
    38.61 +         * already set up IIP correctly.
   38.62 +         */
   38.63 +
    38.64 +        vmx_vcpu_set_iip(vcpu, regs->cr_iip);
   38.65 +
   38.66 +        /* set vifs.v to zero */
   38.67 +        vifs = VPD_CR(vcpu,ifs);
   38.68 +        vifs &= ~IA64_IFS_V;
   38.69 +        vmx_vcpu_set_ifs(vcpu, vifs);
   38.70 +
   38.71 +        vmx_vcpu_set_iipa(vcpu, regs->cr_iipa);
   38.72 +    }
   38.73 +
   38.74 +    vdcr = VPD_CR(vcpu,dcr);
   38.75 +
   38.76 +    /* Set guest psr
   38.77 +     * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
   38.78 +     * be: set to the value of dcr.be
   38.79 +     * pp: set to the value of dcr.pp
   38.80 +     */
   38.81 +    vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
   38.82 +    vpsr.val |= ( vdcr & IA64_DCR_BE);
   38.83 +
   38.84 +    /* VDCR pp bit position is different from VPSR pp bit */
   38.85 +    if ( vdcr & IA64_DCR_PP ) {
   38.86 +        vpsr.val |= IA64_PSR_PP;
   38.87 +    } else {
    38.88 +        vpsr.val &= ~IA64_PSR_PP;
   38.89 +    }
   38.90 +
   38.91 +    vmx_vcpu_set_psr(vcpu, vpsr.val);
   38.92 +
   38.93 +}
    38.94 +void
   38.95 +inject_guest_interruption(VCPU *vcpu, u64 vec)
   38.96 +{
   38.97 +    u64 viva;
   38.98 +    REGS *regs;
    38.99 +    regs = vcpu_regs(vcpu);
  38.100 +
  38.101 +    collect_interruption(vcpu);
  38.102 +
  38.103 +    vmx_vcpu_get_iva(vcpu,&viva);
  38.104 +    regs->cr_iip = viva + vec;
  38.105 +}
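Each IA64_*_VECTOR constant used by the callers below is the architectural
offset of the corresponding entry in the guest's own IVT, so setting cr_iip
to viva + vec resumes the guest exactly where bare hardware would have
delivered the fault. For instance, assuming the architectural entry offsets:

	/* A guest data TLB fault resumes at the guest's IVT entry 2: */
	regs->cr_iip = viva + 0x0800;	/* viva + IA64_DATA_TLB_VECTOR */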
  38.106 +
  38.107 +
  38.108 +/*
   38.109 + * Set vIFA & vITIR & vIHA when vPSR.ic == 1
  38.110 + * Parameter:
  38.111 + *  set_ifa: if true, set vIFA
  38.112 + *  set_itir: if true, set vITIR
  38.113 + *  set_iha: if true, set vIHA
  38.114 + */
  38.115 +void
  38.116 +set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
  38.117 +          int set_ifa, int set_itir, int set_iha)
  38.118 +{
  38.119 +    IA64_PSR vpsr;
  38.120 +    u64 value;
  38.121 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  38.122 +    /* Vol2, Table 8-1 */
  38.123 +    if ( vpsr.ic ) {
  38.124 +        if ( set_ifa){
  38.125 +            vmx_vcpu_set_ifa(vcpu, vadr);
  38.126 +        }
  38.127 +        if ( set_itir) {
  38.128 +            value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
  38.129 +            vmx_vcpu_set_itir(vcpu, value);
  38.130 +        }
  38.131 +
  38.132 +        if ( set_iha) {
  38.133 +            vmx_vcpu_thash(vcpu, vadr, &value);
  38.134 +            vmx_vcpu_set_iha(vcpu, value);
  38.135 +        }
  38.136 +    }
  38.139 +}
  38.140 +
  38.141 +/*
  38.142 + * Data TLB Fault
  38.143 + *  @ Data TLB vector
  38.144 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.145 + */
  38.146 +void
  38.147 +dtlb_fault (VCPU *vcpu, u64 vadr)
  38.148 +{
  38.149 +    /* If vPSR.ic, IFA, ITIR, IHA */
  38.150 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  38.151 +    inject_guest_interruption(vcpu,IA64_DATA_TLB_VECTOR);
  38.152 +}
  38.153 +
  38.154 +/*
  38.155 + * Instruction TLB Fault
  38.156 + *  @ Instruction TLB vector
  38.157 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.158 + */
  38.159 +void
  38.160 +itlb_fault (VCPU *vcpu, u64 vadr)
  38.161 +{
  38.162 +     /* If vPSR.ic, IFA, ITIR, IHA */
  38.163 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  38.164 +    inject_guest_interruption(vcpu,IA64_INST_TLB_VECTOR);
  38.165 +}
  38.166 +
  38.167 +
  38.168 +
  38.169 +/*
  38.170 + * Data Nested TLB Fault
  38.171 + *  @ Data Nested TLB Vector
  38.172 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.173 + */
  38.174 +void
  38.175 +nested_dtlb (VCPU *vcpu)
  38.176 +{
  38.177 +    inject_guest_interruption(vcpu,IA64_DATA_NESTED_TLB_VECTOR);
  38.178 +}
  38.179 +
  38.180 +/*
  38.181 + * Alternate Data TLB Fault
  38.182 + *  @ Alternate Data TLB vector
  38.183 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.184 + */
  38.185 +void
  38.186 +alt_dtlb (VCPU *vcpu, u64 vadr)
  38.187 +{
  38.188 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  38.189 +    inject_guest_interruption(vcpu,IA64_ALT_DATA_TLB_VECTOR);
  38.190 +}
  38.191 +
  38.192 +
  38.193 +/*
   38.194 + * Alternate Instruction TLB Fault
   38.195 + *  @ Alternate Instruction TLB vector
  38.196 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.197 + */
  38.198 +void
  38.199 +alt_itlb (VCPU *vcpu, u64 vadr)
  38.200 +{
  38.201 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  38.202 +    inject_guest_interruption(vcpu,IA64_ALT_INST_TLB_VECTOR);
  38.203 +}
  38.204 +
  38.205 +/* Deal with:
  38.206 + *  VHPT Translation Vector
  38.207 + */
  38.208 +static void
  38.209 +_vhpt_fault(VCPU *vcpu, u64 vadr)
  38.210 +{
  38.211 +    /* If vPSR.ic, IFA, ITIR, IHA*/
  38.212 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  38.213 +    inject_guest_interruption(vcpu,IA64_VHPT_TRANS_VECTOR);
  38.216 +}
  38.217 +
  38.218 +/*
  38.219 + * VHPT Instruction Fault
  38.220 + *  @ VHPT Translation vector
  38.221 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.222 + */
  38.223 +void
  38.224 +ivhpt_fault (VCPU *vcpu, u64 vadr)
  38.225 +{
  38.226 +    _vhpt_fault(vcpu, vadr);
  38.227 +}
  38.228 +
  38.229 +
  38.230 +/*
  38.231 + * VHPT Data Fault
  38.232 + *  @ VHPT Translation vector
  38.233 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.234 + */
  38.235 +void
  38.236 +dvhpt_fault (VCPU *vcpu, u64 vadr)
  38.237 +{
  38.238 +    _vhpt_fault(vcpu, vadr);
  38.239 +}
  38.240 +
  38.241 +
  38.242 +
  38.243 +/*
  38.244 + * Deal with:
  38.245 + *  General Exception vector
  38.246 + */
  38.247 +void
  38.248 +_general_exception (VCPU *vcpu)
  38.249 +{
  38.250 +    inject_guest_interruption(vcpu,IA64_GENEX_VECTOR);
  38.251 +}
  38.252 +
  38.253 +
  38.254 +/*
  38.255 + * Illegal Operation Fault
  38.256 + *  @ General Exception Vector
  38.257 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.258 + */
  38.259 +void
  38.260 +illegal_op (VCPU *vcpu)
  38.261 +{
  38.262 +    _general_exception(vcpu);
  38.263 +}
  38.264 +
  38.265 +/*
  38.266 + * Illegal Dependency Fault
  38.267 + *  @ General Exception Vector
  38.268 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.269 + */
  38.270 +void
  38.271 +illegal_dep (VCPU *vcpu)
  38.272 +{
  38.273 +    _general_exception(vcpu);
  38.274 +}
  38.275 +
  38.276 +/*
  38.277 + * Reserved Register/Field Fault
  38.278 + *  @ General Exception Vector
  38.279 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.280 + */
  38.281 +void
  38.282 +rsv_reg_field (VCPU *vcpu)
  38.283 +{
  38.284 +    _general_exception(vcpu);
  38.285 +}
  38.286 +/*
  38.287 + * Privileged Operation Fault
  38.288 + *  @ General Exception Vector
  38.289 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.290 + */
  38.291 +
  38.292 +void
  38.293 +privilege_op (VCPU *vcpu)
  38.294 +{
  38.295 +    _general_exception(vcpu);
  38.296 +}
  38.297 +
  38.298 +/*
   38.299 + * Unimplemented Data Address Fault
  38.300 + *  @ General Exception Vector
  38.301 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.302 + */
  38.303 +void
  38.304 +unimpl_daddr (VCPU *vcpu)
  38.305 +{
  38.306 +    _general_exception(vcpu);
  38.307 +}
  38.308 +
  38.309 +/*
  38.310 + * Privileged Register Fault
  38.311 + *  @ General Exception Vector
  38.312 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.313 + */
  38.314 +void
  38.315 +privilege_reg (VCPU *vcpu)
  38.316 +{
  38.317 +    _general_exception(vcpu);
  38.318 +}
  38.319 +
  38.320 +/* Deal with
  38.321 + *  Nat consumption vector
  38.322 + * Parameter:
   38.323 + *  vadr: unused when t == REGISTER
  38.324 + */
  38.325 +static void
  38.326 +_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t)
  38.327 +{
  38.328 +    /* If vPSR.ic && t == DATA/INST, IFA */
  38.329 +    if ( t == DATA || t == INSTRUCTION ) {
  38.330 +        /* IFA */
  38.331 +        set_ifa_itir_iha (vcpu, vadr, 1, 0, 0);
  38.332 +    }
  38.333 +
  38.334 +    inject_guest_interruption(vcpu,IA64_NAT_CONSUMPTION_VECTOR);
  38.335 +}
  38.336 +
  38.337 +/*
  38.338 + * IR Data Nat Page Consumption Fault
  38.339 + *  @ Nat Consumption Vector
  38.340 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.341 + */
  38.342 +static void
  38.343 +ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
  38.344 +{
  38.345 +    _nat_consumption_fault(vcpu, vadr, DATA);
  38.346 +}
  38.347 +
  38.348 +/*
  38.349 + * Instruction Nat Page Consumption Fault
  38.350 + *  @ Nat Consumption Vector
  38.351 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.352 + */
  38.353 +void
  38.354 +inat_page_consumption (VCPU *vcpu, u64 vadr)
  38.355 +{
  38.356 +    _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
  38.357 +}
  38.358 +
  38.359 +/*
  38.360 + * Register Nat Consumption Fault
  38.361 + *  @ Nat Consumption Vector
  38.362 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.363 + */
  38.364 +void
  38.365 +rnat_consumption (VCPU *vcpu)
  38.366 +{
  38.367 +    _nat_consumption_fault(vcpu, 0, REGISTER);
  38.368 +}
  38.369 +
  38.370 +/*
  38.371 + * Data Nat Page Consumption Fault
  38.372 + *  @ Nat Consumption Vector
  38.373 + * Refer to SDM Vol2 Table 5-6 & 8-1
  38.374 + */
  38.375 +void
  38.376 +dnat_page_consumption (VCPU *vcpu, uint64_t vadr)
  38.377 +{
  38.378 +    _nat_consumption_fault(vcpu, vadr, DATA);
  38.379 +}
  38.380 +
  38.381 +/* Deal with
  38.382 + *  Page not present vector
  38.383 + */
  38.384 +void
  38.385 +page_not_present(VCPU *vcpu, u64 vadr)
  38.386 +{
  38.387 +    /* If vPSR.ic, IFA, ITIR */
  38.388 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  38.389 +    inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
  38.390 +}
  38.391 +
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/xen/arch/ia64/vmx_ivt.S	Mon May 23 15:29:59 2005 +0000
    39.3 @@ -0,0 +1,978 @@
    39.4 +/*
    39.5 + * arch/ia64/kernel/vmx_ivt.S
    39.6 + *
    39.7 + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
    39.8 + *	Stephane Eranian <eranian@hpl.hp.com>
    39.9 + *	David Mosberger <davidm@hpl.hp.com>
   39.10 + * Copyright (C) 2000, 2002-2003 Intel Co
   39.11 + *	Asit Mallick <asit.k.mallick@intel.com>
   39.12 + *      Suresh Siddha <suresh.b.siddha@intel.com>
   39.13 + *      Kenneth Chen <kenneth.w.chen@intel.com>
   39.14 + *      Fenghua Yu <fenghua.yu@intel.com>
   39.15 + *
   39.16 + *
   39.17 + * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
   39.18 + * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
   39.19 + *
   39.20 + * 05/3/20 Xuefei Xu  (Anthony Xu) (anthony.xu@intel.com)
   39.21 + *              Supporting Intel virtualization architecture
   39.22 + *
   39.23 + */
   39.24 +
   39.25 +/*
   39.26 + * This file defines the interruption vector table used by the CPU.
   39.27 + * It does not include one entry per possible cause of interruption.
   39.28 + *
   39.29 + * The first 20 entries of the table contain 64 bundles each while the
   39.30 + * remaining 48 entries contain only 16 bundles each.
   39.31 + *
   39.32 + * The 64 bundles are used to allow inlining the whole handler for critical
   39.33 + * interruptions like TLB misses.
   39.34 + *
   39.35 + *  For each entry, the comment is as follows:
   39.36 + *
   39.37 + *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
   39.38 + *  entry offset ----/     /         /                  /          /
   39.39 + *  entry number ---------/         /                  /          /
   39.40 + *  size of the entry -------------/                  /          /
   39.41 + *  vector name -------------------------------------/          /
   39.42 + *  interruptions triggering this vector ----------------------/
   39.43 + *
   39.44 + * The table is 32KB in size and must be aligned on 32KB boundary.
   39.45 + * (The CPU ignores the 15 lower bits of the address)
   39.46 + *
   39.47 + * Table is based upon EAS2.6 (Oct 1999)
   39.48 + */
   39.49 +
   39.50 +#include <linux/config.h>
   39.51 +
   39.52 +#include <asm/asmmacro.h>
   39.53 +#include <asm/break.h>
   39.54 +#include <asm/ia32.h>
   39.55 +#include <asm/kregs.h>
   39.56 +#include <asm/offsets.h>
   39.57 +#include <asm/pgtable.h>
   39.58 +#include <asm/processor.h>
   39.59 +#include <asm/ptrace.h>
   39.60 +#include <asm/system.h>
   39.61 +#include <asm/thread_info.h>
   39.62 +#include <asm/unistd.h>
   39.63 +#include <asm/vhpt.h>
   39.64 +
   39.65 +
   39.66 +#if 0
   39.67 +  /*
   39.68 +   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   39.69 +   * needed for something else before enabling this...
   39.70 +   */
   39.71 +# define VMX_DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
   39.72 +#else
   39.73 +# define VMX_DBG_FAULT(i)
   39.74 +#endif
   39.75 +
   39.76 +#include "vmx_minstate.h"
   39.77 +
   39.78 +
   39.79 +
   39.80 +#define VMX_FAULT(n)    \
   39.81 +vmx_fault_##n:;          \
   39.82 +    br.sptk vmx_fault_##n;         \
   39.83 +    ;;                  \
   39.84 +
   39.85 +
   39.86 +#define VMX_REFLECT(n)				\
   39.87 +	mov r31=pr;									\
   39.88 +	mov r19=n;			/* prepare to save predicates */		\
   39.89 +    mov r29=cr.ipsr;        \
   39.90 +    ;;      \
   39.91 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
   39.92 +(p7) br.sptk.many vmx_dispatch_reflection;        \
   39.93 +    VMX_FAULT(n);            \
   39.94 +
   39.95 +
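Restated in C, VMX_REFLECT(n) makes the following decision on the interrupted
context's cr.ipsr (a sketch only; VMX_FAULT(n) is really a branch-to-self,
shown here as a spin loop):

	if (ipsr & IA64_PSR_VM)            /* fault taken while the guest ran */
		vmx_dispatch_reflection(n);    /* inject it back into the guest   */
	else
		for (;;)
			;                          /* VMX_FAULT(n): unexpected host-side fault */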
   39.96 +GLOBAL_ENTRY(vmx_panic)
   39.97 +    br.sptk.many vmx_panic
   39.98 +    ;;
   39.99 +END(vmx_panic)
  39.100 +
  39.101 +
  39.102 +
  39.103 +
  39.104 +
  39.105 +	.section .text.ivt,"ax"
  39.106 +
  39.107 +	.align 32768	// align on 32KB boundary
  39.108 +	.global vmx_ia64_ivt
  39.109 +vmx_ia64_ivt:
  39.110 +/////////////////////////////////////////////////////////////////////////////////////////
  39.111 +// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
  39.112 +ENTRY(vmx_vhpt_miss)
  39.113 +    VMX_FAULT(0)
  39.114 +END(vmx_vhpt_miss)
  39.115 +
  39.116 +	.org vmx_ia64_ivt+0x400
  39.117 +/////////////////////////////////////////////////////////////////////////////////////////
  39.118 +// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
  39.119 +ENTRY(vmx_itlb_miss)
  39.120 +    mov r31 = pr
  39.121 +    mov r29=cr.ipsr;
  39.122 +    ;;
  39.123 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
  39.124 +(p6) br.sptk vmx_fault_1
  39.125 +    mov r16 = cr.ifa
  39.126 +    ;;
  39.127 +    thash r17 = r16
  39.128 +    ttag r20 = r16
  39.129 +    ;;
  39.130 +vmx_itlb_loop:
  39.131 +    cmp.eq p6,p0 = r0, r17
  39.132 +(p6) br vmx_itlb_out
  39.133 +    ;;
  39.134 +    adds r22 = VLE_TITAG_OFFSET, r17
  39.135 +    adds r23 = VLE_CCHAIN_OFFSET, r17
  39.136 +    ;;
  39.137 +    ld8 r24 = [r22]
  39.138 +    ld8 r25 = [r23]
  39.139 +    ;;
  39.140 +    lfetch [r25]
  39.141 +    cmp.eq  p6,p7 = r20, r24
  39.142 +    ;;
  39.143 +(p7)    mov r17 = r25;
  39.144 +(p7)    br.sptk vmx_itlb_loop
  39.145 +    ;;
  39.146 +    adds r23 = VLE_PGFLAGS_OFFSET, r17
  39.147 +    adds r24 = VLE_ITIR_OFFSET, r17
  39.148 +    ;;
  39.149 +    ld8 r26 = [r23]
  39.150 +    ld8 r25 = [r24]
  39.151 +    ;;
  39.152 +    mov cr.itir = r25
  39.153 +    ;;
  39.154 +    itc.i r26
  39.155 +    ;;
  39.156 +    srlz.i
  39.157 +    ;;
  39.158 +    mov r23=r31
  39.159 +    mov r22=b0
  39.160 +    adds r16=IA64_VPD_BASE_OFFSET,r21
  39.161 +    ;;
  39.162 +    ld8 r18=[r16]
  39.163 +    ;;
  39.164 +    adds r19=VPD(VPSR),r18
  39.165 +    movl r20=__vsa_base
  39.166 +    ;;
  39.167 +    ld8 r19=[r19]
  39.168 +    ld8 r20=[r20]
  39.169 +    ;;
  39.170 +    br.sptk ia64_vmm_entry
  39.171 +    ;;
  39.172 +vmx_itlb_out:
  39.173 +    mov r19 = 1
  39.174 +    br.sptk vmx_dispatch_tlb_miss
  39.175 +    VMX_FAULT(1);
  39.176 +END(vmx_itlb_miss)
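The tag-match loop above walks a VHPT collision chain; a rough C equivalent,
with a hypothetical struct standing in for the VLE_*_OFFSET layout used by
the assembly:

	struct vhpt_entry {
		unsigned long page_flags;	/* VLE_PGFLAGS_OFFSET */
		unsigned long itir;		/* VLE_ITIR_OFFSET    */
		unsigned long tag;		/* VLE_TITAG_OFFSET   */
		struct vhpt_entry *cchain;	/* VLE_CCHAIN_OFFSET  */
	};

	static struct vhpt_entry *vhpt_lookup(struct vhpt_entry *head,
	                                      unsigned long tag)
	{
		struct vhpt_entry *v;

		for (v = head; v != NULL; v = v->cchain)
			if (v->tag == tag)
				return v;	/* hit: entry inserted via itc.i/itc.d */
		return NULL;		/* miss: handed to vmx_dispatch_tlb_miss */
	}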
  39.177 +
  39.178 +	.org vmx_ia64_ivt+0x0800
  39.179 +/////////////////////////////////////////////////////////////////////////////////////////
  39.180 +// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
  39.181 +ENTRY(vmx_dtlb_miss)
  39.182 +    mov r31 = pr
  39.183 +    mov r29=cr.ipsr;
  39.184 +    ;;
  39.185 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   39.186 +(p6) br.sptk vmx_fault_2
  39.187 +    mov r16 = cr.ifa
  39.188 +    ;;
  39.189 +    thash r17 = r16
  39.190 +    ttag r20 = r16
  39.191 +    ;;
  39.192 +vmx_dtlb_loop:
  39.193 +    cmp.eq p6,p0 = r0, r17
   39.194 +(p6) br vmx_dtlb_out
  39.195 +    ;;
  39.196 +    adds r22 = VLE_TITAG_OFFSET, r17
  39.197 +    adds r23 = VLE_CCHAIN_OFFSET, r17
  39.198 +    ;;
  39.199 +    ld8 r24 = [r22]
  39.200 +    ld8 r25 = [r23]
  39.201 +    ;;
  39.202 +    lfetch [r25]
  39.203 +    cmp.eq  p6,p7 = r20, r24
  39.204 +    ;;
   39.205 +(p7) mov r17 = r25;
   39.206 +(p7) br.sptk vmx_dtlb_loop
  39.207 +    ;;
  39.208 +    adds r23 = VLE_PGFLAGS_OFFSET, r17
  39.209 +    adds r24 = VLE_ITIR_OFFSET, r17
  39.210 +    ;;
  39.211 +    ld8 r26 = [r23]
  39.212 +    ld8 r25 = [r24]
  39.213 +    ;;
  39.214 +    mov cr.itir = r25
  39.215 +    ;;
  39.216 +    itc.d r26
  39.217 +    ;;
   39.218 +    srlz.d
  39.219 +    ;;
  39.220 +    mov r23=r31
  39.221 +    mov r22=b0
  39.222 +    adds r16=IA64_VPD_BASE_OFFSET,r21
  39.223 +    ;;
  39.224 +    ld8 r18=[r16]
  39.225 +    ;;
  39.226 +    adds r19=VPD(VPSR),r18
  39.227 +    movl r20=__vsa_base
  39.228 +    ;;
  39.229 +    ld8 r19=[r19]
  39.230 +    ld8 r20=[r20]
  39.231 +    ;;
  39.232 +    br.sptk ia64_vmm_entry
  39.233 +    ;;
  39.234 +vmx_dtlb_out:
  39.235 +    mov r19 = 2
  39.236 +    br.sptk vmx_dispatch_tlb_miss
  39.237 +    VMX_FAULT(2);
  39.238 +END(vmx_dtlb_miss)
  39.239 +
  39.240 +	.org vmx_ia64_ivt+0x0c00
  39.241 +/////////////////////////////////////////////////////////////////////////////////////////
  39.242 +// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
  39.243 +ENTRY(vmx_alt_itlb_miss)
  39.244 +    mov r31 = pr
  39.245 +    mov r29=cr.ipsr;
  39.246 +    ;;
  39.247 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   39.248 +(p7) br.sptk vmx_fault_3
  39.249 +	mov r16=cr.ifa		// get address that caused the TLB miss
  39.250 +	movl r17=PAGE_KERNEL
  39.251 +	mov r24=cr.ipsr
  39.252 +	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
  39.253 +	;;
  39.254 +	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
  39.255 +	shr.u r18=r16,57	// move address bit 61 to bit 4
  39.256 +	;;
  39.257 +	andcm r18=0x10,r18	// bit 4=~address-bit(61)
  39.258 +	or r19=r17,r19		// insert PTE control bits into r19
  39.259 +	;;
  39.260 +	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
  39.261 +	;;
  39.262 +	itc.i r19		// insert the TLB entry
  39.263 +	mov pr=r31,-1
  39.264 +	rfi
  39.265 +    VMX_FAULT(3);
  39.266 +END(vmx_alt_itlb_miss)
  39.267 +
  39.268 +
  39.269 +	.org vmx_ia64_ivt+0x1000
  39.270 +/////////////////////////////////////////////////////////////////////////////////////////
  39.271 +// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
  39.272 +ENTRY(vmx_alt_dtlb_miss)
  39.273 +	mov r31=pr
  39.274 +    mov r29=cr.ipsr;
  39.275 +    ;;
  39.276 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   39.277 +(p7) br.sptk vmx_fault_4
  39.278 +	mov r16=cr.ifa		// get address that caused the TLB miss
  39.279 +	movl r17=PAGE_KERNEL
  39.280 +	mov r20=cr.isr
  39.281 +	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
  39.282 +	mov r24=cr.ipsr
  39.283 +	;;
  39.284 +	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
  39.285 +	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
  39.286 +	shr.u r18=r16,57			// move address bit 61 to bit 4
  39.287 +	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
  39.288 +	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
  39.289 +	;;
  39.290 +	andcm r18=0x10,r18	// bit 4=~address-bit(61)
  39.291 +(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
  39.292 +	dep r24=-1,r24,IA64_PSR_ED_BIT,1
  39.293 +	or r19=r19,r17		// insert PTE control bits into r19
  39.294 +	;;
  39.295 +	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
  39.296 +(p6) mov cr.ipsr=r24
  39.297 +	;;
  39.298 +(p7) itc.d r19		// insert the TLB entry
  39.299 +	mov pr=r31,-1
  39.300 +	rfi
  39.301 +    VMX_FAULT(4);
  39.302 +END(vmx_alt_dtlb_miss)
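Both alternate-miss fast paths above construct an identity mapping for the
faulting address when the fault came from Xen itself. The PTE computation,
restated in C (a sketch using the same constants as the assembly):

	unsigned long alt_miss_pte(unsigned long ifa)
	{
		unsigned long mask = ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL;
		unsigned long pte  = (ifa & mask) | PAGE_KERNEL;

		/* The shr.u/andcm pair sets PTE bit 4 (uncacheable) when
		 * address bit 61 is clear, i.e. for region-6 addresses. */
		pte |= ~(ifa >> 57) & 0x10UL;
		return pte;
	}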
  39.303 +
  39.304 +	.org vmx_ia64_ivt+0x1400
  39.305 +/////////////////////////////////////////////////////////////////////////////////////////
  39.306 +// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
  39.307 +ENTRY(vmx_nested_dtlb_miss)
  39.308 +    VMX_FAULT(5)
  39.309 +END(vmx_nested_dtlb_miss)
  39.310 +
  39.311 +	.org vmx_ia64_ivt+0x1800
  39.312 +/////////////////////////////////////////////////////////////////////////////////////////
  39.313 +// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
  39.314 +ENTRY(vmx_ikey_miss)
  39.315 +	VMX_REFLECT(6)
  39.316 +END(vmx_ikey_miss)
  39.317 +
  39.318 +	.org vmx_ia64_ivt+0x1c00
  39.319 +/////////////////////////////////////////////////////////////////////////////////////////
  39.320 +// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
  39.321 +ENTRY(vmx_dkey_miss)
  39.322 +	VMX_REFLECT(7)
  39.323 +END(vmx_dkey_miss)
  39.324 +
  39.325 +	.org vmx_ia64_ivt+0x2000
  39.326 +/////////////////////////////////////////////////////////////////////////////////////////
  39.327 +// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
  39.328 +ENTRY(vmx_dirty_bit)
  39.329 +	VMX_REFLECT(8)
   39.330 +END(vmx_dirty_bit)
  39.331 +
  39.332 +	.org vmx_ia64_ivt+0x2400
  39.333 +/////////////////////////////////////////////////////////////////////////////////////////
  39.334 +// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
  39.335 +ENTRY(vmx_iaccess_bit)
  39.336 +	VMX_REFLECT(9)
  39.337 +END(vmx_iaccess_bit)
  39.338 +
  39.339 +	.org vmx_ia64_ivt+0x2800
  39.340 +/////////////////////////////////////////////////////////////////////////////////////////
  39.341 +// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
  39.342 +ENTRY(vmx_daccess_bit)
  39.343 +	VMX_REFLECT(10)
  39.344 +END(vmx_daccess_bit)
  39.345 +
  39.346 +	.org vmx_ia64_ivt+0x2c00
  39.347 +/////////////////////////////////////////////////////////////////////////////////////////
  39.348 +// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
  39.349 +ENTRY(vmx_break_fault)
  39.350 +	mov r31=pr
  39.351 +    mov r19=11
  39.352 +    br.sptk.many vmx_dispatch_break_fault
  39.353 +END(vmx_break_fault)
  39.354 +
  39.355 +	.org vmx_ia64_ivt+0x3000
  39.356 +/////////////////////////////////////////////////////////////////////////////////////////
  39.357 +// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
  39.358 +ENTRY(vmx_interrupt)
  39.359 +	mov r31=pr		// prepare to save predicates
  39.360 +    mov r19=12
  39.361 +    mov r29=cr.ipsr
  39.362 +    ;;
  39.363 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
  39.364 +(p7) br.sptk vmx_dispatch_interrupt
  39.365 +    ;;
  39.366 +	mov r27=ar.rsc			/* M */
  39.367 +	mov r20=r1			/* A */
  39.368 +	mov r25=ar.unat		/* M */
  39.369 +	mov r26=ar.pfs			/* I */
  39.370 +	mov r28=cr.iip			/* M */
  39.371 +	cover               /* B (or nothing) */
  39.372 +	;;
  39.373 +	mov r1=sp
  39.374 +	;;
  39.375 +	invala				/* M */
  39.376 +	mov r30=cr.ifs
  39.377 +	;;
  39.378 +    addl r1=-IA64_PT_REGS_SIZE,r1
  39.379 +    ;;
  39.380 +	adds r17=2*L1_CACHE_BYTES,r1		/* really: biggest cache-line size */
  39.381 +	adds r16=PT(CR_IPSR),r1
  39.382 +	;;
  39.383 +	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
  39.384 +	st8 [r16]=r29		/* save cr.ipsr */
  39.385 +	;;
  39.386 +	lfetch.fault.excl.nt1 [r17]
  39.387 +	mov r29=b0
  39.388 +	;;
  39.389 +	adds r16=PT(R8),r1  	/* initialize first base pointer */
  39.390 +	adds r17=PT(R9),r1  	/* initialize second base pointer */
  39.391 +	mov r18=r0      		/* make sure r18 isn't NaT */
  39.392 +	;;
  39.393 +.mem.offset 0,0; st8.spill [r16]=r8,16
  39.394 +.mem.offset 8,0; st8.spill [r17]=r9,16
  39.395 +        ;;
  39.396 +.mem.offset 0,0; st8.spill [r16]=r10,24
  39.397 +.mem.offset 8,0; st8.spill [r17]=r11,24
  39.398 +        ;;
  39.399 +	st8 [r16]=r28,16	/* save cr.iip */
  39.400 +	st8 [r17]=r30,16	/* save cr.ifs */
  39.401 +	mov r8=ar.fpsr		/* M */
  39.402 +	mov r9=ar.csd
  39.403 +	mov r10=ar.ssd
  39.404 +	movl r11=FPSR_DEFAULT   /* L-unit */
  39.405 +	;;
  39.406 +	st8 [r16]=r25,16	/* save ar.unat */
  39.407 +	st8 [r17]=r26,16	/* save ar.pfs */
  39.408 +	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
  39.409 +	;;
  39.410 +    st8 [r16]=r27,16   /* save ar.rsc */
  39.411 +    adds r17=16,r17    /* skip over ar_rnat field */
  39.412 +    ;;          /* avoid RAW on r16 & r17 */
  39.413 +    st8 [r17]=r31,16   /* save predicates */
  39.414 +    adds r16=16,r16    /* skip over ar_bspstore field */
  39.415 +    ;;
  39.416 +    st8 [r16]=r29,16   /* save b0 */
  39.417 +    st8 [r17]=r18,16   /* save ar.rsc value for "loadrs" */
  39.418 +    ;;
  39.419 +.mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
  39.420 +.mem.offset 8,0; st8.spill [r17]=r12,16
  39.421 +    adds r12=-16,r1    /* switch to kernel memory stack (with 16 bytes of scratch) */
  39.422 +    ;;
  39.423 +.mem.offset 0,0; st8.spill [r16]=r13,16
  39.424 +.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
  39.425 +    mov r13=r21    /* establish `current' */
  39.426 +    ;;
  39.427 +.mem.offset 0,0; st8.spill [r16]=r15,16
  39.428 +.mem.offset 8,0; st8.spill [r17]=r14,16
  39.429 +    dep r14=-1,r0,60,4
  39.430 +    ;;
  39.431 +.mem.offset 0,0; st8.spill [r16]=r2,16
  39.432 +.mem.offset 8,0; st8.spill [r17]=r3,16
  39.433 +    adds r2=IA64_PT_REGS_R16_OFFSET,r1
  39.434 +    ;;
  39.435 +    mov r8=ar.ccv
  39.436 +    movl r1=__gp       /* establish kernel global pointer */
   39.437 +    ;;
  39.438 +    bsw.1
  39.439 +    ;;
  39.440 +	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  39.441 +	mov out0=cr.ivr		// pass cr.ivr as first arg
  39.442 +	add out1=16,sp		// pass pointer to pt_regs as second arg
  39.443 +
  39.444 +	ssm psr.ic
  39.445 +    ;;
  39.446 +    srlz.i
  39.447 +	;;
  39.448 +    ssm psr.i
  39.449 +	adds r3=8,r2		// set up second base pointer for SAVE_REST
  39.450 +	srlz.i			// ensure everybody knows psr.ic is back on
  39.451 +	;;
  39.452 +.mem.offset 0,0; st8.spill [r2]=r16,16
  39.453 +.mem.offset 8,0; st8.spill [r3]=r17,16
  39.454 +    ;;
  39.455 +.mem.offset 0,0; st8.spill [r2]=r18,16
  39.456 +.mem.offset 8,0; st8.spill [r3]=r19,16
  39.457 +    ;;
  39.458 +.mem.offset 0,0; st8.spill [r2]=r20,16
  39.459 +.mem.offset 8,0; st8.spill [r3]=r21,16
  39.460 +    mov r18=b6
  39.461 +    ;;
  39.462 +.mem.offset 0,0; st8.spill [r2]=r22,16
  39.463 +.mem.offset 8,0; st8.spill [r3]=r23,16
  39.464 +    mov r19=b7
  39.465 +    ;;
  39.466 +.mem.offset 0,0; st8.spill [r2]=r24,16
  39.467 +.mem.offset 8,0; st8.spill [r3]=r25,16
  39.468 +    ;;
  39.469 +.mem.offset 0,0; st8.spill [r2]=r26,16
  39.470 +.mem.offset 8,0; st8.spill [r3]=r27,16
  39.471 +    ;;
  39.472 +.mem.offset 0,0; st8.spill [r2]=r28,16
  39.473 +.mem.offset 8,0; st8.spill [r3]=r29,16
  39.474 +    ;;
  39.475 +.mem.offset 0,0; st8.spill [r2]=r30,16
  39.476 +.mem.offset 8,0; st8.spill [r3]=r31,32
  39.477 +    ;;
  39.478 +    mov ar.fpsr=r11     /* M-unit */
  39.479 +    st8 [r2]=r8,8      /* ar.ccv */
  39.480 +    adds r24=PT(B6)-PT(F7),r3
  39.481 +    ;;
  39.482 +    stf.spill [r2]=f6,32
  39.483 +    stf.spill [r3]=f7,32
  39.484 +    ;;
  39.485 +    stf.spill [r2]=f8,32
  39.486 +    stf.spill [r3]=f9,32
  39.487 +    ;;
  39.488 +    stf.spill [r2]=f10
  39.489 +    stf.spill [r3]=f11
  39.490 +    adds r25=PT(B7)-PT(F11),r3
  39.491 +    ;;
  39.492 +    st8 [r24]=r18,16       /* b6 */
  39.493 +    st8 [r25]=r19,16       /* b7 */
  39.494 +    ;;
  39.495 +    st8 [r24]=r9           /* ar.csd */
  39.496 +    st8 [r25]=r10          /* ar.ssd */
  39.497 +    ;;
  39.498 +	srlz.d			// make sure we see the effect of cr.ivr
  39.499 +	movl r14=ia64_leave_nested
  39.500 +	;;
  39.501 +	mov rp=r14
  39.502 +	br.call.sptk.many b6=vmx_ia64_handle_irq
  39.503 +	;;
  39.504 +END(vmx_interrupt)
  39.505 +
  39.506 +	.org vmx_ia64_ivt+0x3400
  39.507 +/////////////////////////////////////////////////////////////////////////////////////////
  39.508 +// 0x3400 Entry 13 (size 64 bundles) Reserved
  39.509 +	VMX_DBG_FAULT(13)
  39.510 +	VMX_FAULT(13)
  39.511 +
  39.512 +
  39.513 +	.org vmx_ia64_ivt+0x3800
  39.514 +/////////////////////////////////////////////////////////////////////////////////////////
  39.515 +// 0x3800 Entry 14 (size 64 bundles) Reserved
  39.516 +	VMX_DBG_FAULT(14)
  39.517 +	VMX_FAULT(14)
  39.518 +
  39.519 +
  39.520 +	.org vmx_ia64_ivt+0x3c00
  39.521 +/////////////////////////////////////////////////////////////////////////////////////////
  39.522 +// 0x3c00 Entry 15 (size 64 bundles) Reserved
  39.523 +	VMX_DBG_FAULT(15)
  39.524 +	VMX_FAULT(15)
  39.525 +
  39.526 +
  39.527 +	.org vmx_ia64_ivt+0x4000
  39.528 +/////////////////////////////////////////////////////////////////////////////////////////
  39.529 +// 0x4000 Entry 16 (size 64 bundles) Reserved
  39.530 +	VMX_DBG_FAULT(16)
  39.531 +	VMX_FAULT(16)
  39.532 +
  39.533 +	.org vmx_ia64_ivt+0x4400
  39.534 +/////////////////////////////////////////////////////////////////////////////////////////
  39.535 +// 0x4400 Entry 17 (size 64 bundles) Reserved
  39.536 +	VMX_DBG_FAULT(17)
  39.537 +	VMX_FAULT(17)
  39.538 +
  39.539 +	.org vmx_ia64_ivt+0x4800
  39.540 +/////////////////////////////////////////////////////////////////////////////////////////
  39.541 +// 0x4800 Entry 18 (size 64 bundles) Reserved
  39.542 +	VMX_DBG_FAULT(18)
  39.543 +	VMX_FAULT(18)
  39.544 +
  39.545 +	.org vmx_ia64_ivt+0x4c00
  39.546 +/////////////////////////////////////////////////////////////////////////////////////////
  39.547 +// 0x4c00 Entry 19 (size 64 bundles) Reserved
  39.548 +	VMX_DBG_FAULT(19)
  39.549 +	VMX_FAULT(19)
  39.550 +
          +	.org vmx_ia64_ivt+0x5000
          +/////////////////////////////////////////////////////////////////////////////////////////
          +// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
          +ENTRY(vmx_page_not_present)
          +	VMX_REFLECT(20)
          +END(vmx_page_not_present)
          +
          +	.org vmx_ia64_ivt+0x5100
          +/////////////////////////////////////////////////////////////////////////////////////////
          +// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
          +ENTRY(vmx_key_permission)
          +	VMX_REFLECT(21)
          +END(vmx_key_permission)
          +
          +	.org vmx_ia64_ivt+0x5200
   39.551 +/////////////////////////////////////////////////////////////////////////////////////////
   39.552 +// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
  39.553 +ENTRY(vmx_iaccess_rights)
  39.554 +	VMX_REFLECT(22)
  39.555 +END(vmx_iaccess_rights)
  39.556 +
  39.557 +	.org vmx_ia64_ivt+0x5300
  39.558 +/////////////////////////////////////////////////////////////////////////////////////////
  39.559 +// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
  39.560 +ENTRY(vmx_daccess_rights)
  39.561 +	VMX_REFLECT(23)
  39.562 +END(vmx_daccess_rights)
  39.563 +
  39.564 +	.org vmx_ia64_ivt+0x5400
  39.565 +/////////////////////////////////////////////////////////////////////////////////////////
  39.566 +// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
  39.567 +ENTRY(vmx_general_exception)
  39.568 +    VMX_FAULT(24)
  39.569 +//    VMX_REFLECT(24)
  39.570 +END(vmx_general_exception)
  39.571 +
  39.572 +	.org vmx_ia64_ivt+0x5500
  39.573 +/////////////////////////////////////////////////////////////////////////////////////////
  39.574 +// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
  39.575 +ENTRY(vmx_disabled_fp_reg)
  39.576 +	VMX_REFLECT(25)
  39.577 +END(vmx_disabled_fp_reg)
  39.578 +
  39.579 +	.org vmx_ia64_ivt+0x5600
  39.580 +/////////////////////////////////////////////////////////////////////////////////////////
  39.581 +// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
  39.582 +ENTRY(vmx_nat_consumption)
  39.583 +	VMX_REFLECT(26)
  39.584 +END(vmx_nat_consumption)
  39.585 +
  39.586 +	.org vmx_ia64_ivt+0x5700
  39.587 +/////////////////////////////////////////////////////////////////////////////////////////
  39.588 +// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
  39.589 +ENTRY(vmx_speculation_vector)
  39.590 +	VMX_REFLECT(27)
  39.591 +END(vmx_speculation_vector)
  39.592 +
  39.593 +	.org vmx_ia64_ivt+0x5800
  39.594 +/////////////////////////////////////////////////////////////////////////////////////////
  39.595 +// 0x5800 Entry 28 (size 16 bundles) Reserved
  39.596 +	VMX_DBG_FAULT(28)
  39.597 +	VMX_FAULT(28)
  39.598 +
  39.599 +	.org vmx_ia64_ivt+0x5900
  39.600 +/////////////////////////////////////////////////////////////////////////////////////////
  39.601 +// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
  39.602 +ENTRY(vmx_debug_vector)
  39.603 +	VMX_DBG_FAULT(29)
  39.604 +	VMX_FAULT(29)
  39.605 +END(vmx_debug_vector)
  39.606 +
  39.607 +	.org vmx_ia64_ivt+0x5a00
  39.608 +/////////////////////////////////////////////////////////////////////////////////////////
  39.609 +// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
  39.610 +ENTRY(vmx_unaligned_access)
  39.611 +	VMX_REFLECT(30)
  39.612 +END(vmx_unaligned_access)
  39.613 +
  39.614 +	.org vmx_ia64_ivt+0x5b00
  39.615 +/////////////////////////////////////////////////////////////////////////////////////////
  39.616 +// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
  39.617 +ENTRY(vmx_unsupported_data_reference)
  39.618 +	VMX_REFLECT(31)
  39.619 +END(vmx_unsupported_data_reference)
  39.620 +
  39.621 +	.org vmx_ia64_ivt+0x5c00
  39.622 +/////////////////////////////////////////////////////////////////////////////////////////
  39.623 +// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
  39.624 +ENTRY(vmx_floating_point_fault)
  39.625 +	VMX_REFLECT(32)
  39.626 +END(vmx_floating_point_fault)
  39.627 +
  39.628 +	.org vmx_ia64_ivt+0x5d00
  39.629 +/////////////////////////////////////////////////////////////////////////////////////////
  39.630 +// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
  39.631 +ENTRY(vmx_floating_point_trap)
  39.632 +	VMX_REFLECT(33)
  39.633 +END(vmx_floating_point_trap)
  39.634 +
  39.635 +	.org vmx_ia64_ivt+0x5e00
  39.636 +/////////////////////////////////////////////////////////////////////////////////////////
  39.637 +// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
  39.638 +ENTRY(vmx_lower_privilege_trap)
  39.639 +	VMX_REFLECT(34)
  39.640 +END(vmx_lower_privilege_trap)
  39.641 +
  39.642 +	.org vmx_ia64_ivt+0x5f00
  39.643 +/////////////////////////////////////////////////////////////////////////////////////////
  39.644 +// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
  39.645 +ENTRY(vmx_taken_branch_trap)
  39.646 +	VMX_REFLECT(35)
  39.647 +END(vmx_taken_branch_trap)
  39.648 +
  39.649 +	.org vmx_ia64_ivt+0x6000
  39.650 +/////////////////////////////////////////////////////////////////////////////////////////
  39.651 +// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
  39.652 +ENTRY(vmx_single_step_trap)
  39.653 +	VMX_REFLECT(36)
  39.654 +END(vmx_single_step_trap)
  39.655 +
  39.656 +	.org vmx_ia64_ivt+0x6100
  39.657 +/////////////////////////////////////////////////////////////////////////////////////////
  39.658 +// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
  39.659 +ENTRY(vmx_virtualization_fault)
  39.660 +	VMX_DBG_FAULT(37)
  39.661 +	mov r31=pr
  39.662 +    mov r19=37
  39.663 +    br.sptk vmx_dispatch_virtualization_fault
  39.664 +END(vmx_virtualization_fault)
  39.665 +
  39.666 +	.org vmx_ia64_ivt+0x6200
  39.667 +/////////////////////////////////////////////////////////////////////////////////////////
  39.668 +// 0x6200 Entry 38 (size 16 bundles) Reserved
  39.669 +	VMX_DBG_FAULT(38)
  39.670 +	VMX_FAULT(38)
  39.671 +
  39.672 +	.org vmx_ia64_ivt+0x6300
  39.673 +/////////////////////////////////////////////////////////////////////////////////////////
  39.674 +// 0x6300 Entry 39 (size 16 bundles) Reserved
  39.675 +	VMX_DBG_FAULT(39)
  39.676 +	VMX_FAULT(39)
  39.677 +
  39.678 +	.org vmx_ia64_ivt+0x6400
  39.679 +/////////////////////////////////////////////////////////////////////////////////////////
  39.680 +// 0x6400 Entry 40 (size 16 bundles) Reserved
  39.681 +	VMX_DBG_FAULT(40)
  39.682 +	VMX_FAULT(40)
  39.683 +
  39.684 +	.org vmx_ia64_ivt+0x6500
  39.685 +/////////////////////////////////////////////////////////////////////////////////////////
  39.686 +// 0x6500 Entry 41 (size 16 bundles) Reserved
  39.687 +	VMX_DBG_FAULT(41)
  39.688 +	VMX_FAULT(41)
  39.689 +
  39.690 +	.org vmx_ia64_ivt+0x6600
  39.691 +/////////////////////////////////////////////////////////////////////////////////////////
  39.692 +// 0x6600 Entry 42 (size 16 bundles) Reserved
  39.693 +	VMX_DBG_FAULT(42)
  39.694 +	VMX_FAULT(42)
  39.695 +
  39.696 +	.org vmx_ia64_ivt+0x6700
  39.697 +/////////////////////////////////////////////////////////////////////////////////////////
  39.698 +// 0x6700 Entry 43 (size 16 bundles) Reserved
  39.699 +	VMX_DBG_FAULT(43)
  39.700 +	VMX_FAULT(43)
  39.701 +
  39.702 +	.org vmx_ia64_ivt+0x6800
  39.703 +/////////////////////////////////////////////////////////////////////////////////////////
  39.704 +// 0x6800 Entry 44 (size 16 bundles) Reserved
  39.705 +	VMX_DBG_FAULT(44)
  39.706 +	VMX_FAULT(44)
  39.707 +
  39.708 +	.org vmx_ia64_ivt+0x6900
  39.709 +/////////////////////////////////////////////////////////////////////////////////////////
   39.710 +// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
  39.711 +ENTRY(vmx_ia32_exception)
  39.712 +	VMX_DBG_FAULT(45)
  39.713 +	VMX_FAULT(45)
  39.714 +END(vmx_ia32_exception)
  39.715 +
  39.716 +	.org vmx_ia64_ivt+0x6a00
  39.717 +/////////////////////////////////////////////////////////////////////////////////////////
  39.718 +// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
  39.719 +ENTRY(vmx_ia32_intercept)
  39.720 +	VMX_DBG_FAULT(46)
  39.721 +	VMX_FAULT(46)
  39.722 +END(vmx_ia32_intercept)
  39.723 +
  39.724 +	.org vmx_ia64_ivt+0x6b00
  39.725 +/////////////////////////////////////////////////////////////////////////////////////////
  39.726 +// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
  39.727 +ENTRY(vmx_ia32_interrupt)
  39.728 +	VMX_DBG_FAULT(47)
  39.729 +	VMX_FAULT(47)
  39.730 +END(vmx_ia32_interrupt)
  39.731 +
  39.732 +	.org vmx_ia64_ivt+0x6c00
  39.733 +/////////////////////////////////////////////////////////////////////////////////////////
  39.734 +// 0x6c00 Entry 48 (size 16 bundles) Reserved
  39.735 +	VMX_DBG_FAULT(48)
  39.736 +	VMX_FAULT(48)
  39.737 +
  39.738 +	.org vmx_ia64_ivt+0x6d00
  39.739 +/////////////////////////////////////////////////////////////////////////////////////////
  39.740 +// 0x6d00 Entry 49 (size 16 bundles) Reserved
  39.741 +	VMX_DBG_FAULT(49)
  39.742 +	VMX_FAULT(49)
  39.743 +
  39.744 +	.org vmx_ia64_ivt+0x6e00
  39.745 +/////////////////////////////////////////////////////////////////////////////////////////
  39.746 +// 0x6e00 Entry 50 (size 16 bundles) Reserved
  39.747 +	VMX_DBG_FAULT(50)
  39.748 +	VMX_FAULT(50)
  39.749 +
  39.750 +	.org vmx_ia64_ivt+0x6f00
  39.751 +/////////////////////////////////////////////////////////////////////////////////////////
  39.752 +// 0x6f00 Entry 51 (size 16 bundles) Reserved
  39.753 +	VMX_DBG_FAULT(51)
  39.754 +	VMX_FAULT(51)
  39.755 +
  39.756 +	.org vmx_ia64_ivt+0x7000
  39.757 +/////////////////////////////////////////////////////////////////////////////////////////
  39.758 +// 0x7000 Entry 52 (size 16 bundles) Reserved
  39.759 +	VMX_DBG_FAULT(52)
  39.760 +	VMX_FAULT(52)
  39.761 +
  39.762 +	.org vmx_ia64_ivt+0x7100
  39.763 +/////////////////////////////////////////////////////////////////////////////////////////
  39.764 +// 0x7100 Entry 53 (size 16 bundles) Reserved
  39.765 +	VMX_DBG_FAULT(53)
  39.766 +	VMX_FAULT(53)
  39.767 +
  39.768 +	.org vmx_ia64_ivt+0x7200
  39.769 +/////////////////////////////////////////////////////////////////////////////////////////
  39.770 +// 0x7200 Entry 54 (size 16 bundles) Reserved
  39.771 +	VMX_DBG_FAULT(54)
  39.772 +	VMX_FAULT(54)
  39.773 +
  39.774 +	.org vmx_ia64_ivt+0x7300
  39.775 +/////////////////////////////////////////////////////////////////////////////////////////
  39.776 +// 0x7300 Entry 55 (size 16 bundles) Reserved
  39.777 +	VMX_DBG_FAULT(55)
  39.778 +	VMX_FAULT(55)
  39.779 +
  39.780 +	.org vmx_ia64_ivt+0x7400
  39.781 +/////////////////////////////////////////////////////////////////////////////////////////
  39.782 +// 0x7400 Entry 56 (size 16 bundles) Reserved
  39.783 +	VMX_DBG_FAULT(56)
  39.784 +	VMX_FAULT(56)
  39.785 +
  39.786 +	.org vmx_ia64_ivt+0x7500
  39.787 +/////////////////////////////////////////////////////////////////////////////////////////
  39.788 +// 0x7500 Entry 57 (size 16 bundles) Reserved
  39.789 +	VMX_DBG_FAULT(57)
  39.790 +	VMX_FAULT(57)
  39.791 +
  39.792 +	.org vmx_ia64_ivt+0x7600
  39.793 +/////////////////////////////////////////////////////////////////////////////////////////
  39.794 +// 0x7600 Entry 58 (size 16 bundles) Reserved
  39.795 +	VMX_DBG_FAULT(58)
  39.796 +	VMX_FAULT(58)
  39.797 +
  39.798 +	.org vmx_ia64_ivt+0x7700
  39.799 +/////////////////////////////////////////////////////////////////////////////////////////
  39.800 +// 0x7700 Entry 59 (size 16 bundles) Reserved
  39.801 +	VMX_DBG_FAULT(59)
  39.802 +	VMX_FAULT(59)
  39.803 +
  39.804 +	.org vmx_ia64_ivt+0x7800
  39.805 +/////////////////////////////////////////////////////////////////////////////////////////
  39.806 +// 0x7800 Entry 60 (size 16 bundles) Reserved
  39.807 +	VMX_DBG_FAULT(60)
  39.808 +	VMX_FAULT(60)
  39.809 +
  39.810 +	.org vmx_ia64_ivt+0x7900
  39.811 +/////////////////////////////////////////////////////////////////////////////////////////
  39.812 +// 0x7900 Entry 61 (size 16 bundles) Reserved
  39.813 +	VMX_DBG_FAULT(61)
  39.814 +	VMX_FAULT(61)
  39.815 +
  39.816 +	.org vmx_ia64_ivt+0x7a00
  39.817 +/////////////////////////////////////////////////////////////////////////////////////////
  39.818 +// 0x7a00 Entry 62 (size 16 bundles) Reserved
  39.819 +	VMX_DBG_FAULT(62)
  39.820 +	VMX_FAULT(62)
  39.821 +
  39.822 +	.org vmx_ia64_ivt+0x7b00
  39.823 +/////////////////////////////////////////////////////////////////////////////////////////
  39.824 +// 0x7b00 Entry 63 (size 16 bundles) Reserved
  39.825 +	VMX_DBG_FAULT(63)
  39.826 +	VMX_FAULT(63)
  39.827 +
  39.828 +	.org vmx_ia64_ivt+0x7c00
  39.829 +/////////////////////////////////////////////////////////////////////////////////////////
  39.830 +// 0x7c00 Entry 64 (size 16 bundles) Reserved
  39.831 +    VMX_DBG_FAULT(64)
  39.832 +	VMX_FAULT(64)
  39.833 +
  39.834 +	.org vmx_ia64_ivt+0x7d00
  39.835 +/////////////////////////////////////////////////////////////////////////////////////////
  39.836 +// 0x7d00 Entry 65 (size 16 bundles) Reserved
  39.837 +	VMX_DBG_FAULT(65)
  39.838 +	VMX_FAULT(65)
  39.839 +
  39.840 +	.org vmx_ia64_ivt+0x7e00
  39.841 +/////////////////////////////////////////////////////////////////////////////////////////
  39.842 +// 0x7e00 Entry 66 (size 16 bundles) Reserved
  39.843 +	VMX_DBG_FAULT(66)
  39.844 +	VMX_FAULT(66)
  39.845 +
  39.846 +	.org vmx_ia64_ivt+0x7f00
  39.847 +/////////////////////////////////////////////////////////////////////////////////////////
  39.848 +// 0x7f00 Entry 67 (size 16 bundles) Reserved
  39.849 +	VMX_DBG_FAULT(67)
  39.850 +	VMX_FAULT(67)
  39.851 +
  39.852 +	.org vmx_ia64_ivt+0x8000
  39.853 +    // There is no particular reason for this code to be here, other than that
  39.854 +    // there happens to be space here that would go unused otherwise.  If this
   39.855 +    // fault ever gets "unreserved", simply move the following code to a more
  39.856 +    // suitable spot...
  39.857 +
  39.858 +
  39.859 +ENTRY(vmx_dispatch_reflection)
  39.860 +    /*
  39.861 +     * Input:
  39.862 +     *  psr.ic: off
  39.863 +     *  r19:    intr type (offset into ivt, see ia64_int.h)
  39.864 +     *  r31:    contains saved predicates (pr)
  39.865 +     */
  39.866 +    VMX_SAVE_MIN_WITH_COVER_R19
  39.867 +    alloc r14=ar.pfs,0,0,4,0
  39.868 +    mov out0=cr.ifa
  39.869 +    mov out1=cr.isr
  39.870 +    mov out2=cr.iim
  39.871 +    mov out3=r15
  39.872 +
  39.873 +    ssm psr.ic
  39.874 +    ;;
  39.875 +    srlz.i                  // guarantee that interruption collection is on
  39.876 +    ;;
  39.877 +    ssm psr.i               // restore psr.i
  39.878 +    adds r3=16,r2                // set up second base pointer
  39.879 +    ;;
  39.880 +    VMX_SAVE_REST
  39.881 +    movl r14=ia64_leave_hypervisor
  39.882 +    ;;
  39.883 +    mov rp=r14
  39.884 +    br.call.sptk.many b6=vmx_reflect_interruption
  39.885 +END(vmx_dispatch_reflection)
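The dispatch bodies that follow all repeat the shape of vmx_dispatch_reflection
above. Schematically, as comments (the macro and label names are the real ones;
no new code is implied):

	/* 1. VMX_SAVE_MIN_WITH_COVER_R19: minimal state save, psr.ic off
	 * 2. marshal out0..outN arguments for the C-level handler
	 * 3. ssm psr.ic ;; srlz.i      -- interruption collection back on
	 * 4. ssm psr.i                 -- interrupts back on
	 * 5. VMX_SAVE_REST             -- spill the remaining registers
	 * 6. rp = ia64_leave_hypervisor, then br.call the C handler
	 */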
  39.886 +
  39.887 +ENTRY(vmx_dispatch_virtualization_fault)
  39.888 +    cmp.eq pEml,pNonEml=r0,r0       /* force pEml =1, save r4 ~ r7 */
  39.889 +    ;;
  39.890 +    VMX_SAVE_MIN_WITH_COVER_R19
  39.891 +    ;;
  39.892 +    alloc r14=ar.pfs,0,0,3,0        // now it's safe (must be first in insn group!)
  39.893 +    mov out0=r13        //vcpu
  39.894 +    mov out1=r4         //cause
  39.895 +    mov out2=r5         //opcode
  39.896 +    ssm psr.ic
  39.897 +    ;;
  39.898 +    srlz.i                  // guarantee that interruption collection is on
  39.899 +    ;;
  39.900 +    ssm psr.i               // restore psr.i
  39.901 +    adds r3=16,r2                // set up second base pointer
  39.902 +    ;;
  39.903 +    VMX_SAVE_REST
  39.904 +    movl r14=ia64_leave_hypervisor
  39.905 +    ;;
  39.906 +    mov rp=r14
  39.907 +    br.call.sptk.many b6=vmx_emulate
  39.908 +END(vmx_dispatch_virtualization_fault)
  39.909 +
  39.910 +
  39.911 +
  39.912 +ENTRY(vmx_dispatch_tlb_miss)
  39.913 +    VMX_SAVE_MIN_WITH_COVER_R19
  39.914 +    alloc r14=ar.pfs,0,0,3,0
  39.915 +    mov out0=r13
  39.916 +    mov out1=r15
  39.917 +    mov out2=cr.ifa
  39.918 +
  39.919 +    ssm psr.ic
  39.920 +    ;;
  39.921 +    srlz.i                  // guarantee that interruption collection is on
  39.922 +    ;;
  39.923 +    ssm psr.i               // restore psr.i
  39.924 +    adds r3=16,r2                // set up second base pointer
  39.925 +    ;;
  39.926 +    VMX_SAVE_REST
  39.927 +    movl r14=ia64_leave_hypervisor
  39.928 +    ;;
  39.929 +    mov rp=r14
  39.930 +    br.call.sptk.many b6=vmx_hpw_miss
  39.931 +END(vmx_dispatch_tlb_miss)
  39.932 +
  39.933 +
  39.934 +ENTRY(vmx_dispatch_break_fault)
  39.935 +    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  39.936 +    ;;
  39.937 +    VMX_SAVE_MIN_WITH_COVER_R19
  39.938 +    ;;
  39.939 +    alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
  39.940 +    mov out0=cr.ifa
  39.941 +    adds out1=16,sp
  39.942 +    mov out2=cr.isr     // FIXME: pity to make this slow access twice
  39.943 +    mov out3=cr.iim     // FIXME: pity to make this slow access twice
  39.944 +
  39.945 +    ssm psr.ic
  39.946 +    ;;
  39.947 +    srlz.i                  // guarantee that interruption collection is on
  39.948 +    ;;
  39.949 +    ssm psr.i               // restore psr.i
  39.950 +    adds r3=16,r2                // set up second base pointer
  39.951 +    ;;
  39.952 +    VMX_SAVE_REST
  39.953 +    movl r14=ia64_leave_hypervisor
  39.954 +    ;;
  39.955 +    mov rp=r14
  39.956 +    br.call.sptk.many b6=vmx_ia64_handle_break
  39.957 +END(vmx_dispatch_break_fault)
  39.958 +
  39.959 +
  39.960 +ENTRY(vmx_dispatch_interrupt)
  39.961 +    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  39.962 +    ;;
  39.963 +	VMX_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
  39.964 +	;;
  39.965 +	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  39.966 +	mov out0=cr.ivr		// pass cr.ivr as first arg
  39.967 +	add out1=16,sp		// pass pointer to pt_regs as second arg
  39.968 +
  39.969 +	ssm psr.ic
  39.970 +	;;
   39.971 +	srlz.i			// guarantee that interruption collection is on
   39.972 +	;;
   39.973 +	ssm psr.i		// restore psr.i
  39.974 +	adds r3=16,r2		// set up second base pointer for SAVE_REST
  39.975 +	;;
  39.976 +	VMX_SAVE_REST
  39.977 +	movl r14=ia64_leave_hypervisor
  39.978 +	;;
  39.979 +	mov rp=r14
  39.980 +	br.call.sptk.many b6=vmx_ia64_handle_irq
  39.981 +END(vmx_dispatch_interrupt)
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xen/arch/ia64/vmx_minstate.h	Mon May 23 15:29:59 2005 +0000
    40.3 @@ -0,0 +1,329 @@
    40.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    40.5 +/*
    40.6 + * vmx_minstate.h:
    40.7 + * Copyright (c) 2005, Intel Corporation.
    40.8 + *
    40.9 + * This program is free software; you can redistribute it and/or modify it
   40.10 + * under the terms and conditions of the GNU General Public License,
   40.11 + * version 2, as published by the Free Software Foundation.
   40.12 + *
   40.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   40.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   40.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   40.16 + * more details.
   40.17 + *
   40.18 + * You should have received a copy of the GNU General Public License along with
   40.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   40.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   40.21 + *
   40.22 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   40.23 + */
   40.24 +
   40.25 +#include <linux/config.h>
   40.26 +
   40.27 +#include <asm/asmmacro.h>
   40.28 +#include <asm/fpu.h>
   40.29 +#include <asm/mmu_context.h>
   40.30 +#include <asm/offsets.h>
   40.31 +#include <asm/pal.h>
   40.32 +#include <asm/pgtable.h>
   40.33 +#include <asm/processor.h>
   40.34 +#include <asm/ptrace.h>
   40.35 +#include <asm/system.h>
   40.36 +#include <asm/vmx_pal_vsa.h>
   40.37 +#include <asm/vmx_vpd.h>
   40.38 +#include <asm/cache.h>
   40.39 +#include "entry.h"
   40.40 +
   40.41 +#define VMX_MINSTATE_START_SAVE_MIN         \
   40.42 +    mov ar.rsc=0;       /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
   40.43 +    ;;                                          \
   40.44 +    mov.m r28=ar.rnat;                                  \
   40.45 +    addl r22=IA64_RBS_OFFSET,r1;            /* compute base of RBS */       \
   40.46 +    ;;                                          \
   40.47 +    lfetch.fault.excl.nt1 [r22];                                \
   40.48 +    addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */  \
   40.49 +    mov r23=ar.bspstore;                /* save ar.bspstore */          \
   40.50 +    ;;                                          \
   40.51 +    mov ar.bspstore=r22;                /* switch to kernel RBS */      \
   40.52 +    ;;                                          \
   40.53 +    mov r18=ar.bsp;                                     \
   40.54 +    mov ar.rsc=0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */     \
   40.55 +
   40.56 +
   40.57 +
   40.58 +#define VMX_MINSTATE_END_SAVE_MIN           \
   40.59 +    bsw.1;          /* switch back to bank 1 (must be last in insn group) */    \
   40.60 +    ;;
   40.61 +
   40.62 +
   40.63 +#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
    40.64 +    /* call pal vps sync_read and clean up psr.pl */     \
   40.65 +    add r25=IA64_VPD_BASE_OFFSET, r21;       \
   40.66 +    movl r20=__vsa_base;     \
   40.67 +    ;;          \
   40.68 +    ld8 r25=[r25];      /* read vpd base */     \
   40.69 +    ld8 r20=[r20];      /* read entry point */  \
   40.70 +    ;;      \
   40.71 +    mov r6=r25;     \
   40.72 +    add r20=PAL_VPS_SYNC_READ,r20;  \
   40.73 +    ;;  \
   40.74 +{ .mii;  \
   40.75 +    add r22=VPD(VPSR),r25;   \
   40.76 +    mov r24=ip;        \
   40.77 +    mov b0=r20;     \
   40.78 +    ;;      \
   40.79 +};           \
   40.80 +{ .mmb;      \
   40.81 +    add r24 = 0x20, r24;    \
   40.82 +    mov r16 = cr.ipsr;  /* Temp workaround since psr.ic is off */ \
   40.83 +    br.cond.sptk b0;        /*  call the service */ \
   40.84 +    ;;              \
   40.85 +};           \
   40.86 +    ld8 r7=[r22];   \
    40.87 +    /* deposit the ipsr cpl bits into vpd.vpsr, since epc will change them */    \
   40.88 +    extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
   40.89 +    ;;      \
   40.90 +    dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2;   \
   40.91 +    ;;      \
   40.92 +    extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
   40.93 +    ;;      \
   40.94 +    dep r7=r30, r7, IA64_PSR_BE_BIT, 5;   \
   40.95 +    ;;      \
   40.96 +    extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
   40.97 +    ;;      \
   40.98 +    dep r7=r30, r7, IA64_PSR_RI_BIT, 2;   \
   40.99 +    ;;      \
  40.100 +    st8 [r22]=r7;      \
  40.101 +    ;;
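/*
 * In C terms, the extr.u/dep pairs above copy three fields of the machine
 * cr.ipsr (captured in r16 before the call, as a workaround while psr.ic
 * is off) into the vpd.vpsr image that PAL_VPS_SYNC_READ just refreshed --
 * roughly:
 *
 *     vpsr = dep(vpsr, extr(ipsr, IA64_PSR_CPL0_BIT, 2));  // cpl
 *     vpsr = dep(vpsr, extr(ipsr, IA64_PSR_BE_BIT,  5));   // be..mfh
 *     vpsr = dep(vpsr, extr(ipsr, IA64_PSR_RI_BIT,  2));   // ri
 *
 * where each field lands back at the bit position it came from.  The
 * 5-bit group at IA64_PSR_BE_BIT is assumed to cover be/up/ac/mfl/mfh in
 * the architected PSR layout.
 */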
  40.102 +
  40.103 +
  40.104 +
  40.105 +#define IA64_CURRENT_REG    IA64_KR(CURRENT)  /* r21 is reserved for current pointer */
  40.106 +//#define VMX_MINSTATE_GET_CURRENT(reg)   mov reg=IA64_CURRENT_REG
  40.107 +#define VMX_MINSTATE_GET_CURRENT(reg)   mov reg=r21
  40.108 +
  40.109 +/*
  40.110 + * VMX_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  40.111 + * the minimum state necessary that allows us to turn psr.ic back
  40.112 + * on.
  40.113 + *
  40.114 + * Assumed state upon entry:
  40.115 + *  psr.ic: off
  40.116 + *  r31:    contains saved predicates (pr)
  40.117 + *
  40.118 + * Upon exit, the state is as follows:
  40.119 + *  psr.ic: off
  40.120 + *   r2 = points to &pt_regs.r16
  40.121 + *   r8 = contents of ar.ccv
  40.122 + *   r9 = contents of ar.csd
  40.123 + *  r10 = contents of ar.ssd
  40.124 + *  r11 = FPSR_DEFAULT
  40.125 + *  r12 = kernel sp (kernel virtual address)
  40.126 + *  r13 = points to current task_struct (kernel virtual address)
  40.127 + *  p15 = TRUE if psr.i is set in cr.ipsr
  40.128 + *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
  40.129 + *      preserved
  40.130 + *
  40.131 + * Note that psr.ic is NOT turned on by this macro.  This is so that
  40.132 + * we can pass interruption state as arguments to a handler.
  40.133 + */
  40.134 +#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
  40.135 +/*  switch rr7 */       \
  40.136 +    movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
  40.137 +    movl r17=(7<<61);        \
  40.138 +    movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
  40.139 +    movl r22=(6<<61);        \
  40.140 +    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);		\
  40.141 +    movl r23=(5<<61);	\
  40.142 +    ;;              \
  40.143 +    mov rr[r17]=r16;             \
  40.144 +    mov rr[r22]=r20;		 \
  40.145 +    mov rr[r23]=r18;		 \
  40.146 +    ;;      \
  40.147 +    srlz.i;      \
  40.148 +    ;;  \
  40.149 +    VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
  40.150 +    mov r27=ar.rsc;         /* M */                         \
  40.151 +    mov r20=r1;         /* A */                         \
  40.152 +    mov r26=ar.unat;        /* M */                         \
  40.153 +    mov r29=cr.ipsr;        /* M */                         \
  40.154 +    COVER;              /* B;; (or nothing) */                  \
  40.155 +    ;;                                          \
  40.156 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
  40.157 +(p6) br.sptk.few vmx_panic;        \
  40.158 +    mov r1=r16;                     \
  40.159 +/*    mov r21=r16;	*/		\
  40.160 +    /* switch from user to kernel RBS: */                           \
  40.161 +    ;;                                          \
  40.162 +    invala;             /* M */                         \
  40.163 +    SAVE_IFS;                                       \
  40.164 +    ;;                                          \
  40.165 +    VMX_MINSTATE_START_SAVE_MIN                                 \
  40.166 +    adds r17=2*L1_CACHE_BYTES,r1;       /* really: biggest cache-line size */       \
  40.167 +    adds r16=PT(CR_IPSR),r1;                                \
  40.168 +    ;;                                          \
  40.169 +    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;                     \
  40.170 +    st8 [r16]=r29;      /* save cr.ipsr */                      \
  40.171 +    ;;                                          \
  40.172 +    lfetch.fault.excl.nt1 [r17];                                \
  40.173 +    tbit.nz p15,p0=r29,IA64_PSR_I_BIT;                          \
  40.174 +    mov r29=b0                                      \
  40.175 +    ;;                                          \
  40.176 +    adds r16=PT(R8),r1; /* initialize first base pointer */             \
  40.177 +    adds r17=PT(R9),r1; /* initialize second base pointer */                \
  40.178 +    ;;                                          \
  40.179 +.mem.offset 0,0; st8.spill [r16]=r8,16;                             \
  40.180 +.mem.offset 8,0; st8.spill [r17]=r9,16;                             \
  40.181 +        ;;                                          \
  40.182 +.mem.offset 0,0; st8.spill [r16]=r10,24;                            \
  40.183 +.mem.offset 8,0; st8.spill [r17]=r11,24;                            \
  40.184 +        ;;                                          \
  40.185 +    mov r8=ar.pfs;         /* I */                         \
  40.186 +    mov r9=cr.iip;         /* M */                         \
  40.187 +    mov r10=ar.fpsr;        /* M */                         \
  40.188 +        ;;                      \
  40.189 +    st8 [r16]=r9,16;    /* save cr.iip */                       \
  40.190 +    st8 [r17]=r30,16;   /* save cr.ifs */                       \
  40.191 +    sub r18=r18,r22;    /* r18=RSE.ndirty*8 */                      \
  40.192 +    ;;          \
  40.193 +    st8 [r16]=r26,16;   /* save ar.unat */                      \
  40.194 +    st8 [r17]=r8,16;    /* save ar.pfs */                       \
  40.195 +    shl r18=r18,16;     /* compute ar.rsc to be used for "loadrs" */            \
  40.196 +    ;;                                          \
  40.197 +    st8 [r16]=r27,16;   /* save ar.rsc */                       \
  40.198 +    st8 [r17]=r28,16;   /* save ar.rnat */                      \
  40.199 +    ;;          /* avoid RAW on r16 & r17 */                    \
  40.200 +    st8 [r16]=r23,16;   /* save ar.bspstore */                      \
  40.201 +    st8 [r17]=r31,16;   /* save predicates */                       \
  40.202 +    ;;                                          \
  40.203 +    st8 [r16]=r29,16;   /* save b0 */                           \
  40.204 +    st8 [r17]=r18,16;   /* save ar.rsc value for "loadrs" */                \
  40.205 +    ;;                                          \
  40.206 +.mem.offset 0,0; st8.spill [r16]=r20,16;    /* save original r1 */              \
  40.207 +.mem.offset 8,0; st8.spill [r17]=r12,16;                            \
  40.208 +    adds r12=-16,r1;    /* switch to kernel memory stack (with 16 bytes of scratch) */  \
  40.209 +    ;;                                          \
  40.210 +.mem.offset 0,0; st8.spill [r16]=r13,16;                            \
  40.211 +.mem.offset 8,0; st8.spill [r17]=r10,16;    /* save ar.fpsr */              \
  40.212 +    mov r13=r21;   /* establish `current' */               \
  40.213 +    ;;                                          \
  40.214 +.mem.offset 0,0; st8.spill [r16]=r15,16;                            \
  40.215 +.mem.offset 8,0; st8.spill [r17]=r14,16;                            \
  40.216 +    ;;                                          \
  40.217 +.mem.offset 0,0; st8.spill [r16]=r2,16;                             \
  40.218 +.mem.offset 8,0; st8.spill [r17]=r3,16;                             \
  40.219 +    adds r2=PT(F6),r1;                         \
  40.220 +    ;;                                          \
  40.221 + .mem.offset 0,0; st8.spill [r16]=r4,16;                             \
  40.222 + .mem.offset 8,0; st8.spill [r17]=r5,16;                             \
  40.223 +    ;;          \
  40.224 + .mem.offset 0,0; st8.spill [r16]=r6,16;     \
  40.225 + .mem.offset 8,0; st8.spill [r17]=r7,16;     \
  40.226 +    mov r20=ar.ccv;      \
  40.227 +    ;;  \
  40.228 +  mov r18=cr.iipa;  \
  40.229 +  mov r4=cr.isr;   \
  40.230 +  mov r22=ar.unat;    \
  40.231 +    ;;  \
  40.232 +  st8 [r16]=r18,16;      \
  40.233 +  st8 [r17]=r4;      \
  40.234 +    ;;      \
  40.235 +    adds r16=PT(EML_UNAT),r1;   \
  40.236 +    adds r17=PT(AR_CCV),r1;                 \
  40.237 +    ;;                      \
  40.238 +    st8 [r16]=r22,8;     \
  40.239 +    st8 [r17]=r20;       \
  40.240 +    mov r4=r24;         \
  40.241 +    mov r5=r25;         \
  40.242 +     ;;  \
  40.243 +    st8 [r16]=r0;  \
  40.244 +    EXTRA;                                          \
  40.245 +    mov r9=ar.csd;                                      \
  40.246 +    mov r10=ar.ssd;                                     \
  40.247 +    movl r11=FPSR_DEFAULT;   /* L-unit */                           \
  40.248 +    movl r1=__gp;       /* establish kernel global pointer */               \
  40.249 +    ;;                                          \
  40.250 +    PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
  40.251 +    VMX_MINSTATE_END_SAVE_MIN
  40.252 +
  40.253 +/*
  40.254 + * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
  40.255 + *
  40.256 + * Assumed state upon entry:
  40.257 + *  psr.ic: on
  40.258 + *  r2: points to &pt_regs.f6
  40.259 + *  r3: points to &pt_regs.f7
  40.260 + *  r4, r5: scratch
  40.261 + *  r6: points to vpd
  40.262 + *  r7: vpsr
  40.263 + *  r9: contents of ar.csd
  40.264 + *  r10:    contents of ar.ssd
  40.265 + *  r11:    FPSR_DEFAULT
  40.266 + *
  40.267 + * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
  40.268 + */
  40.269 +#define VMX_SAVE_REST               \
  40.270 +    tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT;  /* guest bank0 or bank1 ? */      \
  40.271 +    ;;      \
  40.272 +(pBN0) add r4=VPD(VBGR),r6;     \
  40.273 +(pBN0) add r5=VPD(VBGR)+0x8,r6;     \
  40.274 +(pBN0) add r7=VPD(VBNAT),r6;     \
  40.275 +    ;;      \
  40.276 +(pBN1) add r5=VPD(VGR)+0x8,r6;      \
  40.277 +(pBN1) add r4=VPD(VGR),r6;      \
  40.278 +(pBN1) add r7=VPD(VNAT),r6;      \
  40.279 +    ;;      \
  40.280 +.mem.offset 0,0; st8.spill [r4]=r16,16;     \
  40.281 +.mem.offset 8,0; st8.spill [r5]=r17,16;     \
  40.282 +    ;;                  \
  40.283 +.mem.offset 0,0; st8.spill [r4]=r18,16;     \
  40.284 +.mem.offset 8,0; st8.spill [r5]=r19,16;     \
  40.285 +    ;;                  \
  40.286 +.mem.offset 0,0; st8.spill [r4]=r20,16;     \
  40.287 +.mem.offset 8,0; st8.spill [r5]=r21,16;     \
  40.288 +    mov r18=b6;         \
  40.289 +    ;;                  \
  40.290 +.mem.offset 0,0; st8.spill [r4]=r22,16;     \
  40.291 +.mem.offset 8,0; st8.spill [r5]=r23,16;     \
  40.292 +    mov r19=b7;     \
  40.293 +    ;;                  \
  40.294 +.mem.offset 0,0; st8.spill [r4]=r24,16;     \
  40.295 +.mem.offset 8,0; st8.spill [r5]=r25,16;     \
  40.296 +    ;;                  \
  40.297 +.mem.offset 0,0; st8.spill [r4]=r26,16;     \
  40.298 +.mem.offset 8,0; st8.spill [r5]=r27,16;     \
  40.299 +    ;;                  \
  40.300 +.mem.offset 0,0; st8.spill [r4]=r28,16;     \
  40.301 +.mem.offset 8,0; st8.spill [r5]=r29,16;     \
  40.302 +    ;;                  \
  40.303 +.mem.offset 0,0; st8.spill [r4]=r30,16;     \
  40.304 +.mem.offset 8,0; st8.spill [r5]=r31,16;     \
  40.305 +    ;;                  \
  40.306 +    mov r30=ar.unat;    \
  40.307 +    ;;      \
  40.308 +    st8 [r7]=r30;       \
  40.309 +    mov ar.fpsr=r11;    /* M-unit */    \
  40.310 +    ;;                  \
  40.311 +    stf.spill [r2]=f6,32;           \
  40.312 +    stf.spill [r3]=f7,32;           \
  40.313 +    ;;                  \
  40.314 +    stf.spill [r2]=f8,32;           \
  40.315 +    stf.spill [r3]=f9,32;           \
  40.316 +    ;;                  \
  40.317 +    stf.spill [r2]=f10;         \
  40.318 +    stf.spill [r3]=f11;         \
  40.319 +    ;;                  \
  40.320 +    adds r2=PT(B6)-PT(F10),r2;      \
  40.321 +    adds r3=PT(B7)-PT(F11),r3;      \
  40.322 +    ;;          \
  40.323 +    st8 [r2]=r18,16;       /* b6 */    \
  40.324 +    st8 [r3]=r19,16;       /* b7 */    \
  40.325 +    ;;                  \
  40.326 +    st8 [r2]=r9;           /* ar.csd */    \
  40.327 +    st8 [r3]=r10;          /* ar.ssd */    \
  40.328 +    ;;
  40.329 +
  40.330 +#define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
  40.331 +#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
  40.332 +#define VMX_SAVE_MIN      VMX_DO_SAVE_MIN(     , mov r30=r0, )
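/*
 * The three variants differ only in the VMX_DO_SAVE_MIN arguments: the
 * _WITH_COVER forms issue `cover` and latch cr.ifs into r30 (valid after
 * an interruption), while plain VMX_SAVE_MIN stores 0 instead; the _R19
 * form additionally uses the EXTRA hook to copy the vector-specific value
 * in r19 into r15 for the handler.
 */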
    41.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.2 +++ b/xen/arch/ia64/vmx_phy_mode.c	Mon May 23 15:29:59 2005 +0000
    41.3 @@ -0,0 +1,393 @@
    41.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    41.5 +/*
    41.6 + * vmx_phy_mode.c: emulating domain physical mode.
    41.7 + * Copyright (c) 2005, Intel Corporation.
    41.8 + *
    41.9 + * This program is free software; you can redistribute it and/or modify it
   41.10 + * under the terms and conditions of the GNU General Public License,
   41.11 + * version 2, as published by the Free Software Foundation.
   41.12 + *
   41.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   41.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   41.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   41.16 + * more details.
   41.17 + *
   41.18 + * You should have received a copy of the GNU General Public License along with
   41.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   41.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   41.21 + *
   41.22 + * Arun Sharma (arun.sharma@intel.com)
   41.23 + * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
   41.24 + * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
   41.25 + */
   41.26 +
   41.27 +
   41.28 +#include <asm/processor.h>
   41.29 +#include <asm/gcc_intrin.h>
   41.30 +#include <asm/vmx_phy_mode.h>
   41.31 +#include <xen/sched.h>
   41.32 +#include <asm/pgtable.h>
   41.33 +
   41.34 +
   41.35 +int valid_mm_mode[8] = {
   41.36 +    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
   41.37 +    INV_MODE,
   41.38 +    INV_MODE,
   41.39 +    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
   41.40 +    INV_MODE,
   41.41 +    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
   41.42 +    INV_MODE,
   41.43 +    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/
   41.44 +};
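/*
 * Illustrative sketch: this table (and mm_switch_table below) is indexed
 * by the guest (it, dt, rt) bits packed into a 3-bit value, assuming
 * MODE_IND in vmx_phy_mode.h follows the usual ordering with psr.it as
 * the most significant bit:
 *
 *     #define MODE_IND(psr)  (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
 *
 * so (1,1,1) indexes entry 7 (GUEST_VIRT) and (0,0,0) entry 0 (GUEST_PHYS).
 */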
   41.45 +
   41.46 +/*
   41.47 + * Special notes:
   41.48 + * - Index by it/dt/rt sequence
   41.49 + * - Only existing mode transitions are allowed in this table
   41.50 + * - RSE is placed at lazy mode when emulating guest partial mode
   41.51 + * - If gva happens to be rr0 and rr4, only allowed case is identity
   41.52 + *   mapping (gva=gpa), or panic! (How?)
   41.53 + */
   41.54 +int mm_switch_table[8][8] = {
    41.55 +    /*  2004/09/12(Kevin): Allow switch to self */
    41.56 +    /*
    41.57 +     *  (it,dt,rt): (0,0,0) -> (1,1,1)
    41.58 +     *  This kind of transition usually occurs in the very early
    41.59 +     *  stage of the Linux boot-up procedure. Another case is in
    41.60 +     *  efi and pal calls. (see "arch/ia64/kernel/head.S")
    41.61 +     *
    41.62 +     *  (it,dt,rt): (0,0,0) -> (0,1,1)
    41.63 +     *  This kind of transition is found when OSYa exits the efi
    41.64 +     *  boot service. Since gva = gpa in this case (same region),
    41.65 +     *  data accesses can be satisfied even though the itlb entry
    41.66 +     *  for physical emulation is hit.
    41.67 +     */
   41.68 +    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
   41.69 +    0,  0,  0,  0,  0,  0,  0,  0,
   41.70 +    0,  0,  0,  0,  0,  0,  0,  0,
   41.71 +    /*
   41.72 +     *  (it,dt,rt): (0,1,1) -> (1,1,1)
   41.73 +     *  This kind of transition is found in OSYa.
   41.74 +     *
   41.75 +     *  (it,dt,rt): (0,1,1) -> (0,0,0)
    41.76 +     *  This kind of transition is found in OSYa.
   41.77 +     */
   41.78 +    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
   41.79 +    /* (1,0,0)->(1,1,1) */
   41.80 +    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
    41.81 +    /*
    41.82 +     *  (it,dt,rt): (1,0,1) -> (1,1,1)
    41.83 +     *  This kind of transition usually occurs when Linux returns
    41.84 +     *  from the low level TLB miss handlers.
    41.85 +     *  (see "arch/ia64/kernel/ivt.S")
    41.86 +     */
   41.87 +    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
   41.88 +    0,  0,  0,  0,  0,  0,  0,  0,
    41.89 +    /*
    41.90 +     *  (it,dt,rt): (1,1,1) -> (1,0,1)
    41.91 +     *  This kind of transition usually occurs in the Linux low level
    41.92 +     *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
    41.93 +     *
    41.94 +     *  (it,dt,rt): (1,1,1) -> (0,0,0)
    41.95 +     *  This kind of transition usually occurs in pal and efi calls,
    41.96 +     *  which require running in physical mode.
    41.97 +     *  (see "arch/ia64/kernel/head.S")
    41.98 +     *  (1,1,1) -> (1,0,0)
    41.99 +     */
  41.100 +
  41.101 +    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
  41.102 +};
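/*
 * Worked example, using the indexing above: a guest switching from bare
 * physical mode to full virtual mode, (it,dt,rt) = (0,0,0) -> (1,1,1),
 * reads mm_switch_table[0][7] == SW_P2V, which drives the physical-to-
 * virtual rid switch in switch_mm_mode(); the reverse transition reads
 * mm_switch_table[7][0] == SW_V2P.
 */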
  41.103 +
  41.104 +void
  41.105 +physical_mode_init(VCPU *vcpu)
  41.106 +{
  41.107 +    UINT64 psr;
  41.108 +    struct domain * d = vcpu->domain;
  41.109 +
  41.110 +    vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID+((d->domain_id)<<3);
  41.111 +    /* FIXME */
  41.112 +#if 0
  41.113 +    vcpu->domain->arch.emul_phy_rr0.ps = 28;  /* set page size to 256M */
  41.114 +#endif
   41.115 +    vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
  41.116 +    vcpu->domain->arch.emul_phy_rr0.ve = 1; /* enable VHPT walker on this region */
  41.117 +
  41.118 +    vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->domain_id)<<3) + 4;
  41.119 +    vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
  41.120 +    vcpu->domain->arch.emul_phy_rr4.ve = 1; /* enable VHPT walker on this region */
  41.121 +
  41.122 +    vcpu->arch.old_rsc = 0;
  41.123 +    vcpu->arch.mode_flags = GUEST_IN_PHY;
  41.124 +
  41.125 +    psr = ia64_clear_ic();
  41.126 +
  41.127 +    ia64_set_rr((VRN0<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr0.rrval);
  41.128 +    ia64_srlz_d();
  41.129 +    ia64_set_rr((VRN4<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr4.rrval);
  41.130 +    ia64_srlz_d();
  41.131 +#if 0
  41.132 +    /* FIXME: temp workaround to support guest physical mode */
  41.133 +ia64_itr(0x1, IA64_TEMP_PHYSICAL, dom0_start,
  41.134 +	 pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
  41.135 +	 28);
  41.136 +ia64_itr(0x2, IA64_TEMP_PHYSICAL, dom0_start,
  41.137 +	 pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
  41.138 +	 28);
  41.139 +ia64_srlz_i();
  41.140 +#endif
  41.141 +    ia64_set_psr(psr);
  41.142 +    ia64_srlz_i();
  41.143 +    return;
  41.144 +}
  41.145 +
  41.146 +extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
  41.147 +void
  41.148 +physical_itlb_miss(VCPU *vcpu, u64 vadr)
  41.149 +{
  41.150 +    u64 psr;
  41.151 +    IA64_PSR vpsr;
  41.152 +    u64 mppn,gppn;
  41.153 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  41.154 +    gppn=(vadr<<1)>>13;
  41.155 +    mppn = get_mfn(DOMID_SELF,gppn,1);
  41.156 +    mppn=(mppn<<12)|(vpsr.cpl<<7)|PHY_PAGE_WB;
  41.157 +
  41.158 +    psr=ia64_clear_ic();
  41.159 +    ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
  41.160 +    ia64_set_psr(psr);
  41.161 +    ia64_srlz_i();
  41.162 +    return;
  41.163 +}
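/*
 * A note on the address math above, assuming 4K emulated pages
 * (EMUL_PHY_PAGE_SHIFT == 12): `gppn = (vadr << 1) >> 13` is equivalent
 * to `(vadr & ~(1UL << 63)) >> 12`, i.e. it strips bit 63 and converts
 * the metaphysical address to a guest pfn.  The pte is then assembled by
 * hand: `(mfn << 12) | (cpl << 7) | PHY_PAGE_WB` places the machine pfn
 * and the privilege level (pte.pl, bits 8:7) in their architected fields,
 * with PHY_PAGE_WB supplying the remaining present/attribute bits.
 */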
  41.164 +
  41.165 +void
  41.166 +physical_dtlb_miss(VCPU *vcpu, u64 vadr)
  41.167 +{
  41.168 +    u64 psr;
  41.169 +    IA64_PSR vpsr;
  41.170 +    u64 mppn,gppn;
  41.171 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  41.172 +    gppn=(vadr<<1)>>13;
  41.173 +    mppn = get_mfn(DOMID_SELF,gppn,1);
  41.174 +    mppn=(mppn<<12)|(vpsr.cpl<<7);
  41.175 +    if(vadr>>63)
  41.176 +        mppn |= PHY_PAGE_UC;
  41.177 +    else
  41.178 +        mppn |= PHY_PAGE_WB;
  41.179 +
  41.180 +    psr=ia64_clear_ic();
  41.181 +    ia64_itc(2,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
  41.182 +    ia64_set_psr(psr);
  41.183 +    ia64_srlz_i();
  41.184 +    return;
  41.185 +}
  41.186 +
  41.187 +void
  41.188 +vmx_init_all_rr(VCPU *vcpu)
  41.189 +{
  41.190 +	VMX(vcpu,vrr[VRN0]) = 0x38;
  41.191 +	VMX(vcpu,vrr[VRN1]) = 0x38;
  41.192 +	VMX(vcpu,vrr[VRN2]) = 0x38;
  41.193 +	VMX(vcpu,vrr[VRN3]) = 0x38;
  41.194 +	VMX(vcpu,vrr[VRN4]) = 0x38;
  41.195 +	VMX(vcpu,vrr[VRN5]) = 0x38;
  41.196 +	VMX(vcpu,vrr[VRN6]) = 0x60;
  41.197 +	VMX(vcpu,vrr[VRN7]) = 0x60;
  41.198 +
  41.199 +	VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
  41.200 +	VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
  41.201 +	VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
  41.202 +}
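/*
 * The constants above are raw rr values.  Assuming the architected region
 * register layout (ve in bit 0, ps in bits 7:2, rid from bit 8 up), 0x38
 * encodes ps = 14 (16K pages) and 0x60 encodes ps = 24 (16M pages), both
 * with ve = 0 and rid = 0; vmx_vrrtomrr() then maps the virtual rid into
 * the machine rid space.
 */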
  41.203 +
  41.204 +void
  41.205 +vmx_load_all_rr(VCPU *vcpu)
  41.206 +{
  41.207 +	unsigned long psr;
  41.208 +
  41.209 +	psr = ia64_clear_ic();
  41.210 +
   41.211 +	/* WARNING: virtual mode and physical mode must not coexist
   41.212 +	 * in the same region
   41.213 +	 */
  41.214 +	if (is_physical_mode(vcpu)) {
  41.215 +		ia64_set_rr((VRN0 << VRN_SHIFT),
  41.216 +			     vcpu->domain->arch.emul_phy_rr0.rrval);
  41.217 +		ia64_set_rr((VRN4 << VRN_SHIFT),
  41.218 +			     vcpu->domain->arch.emul_phy_rr4.rrval);
  41.219 +	} else {
  41.220 +		ia64_set_rr((VRN0 << VRN_SHIFT),
  41.221 +			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
  41.222 +		ia64_set_rr((VRN4 << VRN_SHIFT),
  41.223 +			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
  41.224 +	}
  41.225 +
  41.226 +#if 1
  41.227 +	/* rr567 will be postponed to last point when resuming back to guest */
  41.228 +	ia64_set_rr((VRN1 << VRN_SHIFT),
  41.229 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
  41.230 +	ia64_set_rr((VRN2 << VRN_SHIFT),
  41.231 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
  41.232 +	ia64_set_rr((VRN3 << VRN_SHIFT),
  41.233 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
  41.234 +#endif
  41.235 +	ia64_srlz_d();
  41.236 +	ia64_set_psr(psr);
   41.237 +	ia64_srlz_i();
  41.238 +}
  41.239 +
  41.240 +void
  41.241 +switch_to_physical_rid(VCPU *vcpu)
  41.242 +{
  41.243 +    UINT64 psr;
  41.244 +
  41.245 +    /* Save original virtual mode rr[0] and rr[4] */
   41.246 +    /* Load the physical-mode emulation rids into rr[0] and rr[4] */
  41.247 +    psr=ia64_clear_ic();
  41.248 +    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval);
  41.249 +    ia64_srlz_d();
  41.250 +    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval);
  41.251 +    ia64_srlz_d();
  41.252 +
  41.253 +    ia64_set_psr(psr);
  41.254 +    ia64_srlz_i();
  41.255 +    return;
  41.256 +}
  41.257 +
  41.258 +
  41.259 +void
  41.260 +switch_to_virtual_rid(VCPU *vcpu)
  41.261 +{
  41.262 +    UINT64 psr;
  41.263 +    ia64_rr mrr;
  41.264 +
  41.265 +    psr=ia64_clear_ic();
  41.266 +
  41.267 +    mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
  41.268 +    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  41.269 +//VRID_2_MRID(vcpu,mrr.rid);
  41.270 +    mrr.ve = 1;
  41.271 +    ia64_set_rr(VRN0<<VRN_SHIFT, mrr.rrval );
  41.272 +    ia64_srlz_d();
  41.273 +    mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
  41.274 +    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  41.275 +    mrr.ve = 1;
  41.276 +    ia64_set_rr(VRN4<<VRN_SHIFT, mrr.rrval );
  41.277 +    ia64_srlz_d();
  41.278 +    ia64_set_psr(psr);
  41.279 +    ia64_srlz_i();
  41.280 +    return;
  41.281 +}
  41.282 +
  41.283 +static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
  41.284 +{
  41.285 +    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
  41.286 +}
  41.287 +
  41.288 +void
  41.289 +switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
  41.290 +{
  41.291 +    int act;
  41.292 +    REGS * regs=vcpu_regs(vcpu);
  41.293 +    act = mm_switch_action(old_psr, new_psr);
  41.294 +    switch (act) {
  41.295 +    case SW_V2P:
  41.296 +        vcpu->arch.old_rsc = regs->ar_rsc;
  41.297 +        switch_to_physical_rid(vcpu);
  41.298 +        /*
   41.299 +         * Set RSE to enforced lazy mode, to prevent active RSE
   41.300 +         * save/restore while in guest physical mode.
  41.301 +         */
  41.302 +        regs->ar_rsc &= ~(IA64_RSC_MODE);
  41.303 +        vcpu->arch.mode_flags |= GUEST_IN_PHY;
  41.304 +        break;
  41.305 +    case SW_P2V:
  41.306 +        switch_to_virtual_rid(vcpu);
  41.307 +        /*
   41.308 +         * restore the old RSE mode that was saved when entering
   41.309 +         * guest physical mode
  41.310 +         */
  41.311 +        regs->ar_rsc = vcpu->arch.old_rsc;
  41.312 +        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
  41.313 +        break;
  41.314 +    case SW_SELF:
  41.315 +        printf("Switch to self-0x%lx!!! MM mode doesn't change...\n",
  41.316 +            old_psr.val);
  41.317 +        break;
  41.318 +    case SW_NOP:
  41.319 +        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
  41.320 +            old_psr.val, new_psr.val);
  41.321 +        break;
  41.322 +    default:
  41.323 +        /* Sanity check */
   41.324 +        printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
  41.325 +        panic("Unexpected virtual <--> physical mode transition");
  41.326 +        break;
  41.327 +    }
  41.328 +    return;
  41.329 +}
  41.330 +
  41.348 +
  41.349 +void
  41.350 +check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr)
  41.351 +{
  41.352 +
  41.353 +    if ( (old_psr.dt != new_psr.dt ) ||
  41.354 +         (old_psr.it != new_psr.it ) ||
  41.355 +         (old_psr.rt != new_psr.rt )
  41.356 +         ) {
  41.357 +        switch_mm_mode (vcpu, old_psr, new_psr);
  41.358 +    }
  41.359 +
   41.360 +    return;
  41.361 +}
  41.362 +
  41.363 +
   41.364 +/*
   41.365 + * In physical mode, tc/tr inserts for regions 0 and 4 use RID[0] and
   41.366 + * RID[4], which are reserved for physical mode emulation. However,
   41.367 + * what those inserted tc/tr entries want is the rid for virtual mode,
   41.368 + * so the original virtual rid needs to be restored before the insert.
   41.369 + *
   41.370 + * Operations which require such a switch include:
   41.371 + *  - insertions (itc.*, itr.*)
   41.372 + *  - purges (ptc.* and ptr.*)
   41.373 + *  - tpa
   41.374 + *  - tak
   41.375 + *  - thash?, ttag?
   41.376 + * All of the above need the actual virtual rid for the
   41.377 + * destination entry.
   41.378 + */
  41.379 +
  41.380 +void
  41.381 +prepare_if_physical_mode(VCPU *vcpu)
  41.382 +{
  41.383 +    if (is_physical_mode(vcpu))
  41.384 +        switch_to_virtual_rid(vcpu);
  41.385 +    return;
  41.386 +}
  41.387 +
  41.388 +/* Recover always follows prepare */
  41.389 +void
  41.390 +recover_if_physical_mode(VCPU *vcpu)
  41.391 +{
  41.392 +    if (is_physical_mode(vcpu))
  41.393 +        switch_to_physical_rid(vcpu);
  41.394 +    return;
  41.395 +}
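/*
 * A typical usage pattern for the pair above, sketched under the
 * assumption that the caller is emulating a guest insert such as itc.d:
 *
 *     prepare_if_physical_mode(vcpu);     // load virtual rids
 *     ...emulate the itc/itr/ptc/tpa operation...
 *     recover_if_physical_mode(vcpu);     // back to physical-mode rids
 *
 * so the translation lands under the guest's virtual rid even while the
 * guest runs in emulated physical mode.
 */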
  41.396 +
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/xen/arch/ia64/vmx_process.c	Mon May 23 15:29:59 2005 +0000
    42.3 @@ -0,0 +1,345 @@
    42.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    42.5 +/*
    42.6 + * vmx_process.c: handling VMX architecture-related VM exits
    42.7 + * Copyright (c) 2005, Intel Corporation.
    42.8 + *
    42.9 + * This program is free software; you can redistribute it and/or modify it
   42.10 + * under the terms and conditions of the GNU General Public License,
   42.11 + * version 2, as published by the Free Software Foundation.
   42.12 + *
   42.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   42.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   42.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   42.16 + * more details.
   42.17 + *
   42.18 + * You should have received a copy of the GNU General Public License along with
   42.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   42.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   42.21 + *
   42.22 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   42.23 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   42.24 + */
   42.25 +
   42.26 +#include <xen/config.h>
   42.27 +#include <xen/lib.h>
   42.28 +#include <xen/errno.h>
   42.29 +#include <xen/sched.h>
   42.30 +#include <xen/smp.h>
   42.31 +#include <asm/ptrace.h>
   42.32 +#include <xen/delay.h>
   42.33 +
   42.34 +#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
   42.35 +#include <asm/sal.h>    /* FOR struct ia64_sal_retval */
   42.36 +
   42.37 +#include <asm/system.h>
   42.38 +#include <asm/io.h>
   42.39 +#include <asm/processor.h>
   42.40 +#include <asm/desc.h>
   42.41 +//#include <asm/ldt.h>
   42.42 +#include <xen/irq.h>
   42.43 +#include <xen/event.h>
   42.44 +#include <asm/regionreg.h>
   42.45 +#include <asm/privop.h>
   42.46 +#include <asm/ia64_int.h>
   42.47 +#include <asm/hpsim_ssc.h>
   42.48 +#include <asm/dom_fw.h>
   42.49 +#include <asm/vmx_vcpu.h>
   42.50 +#include <asm/kregs.h>
   42.51 +#include <asm/vmx_mm_def.h>
    42.52 +/* reset all PSR fields to 0, except up, mfl, mfh, pk, dt, rt, mc, it */
   42.53 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
   42.54 +
   42.55 +
   42.56 +extern struct ia64_sal_retval pal_emulator_static(UINT64);
   42.57 +extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
   42.58 +extern void rnat_consumption (VCPU *vcpu);
   42.59 +
   42.60 +IA64FAULT
   42.61 +vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   42.62 +{
   42.63 +	static int first_time = 1;
   42.64 +	struct domain *d = (struct domain *) current->domain;
    42.65 +	struct exec_domain *ed = (struct exec_domain *) current;
   42.66 +	extern unsigned long running_on_sim;
   42.67 +	unsigned long i, sal_param[8];
   42.68 +
   42.69 +#if 0
   42.70 +	if (first_time) {
   42.71 +		if (platform_is_hp_ski()) running_on_sim = 1;
   42.72 +		else running_on_sim = 0;
   42.73 +		first_time = 0;
   42.74 +	}
   42.75 +	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
   42.76 +		if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
   42.77 +		else do_ssc(vcpu_get_gr(current,36), regs);
   42.78 +	}
   42.79 +#endif
   42.80 +	if (iim == d->breakimm) {
   42.81 +		struct ia64_sal_retval x;
   42.82 +		switch (regs->r2) {
   42.83 +		    case FW_HYPERCALL_PAL_CALL:
   42.84 +			//printf("*** PAL hypercall: index=%d\n",regs->r28);
   42.85 +			//FIXME: This should call a C routine
   42.86 +			x = pal_emulator_static(VMX_VPD(ed, vgr[12]));
   42.87 +			regs->r8 = x.status; regs->r9 = x.v0;
   42.88 +			regs->r10 = x.v1; regs->r11 = x.v2;
   42.89 +#if 0
   42.90 +			if (regs->r8)
   42.91 +				printk("Failed vpal emulation, with index:0x%lx\n",
   42.92 +					VMX_VPD(ed, vgr[12]));
   42.93 +#endif
   42.94 +			break;
   42.95 +		    case FW_HYPERCALL_SAL_CALL:
   42.96 +			for (i = 0; i < 8; i++)
   42.97 +				vmx_vcpu_get_gr(ed, 32+i, &sal_param[i]);
   42.98 +			x = sal_emulator(sal_param[0], sal_param[1],
   42.99 +					 sal_param[2], sal_param[3],
  42.100 +					 sal_param[4], sal_param[5],
  42.101 +					 sal_param[6], sal_param[7]);
  42.102 +			regs->r8 = x.status; regs->r9 = x.v0;
  42.103 +			regs->r10 = x.v1; regs->r11 = x.v2;
  42.104 +#if 0
  42.105 +			if (regs->r8)
  42.106 +				printk("Failed vsal emulation, with index:0x%lx\n",
  42.107 +					sal_param[0]);
  42.108 +#endif
  42.109 +			break;
  42.110 +		    case FW_HYPERCALL_EFI_RESET_SYSTEM:
  42.111 +			printf("efi.reset_system called ");
  42.112 +			if (current->domain == dom0) {
  42.113 +				printf("(by dom0)\n ");
  42.114 +				(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
  42.115 +			}
  42.116 +			printf("(not supported for non-0 domain)\n");
  42.117 +			regs->r8 = EFI_UNSUPPORTED;
  42.118 +			break;
  42.119 +		    case FW_HYPERCALL_EFI_GET_TIME:
  42.120 +			{
  42.121 +			unsigned long *tv, *tc;
  42.122 +			fooefi();
  42.123 +			vmx_vcpu_get_gr(ed, 32, &tv);
  42.124 +			vmx_vcpu_get_gr(ed, 33, &tc);
  42.125 +			printf("efi_get_time(%p,%p) called...",tv,tc);
  42.126 +			tv = __va(translate_domain_mpaddr(tv));
  42.127 +			if (tc) tc = __va(translate_domain_mpaddr(tc));
  42.128 +			regs->r8 = (*efi.get_time)(tv,tc);
  42.129 +			printf("and returns %lx\n",regs->r8);
  42.130 +			}
  42.131 +			break;
  42.132 +		    case FW_HYPERCALL_EFI_SET_TIME:
  42.133 +		    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
  42.134 +		    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
  42.135 +			// FIXME: need fixes in efi.h from 2.6.9
  42.136 +		    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
  42.137 +			// FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
  42.138 +			// SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
  42.139 +			// POINTER ARGUMENTS WILL BE VIRTUAL!!
  42.140 +		    case FW_HYPERCALL_EFI_GET_VARIABLE:
  42.141 +			// FIXME: need fixes in efi.h from 2.6.9
  42.142 +		    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
  42.143 +		    case FW_HYPERCALL_EFI_SET_VARIABLE:
  42.144 +		    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
  42.145 +			// FIXME: need fixes in efi.h from 2.6.9
  42.146 +			regs->r8 = EFI_UNSUPPORTED;
  42.147 +			break;
  42.148 +		}
  42.149 +#if 0
  42.150 +		if (regs->r8)
  42.151 +			printk("Failed vgfw emulation, with index:0x%lx\n",
  42.152 +				regs->r2);
  42.153 +#endif
  42.154 +		vmx_vcpu_increment_iip(current);
  42.155 +	} else
  42.156 +		vmx_reflect_interruption(ifa,isr,iim,11);
	return IA64_NO_FAULT;
   42.157 +}
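/*
 * The firmware hypercall convention visible above: the guest issues
 * `break` with the domain's breakimm; r2 selects the firmware entry point
 * (PAL/SAL/EFI), the SAL arguments travel in the stacked registers
 * r32-r39, and status plus up to three result values come back in r8-r11,
 * mirroring struct ia64_sal_retval.
 */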
  42.158 +
  42.159 +static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
  42.160 +    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
  42.161 +    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
  42.162 +    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
  42.163 +    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
  42.164 +    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
  42.165 +    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
  42.166 +    0x7f00,
  42.167 +};
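/*
 * vec2off maps an IVT entry number back to its vector offset, undoing the
 * uneven IVT spacing (0x400-byte entries up to 0x5000, 0x100-byte entries
 * from 0x5100 on).  For example, the vector argument 11 that
 * vmx_ia64_handle_break passes above becomes vec2off[11] == 0x2c00, the
 * Break Instruction vector.
 */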
  42.168 +
  42.169 +
  42.170 +
  42.171 +void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
  42.172 +     UINT64 vector)
  42.173 +{
  42.174 +    VCPU *vcpu = current;
  42.175 +    REGS *regs=vcpu_regs(vcpu);
   42.176 +    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
  42.177 +    if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
  42.178 +        panic("Guest nested fault!");
  42.179 +    }
  42.180 +    VPD_CR(vcpu,isr)=isr;
  42.181 +    VPD_CR(vcpu,iipa) = regs->cr_iip;
  42.182 +    vector=vec2off[vector];
  42.183 +    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
  42.184 +        VPD_CR(vcpu,iim) = iim;
  42.185 +    else {
  42.186 +        set_ifa_itir_iha(vcpu,ifa,1,1,1);
  42.187 +    }
  42.188 +    inject_guest_interruption(vcpu, vector);
  42.189 +}
  42.190 +
  42.191 +// ONLY gets called from ia64_leave_kernel
  42.192 +// ONLY call with interrupts disabled?? (else might miss one?)
  42.193 +// NEVER successful if already reflecting a trap/fault because psr.i==0
  42.194 +void vmx_deliver_pending_interrupt(struct pt_regs *regs)
  42.195 +{
  42.196 +	struct domain *d = current->domain;
  42.197 +	struct exec_domain *ed = current;
  42.198 +	// FIXME: Will this work properly if doing an RFI???
  42.199 +	if (!is_idle_task(d) ) {	// always comes from guest
  42.200 +		//vcpu_poke_timer(ed);
  42.201 +		//if (vcpu_deliverable_interrupts(ed)) {
  42.202 +		//	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
  42.203 +		//	foodpi();
  42.204 +		//	reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
  42.205 +		//}
  42.206 +	        extern void vmx_dorfirfi(void);
  42.207 +		struct pt_regs *user_regs = vcpu_regs(current);
  42.208 +
  42.209 +		if (user_regs != regs)
  42.210 +			printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
  42.211 +		if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
  42.212 +			return;
  42.213 +		vmx_check_pending_irq(ed);
  42.214 +	}
  42.215 +}
  42.216 +
  42.217 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
  42.218 +
  42.219 +/* We came here because the H/W VHPT walker failed to find an entry */
   42.220 +IA64FAULT vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
  42.221 +{
  42.222 +    IA64_PSR vpsr;
  42.223 +    CACHE_LINE_TYPE type;
  42.224 +    u64 vhpt_adr;
  42.225 +    ISR misr;
  42.226 +    ia64_rr vrr;
  42.227 +    REGS *regs;
  42.228 +    thash_cb_t *vtlb, *vhpt;
  42.229 +    thash_data_t *data, me;
  42.230 +    vtlb=vmx_vcpu_get_vtlb(vcpu);
  42.231 +#ifdef  VTLB_DEBUG
  42.232 +    check_vtlb_sanity(vtlb);
  42.233 +    dump_vtlb(vtlb);
  42.234 +#endif
  42.235 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  42.236 +    regs = vcpu_regs(vcpu);
  42.237 +    misr.val=regs->cr_isr;
  42.238 +/*  TODO
  42.239 +    if(vcpu->domain->id && vec == 2 &&
  42.240 +       vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
  42.241 +        emulate_ins(&v);
  42.242 +        return;
  42.243 +    }
  42.244 +*/
  42.245 +
   42.246 +    if((vec==1)&&(!vpsr.it)){
   42.247 +        physical_itlb_miss(vcpu, vadr);
   42.248 +        return IA64_NO_FAULT;
   42.249 +    }
   42.250 +    if((vec==2)&&(!vpsr.dt)){
   42.251 +        physical_dtlb_miss(vcpu, vadr);
   42.252 +        return IA64_NO_FAULT;
   42.253 +    }
  42.254 +    vrr = vmx_vcpu_rr(vcpu,vadr);
  42.255 +    if(vec == 1) type = ISIDE_TLB;
  42.256 +    else if(vec == 2) type = DSIDE_TLB;
  42.257 +    else panic("wrong vec\n");
  42.258 +
  42.259 +//    prepare_if_physical_mode(vcpu);
  42.260 +
   42.261 +    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr, type)) != 0){
   42.262 +        if ( data->ps != vrr.ps ) {
   42.263 +            machine_tlb_insert(vcpu, data);
   42.264 +        }
   42.265 +        else {
   42.266 +            thash_insert(vtlb->ts->vhpt, data, vadr);
   42.267 +        }
  42.268 +    }else if(type == DSIDE_TLB){
  42.269 +        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  42.270 +            if(vpsr.ic){
  42.271 +                vmx_vcpu_set_isr(vcpu, misr.val);
  42.272 +                alt_dtlb(vcpu, vadr);
  42.273 +                return IA64_FAULT;
  42.274 +            } else{
  42.275 +                if(misr.sp){
  42.276 +                    //TODO  lds emulation
  42.277 +                    panic("Don't support speculation load");
  42.278 +                }else{
  42.279 +                    nested_dtlb(vcpu);
  42.280 +                    return IA64_FAULT;
  42.281 +                }
  42.282 +            }
  42.283 +        } else{
  42.284 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  42.285 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  42.286 +            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  42.287 +            if(data){
  42.288 +                if(vpsr.ic){
  42.289 +                    vmx_vcpu_set_isr(vcpu, misr.val);
  42.290 +                    dtlb_fault(vcpu, vadr);
  42.291 +                    return IA64_FAULT;
  42.292 +                }else{
  42.293 +                    if(misr.sp){
  42.294 +                        //TODO  lds emulation
  42.295 +                        panic("Don't support speculation load");
  42.296 +                    }else{
  42.297 +                        nested_dtlb(vcpu);
  42.298 +                        return IA64_FAULT;
  42.299 +                    }
  42.300 +                }
  42.301 +            }else{
  42.302 +                if(vpsr.ic){
  42.303 +                    vmx_vcpu_set_isr(vcpu, misr.val);
  42.304 +                    dvhpt_fault(vcpu, vadr);
  42.305 +                    return IA64_FAULT;
  42.306 +                }else{
  42.307 +                    if(misr.sp){
  42.308 +                        //TODO  lds emulation
  42.309 +                        panic("Don't support speculation load");
  42.310 +                    }else{
  42.311 +                        nested_dtlb(vcpu);
  42.312 +                        return IA64_FAULT;
  42.313 +                    }
  42.314 +                }
  42.315 +            }
  42.316 +        }
  42.317 +    }else if(type == ISIDE_TLB){
  42.318 +        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  42.319 +            if(!vpsr.ic){
  42.320 +                misr.ni=1;
  42.321 +            }
  42.322 +            vmx_vcpu_set_isr(vcpu, misr.val);
  42.323 +            alt_itlb(vcpu, vadr);
  42.324 +            return IA64_FAULT;
  42.325 +        } else{
  42.326 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  42.327 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  42.328 +            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  42.329 +            if(data){
  42.330 +                if(!vpsr.ic){
  42.331 +                    misr.ni=1;
  42.332 +                }
  42.333 +                vmx_vcpu_set_isr(vcpu, misr.val);
  42.334 +                itlb_fault(vcpu, vadr);
  42.335 +                return IA64_FAULT;
  42.336 +            }else{
  42.337 +                if(!vpsr.ic){
  42.338 +                    misr.ni=1;
  42.339 +                }
  42.340 +                vmx_vcpu_set_isr(vcpu, misr.val);
  42.341 +                ivhpt_fault(vcpu, vadr);
  42.342 +                return IA64_FAULT;
  42.343 +            }
  42.344 +        }
  42.345 +    }
    return IA64_NO_FAULT;
   42.346 +}
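/*
 * Decision tree above, in brief: metaphysical misses take the
 * physical_itlb_miss/physical_dtlb_miss fast paths; a vtlb hit is either
 * inserted straight into the machine TLB (on a page-size mismatch) or
 * pushed into the vhpt; all other cases are reflected to the guest as
 * alt_dtlb/alt_itlb, dtlb_fault/itlb_fault, dvhpt_fault/ivhpt_fault or
 * nested_dtlb, depending on whether the guest vhpt walker covers the
 * address, and on psr.ic and isr.sp.
 */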
  42.347 +
  42.348 +
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/xen/arch/ia64/vmx_utility.c	Mon May 23 15:29:59 2005 +0000
    43.3 @@ -0,0 +1,659 @@
    43.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    43.5 +/*
    43.6 + * vmx_utility.c:
    43.7 + * Copyright (c) 2005, Intel Corporation.
    43.8 + *
    43.9 + * This program is free software; you can redistribute it and/or modify it
   43.10 + * under the terms and conditions of the GNU General Public License,
   43.11 + * version 2, as published by the Free Software Foundation.
   43.12 + *
   43.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   43.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   43.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   43.16 + * more details.
   43.17 + *
   43.18 + * You should have received a copy of the GNU General Public License along with
   43.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   43.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   43.21 + *
   43.22 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   43.23 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   43.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   43.25 + */
   43.26 +
   43.27 +#include <xen/types.h>
   43.28 +#include <asm/vmx_vcpu.h>
   43.29 +#include <asm/processor.h>
   43.30 +#include <asm/vmx_mm_def.h>
   43.31 +
   43.32 +
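/*
 * The MASK() macro from vmx_mm_def.h is used throughout this file to
 * build bit-field masks; it is assumed here to behave roughly as
 *
 *     #define MASK(pos, len)  ((((u64)1 << (len)) - 1) << (pos))
 *
 * so, e.g., MASK(0, 15) covers bits 14:0 and MASK(17, 47) covers
 * bits 63:17.
 */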
   43.33 +/*
   43.34 + * Return:
    43.35 + *  0:  not a reserved indirect register
    43.36 + *  a reserved indirect register
   43.37 + */
   43.38 +int
   43.39 +is_reserved_indirect_register (
   43.40 +    int type,
   43.41 +    int index )
   43.42 +{
   43.43 +    switch (type) {
   43.44 +        case IA64_CPUID:
   43.45 +            if ( index >= 5 ) {
   43.46 +                return 1;
    43.47 +            }
            break;
   43.48 +
   43.49 +        case IA64_DBR:
   43.50 +        case IA64_IBR:
   43.51 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   43.52 +            break;
   43.53 +
   43.54 +        case IA64_PMC:
   43.55 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   43.56 +            break;
   43.57 +
   43.58 +        case IA64_PMD:
   43.59 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   43.60 +            break;
   43.61 +
   43.62 +        case IA64_PKR:
   43.63 +            //bugbugbug:check with pal about the max pkr!!!!
   43.64 +            break;
   43.65 +
   43.66 +        case IA64_RR:
   43.67 +            //bugbugbug:check with pal about the max rr!!!!
   43.68 +            break;
   43.69 +
   43.70 +        default:
    43.71 +            panic ("Unsupported indirect register type!");
   43.72 +    }
   43.73 +
   43.74 +    return 0;
   43.75 +
   43.76 +}
   43.77 +
   43.78 +/*
   43.79 + * Return:
   43.80 + *  Set all ignored fields in value to 0 and return
   43.81 + */
   43.82 +u64
   43.83 +indirect_reg_igfld_MASK (
   43.84 +    int type,
   43.85 +    int index,
   43.86 +    u64 value
   43.87 +    )
   43.88 +{
   43.89 +    u64 nvalue;
   43.90 +
   43.91 +    nvalue = value;
   43.92 +    switch ( type ) {
   43.93 +        case IA64_CPUID:
   43.94 +            if ( index == 2 ) {
   43.95 +                nvalue = 0;
   43.96 +            }
   43.97 +            break;
   43.98 +
   43.99 +        case IA64_DBR:
  43.100 +        case IA64_IBR:
  43.101 +            /* Refer to SDM Vol2 Table 7-1,7-2 */
  43.102 +            if ( index % 2 != 0) {
  43.103 +                /* Ignore field: {61:60} */
  43.104 +                nvalue = value & (~MASK (60, 2));
  43.105 +            }
  43.106 +            break;
  43.107 +        case IA64_PMC:
  43.108 +            if ( index == 0 ) {
  43.109 +                /* Ignore field: 3:1 */
  43.110 +                nvalue = value & (~MASK (1, 3));
  43.111 +            }
  43.112 +            break;
  43.113 +        case IA64_PMD:
  43.114 +            if ( index >= 4 ) {
  43.115 +                /* Ignore field: 7:7 */
  43.116 +                /* bugbug: this code is correct for generic
  43.117 +                 * PMD. However, for implementation specific
  43.118 +                 * PMD, it's WRONG. need more info to judge
  43.119 +                 * what's implementation specific PMD.
  43.120 +                 */
  43.121 +                nvalue = value & (~MASK (7, 1));
  43.122 +            }
  43.123 +            break;
  43.124 +        case IA64_PKR:
  43.125 +        case IA64_RR:
  43.126 +            break;
  43.127 +        default:
   43.128 +            panic ("Unsupported indirect register type!");
  43.129 +    }
  43.130 +
  43.131 +    return nvalue;
  43.132 +}
  43.133 +
  43.134 +/*
  43.135 + * Return:
  43.136 + *  Set all ignored fields in value to 0 and return
  43.137 + */
  43.138 +u64
  43.139 +cr_igfld_mask (int index, u64 value)
  43.140 +{
  43.141 +    u64 nvalue;
  43.142 +
  43.143 +    nvalue = value;
  43.144 +
  43.145 +    switch ( index ) {
  43.146 +    case IA64_REG_CR_IVA:
   43.147 +        /* Ignore field: 14:0 */
  43.148 +        nvalue = value & (~MASK (0, 15));
  43.149 +        break;
  43.150 +
  43.151 +    case IA64_REG_CR_IHA:
   43.152 +        /* Ignore field: 1:0 */
  43.153 +        nvalue = value & (~MASK (0, 2));
  43.154 +        break;
  43.155 +
  43.156 +    case IA64_REG_CR_LID:
   43.157 +        /* Ignore field: 63:32 */
  43.158 +        nvalue = value & (~MASK (32, 32));
  43.159 +        break;
  43.160 +
  43.161 +    case IA64_REG_CR_TPR:
   43.162 +        /* Ignore field: 63:17,3:0 */
  43.163 +        nvalue = value & (~MASK (17, 47));
  43.164 +        nvalue = nvalue & (~MASK (0, 4));
  43.165 +        break;
  43.166 +
  43.167 +    case IA64_REG_CR_EOI:
   43.168 +        /* Ignore field: 63:0 */
  43.169 +        nvalue = 0;
  43.170 +        break;
  43.171 +
  43.172 +    case IA64_REG_CR_ITV:
  43.173 +    case IA64_REG_CR_PMV:
  43.174 +    case IA64_REG_CR_CMCV:
  43.175 +    case IA64_REG_CR_LRR0:
  43.176 +    case IA64_REG_CR_LRR1:
   43.177 +        /* Ignore field: 63:17,12:12 */
  43.178 +        nvalue = value & (~MASK (17, 47));
  43.179 +        nvalue = nvalue & (~MASK (12, 1));
  43.180 +        break;
  43.181 +    }
  43.182 +
  43.183 +    return nvalue;
  43.184 +}
  43.185 +
  43.186 +
  43.187 +/*
  43.188 + * Return:
  43.189 + *  1: PSR reserved fields are not zero
  43.190 + *  0:  PSR reserved fields are all zero
  43.191 + */
  43.192 +int
  43.193 +check_psr_rsv_fields (u64 value)
  43.194 +{
   43.195 +    /* PSR reserved fields: 0, 12:6, 16, 31:28, 63:46
  43.196 +     * These reserved fields shall all be zero
  43.197 +     * Otherwise we will panic
  43.198 +     */
  43.199 +
  43.200 +    if ( value & MASK (0, 1) ||
  43.201 +         value & MASK (6, 7) ||
  43.202 +         value & MASK (16, 1) ||
  43.203 +         value & MASK (28, 4) ||
  43.204 +         value & MASK (46, 18)
  43.205 +         ) {
  43.206 +             return 1;
  43.207 +         }
  43.208 +
  43.209 +    return 0;
  43.210 +}
  43.211 +
  43.212 +
  43.213 +
  43.214 +/*
  43.215 + * Return:
  43.216 + *  1: CR reserved fields are not zero
  43.217 + *  0:  CR reserved fields are all zero
  43.218 + */
  43.219 +int
  43.220 +check_cr_rsv_fields (int index, u64 value)
  43.221 +{
  43.222 +    switch (index) {
  43.223 +        case IA64_REG_CR_DCR:
  43.224 +            if ( (value & MASK ( 3, 5 )) ||
  43.225 +                (value & MASK (15, 49))) {
  43.226 +                    return 1;
  43.227 +            }
  43.228 +            return 0;
  43.229 +
  43.230 +        case IA64_REG_CR_ITM:
  43.231 +        case IA64_REG_CR_IVA:
  43.232 +        case IA64_REG_CR_IIP:
  43.233 +        case IA64_REG_CR_IFA:
  43.234 +        case IA64_REG_CR_IIPA:
  43.235 +        case IA64_REG_CR_IIM:
  43.236 +        case IA64_REG_CR_IHA:
  43.237 +        case IA64_REG_CR_EOI:
  43.238 +            return 0;
  43.239 +
  43.240 +        case IA64_REG_CR_PTA:
  43.241 +            if ( (value & MASK ( 1, 1 )) ||
  43.242 +                (value & MASK (9, 6))) {
  43.243 +                    return 1;
  43.244 +            }
  43.245 +            return 0;
  43.246 +
  43.247 +        case IA64_REG_CR_IPSR:
  43.248 +            return check_psr_rsv_fields (value);
  43.249 +
  43.250 +
  43.251 +        case IA64_REG_CR_ISR:
  43.252 +            if ( (value & MASK ( 24, 8 )) ||
  43.253 +                (value & MASK (44, 20))) {
  43.254 +                    return 1;
  43.255 +            }
  43.256 +            return 0;
  43.257 +
  43.258 +        case IA64_REG_CR_ITIR:
  43.259 +            if ( (value & MASK ( 0, 2 )) ||
  43.260 +                (value & MASK (32, 32))) {
  43.261 +                    return 1;
  43.262 +            }
  43.263 +            return 0;
  43.264 +
  43.265 +        case IA64_REG_CR_IFS:
  43.266 +            if ( (value & MASK ( 38, 25 ))) {
  43.267 +                return 1;
  43.268 +            }
  43.269 +            return 0;
  43.270 +
  43.271 +        case IA64_REG_CR_LID:
  43.272 +            if ( (value & MASK ( 0, 16 ))) {
  43.273 +                return 1;
  43.274 +            }
  43.275 +            return 0;
  43.276 +
  43.277 +        case IA64_REG_CR_IVR:
  43.278 +            if ( (value & MASK ( 8, 56 ))) {
  43.279 +                return 1;
  43.280 +            }
  43.281 +            return 0;
  43.282 +
  43.283 +        case IA64_REG_CR_TPR:
  43.284 +            if ( (value & MASK ( 8, 8 ))) {
  43.285 +                return 1;
  43.286 +            }
  43.287 +            return 0;
  43.288 +
  43.289 +        case IA64_REG_CR_IRR0:
  43.290 +            if ( (value & MASK ( 1, 1 )) ||
  43.291 +                (value & MASK (3, 13))) {
  43.292 +                    return 1;
  43.293 +            }
  43.294 +            return 0;
  43.295 +
  43.296 +        case IA64_REG_CR_ITV:
  43.297 +        case IA64_REG_CR_PMV:
  43.298 +        case IA64_REG_CR_CMCV:
  43.299 +            if ( (value & MASK ( 8, 4 )) ||
  43.300 +                (value & MASK (13, 3))) {
  43.301 +                    return 1;
  43.302 +            }
  43.303 +            return 0;
  43.304 +
  43.305 +        case IA64_REG_CR_LRR0:
  43.306 +        case IA64_REG_CR_LRR1:
  43.307 +            if ( (value & MASK ( 11, 1 )) ||
  43.308 +                (value & MASK (14, 1))) {
  43.309 +                    return 1;
  43.310 +            }
  43.311 +            return 0;
  43.312 +    }
  43.313 +
  43.314 +
  43.315 +    panic ("Unsupported CR");
  43.316 +}
  43.317 +
  43.318 +
  43.319 +
  43.320 +/*
  43.321 + * Return:
  43.322 + *  0: Indirect Reg reserved fields are not zero
  43.323 + *  1: all zero (note the inverted polarity vs. the checks above)
  43.324 + */
  43.325 +int
  43.326 +check_indirect_reg_rsv_fields ( int type, int index, u64 value )
  43.327 +{
  43.328 +
  43.329 +    switch ( type ) {
  43.330 +        case IA64_CPUID:
  43.331 +            if ( index == 3 ) {
  43.332 +                if ( value & MASK (40, 24 )) {
  43.333 +                    return 0;
  43.334 +                }
  43.335 +            } else if ( index == 4 ) {
  43.336 +                if ( value & MASK (2, 62 )) {
  43.337 +                    return 0;
  43.338 +                }
  43.339 +            }
  43.340 +            break;
  43.341 +
  43.342 +        case IA64_DBR:
  43.343 +        case IA64_IBR:
  43.344 +        case IA64_PMC:
  43.345 +        case IA64_PMD:
  43.346 +            break;
  43.347 +
  43.348 +        case IA64_PKR:
  43.349 +            if ( value & MASK (4, 4) ||
  43.350 +                value & MASK (32, 32 )) {
  43.351 +                return 0;
  43.352 +                }
  43.353 +            break;
  43.354 +
  43.355 +        case IA64_RR:
  43.356 +            if ( value & MASK (1, 1) ||
  43.357 +                value & MASK (32, 32 )) {
  43.358 +                return 0;
  43.359 +                }
  43.360 +            break;
  43.361 +
  43.362 +        default:
  43.363 +            panic ("Unsupported instruction!");
  43.364 +    }
  43.365 +
  43.366 +    return 1;
  43.367 +}
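
/* [Editor's sketch] Note the inverted polarity: this checker returns 0
 * when reserved bits ARE set, unlike check_psr/check_cr_rsv_fields.
 * A standalone restatement of just the IA64_RR case (rr bit 1 and bits
 * 63:32 reserved, per the masks above; MASK semantics assumed):
 */
#include <stdio.h>

#define MASK(first_bit, nbits) ((((1UL << (nbits)) - 1)) << (first_bit))

static int rr_rsv_fields_zero(unsigned long value)
{
    return !(value & (MASK(1, 1) | MASK(32, 32)));
}

int main(void)
{
    printf("%d\n", rr_rsv_fields_zero(0x1000UL));   /* 1: ok           */
    printf("%d\n", rr_rsv_fields_zero(1UL << 32));  /* 0: reserved set */
    return 0;
}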
  43.368 +
  43.369 +
  43.370 +
  43.371 +
  43.372 +/* Return
  43.373 + * Same format as isr_t
  43.374 + * Only ei/ni bits are valid, all other bits are zero
  43.375 + */
  43.376 +u64
  43.377 +set_isr_ei_ni (VCPU *vcpu)
  43.378 +{
  43.379 +
  43.380 +    IA64_PSR vpsr,ipsr;
  43.381 +    ISR visr;
  43.382 +    REGS *regs;
  43.383 +
  43.384 +    regs=vcpu_regs(vcpu);
  43.385 +
  43.386 +    visr.val = 0;
  43.387 +
  43.388 +    vpsr.val = vmx_vcpu_get_psr (vcpu);
  43.389 +
  43.390 +    if ( vpsr.ic == 0 ) {
  43.391 +        /* Interruption collection is off: set ISR.ni */
  43.392 +        visr.ni = 1;
  43.393 +    }
  43.394 +    ipsr.val = regs->cr_ipsr;
  43.395 +
  43.396 +    visr.ei = ipsr.ri;
  43.397 +    return visr.val;
  43.398 +}
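
/* [Editor's sketch] What ei/ni encode: ISR.ei is the excepting
 * instruction slot (copied from the machine ipsr.ri) and ISR.ni flags a
 * nested interruption (guest psr.ic was 0). The bitfield layout below
 * is an assumption for illustration (ni at bit 39, ei at bits 42:41),
 * not the real ISR type.
 */
#include <stdio.h>

union mini_isr {
    unsigned long val;
    struct {
        unsigned long rsv : 39;
        unsigned long ni  : 1;
        unsigned long so  : 1;
        unsigned long ei  : 2;
        unsigned long hi  : 21;
    };
};

int main(void)
{
    union mini_isr isr = { .val = 0 };
    int guest_psr_ic = 0;    /* interruption collection off */
    int ipsr_ri = 2;         /* faulting slot               */

    if (guest_psr_ic == 0)
        isr.ni = 1;
    isr.ei = ipsr_ri;
    printf("isr = %#lx\n", isr.val);
    return 0;
}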
  43.399 +
  43.400 +
  43.401 +/* Set up ISR.na/code{3:0} for no-access instructions
  43.402 + * Refer to the SDM, Table 5-1
  43.403 + * Parameter:
  43.404 + *  op: the no-access instruction type (IA64_INST_TPA or IA64_INST_TAK)
  43.405 + * Return:
  43.406 + *  Same format as ISR. All fields are zero, except na/code{3:0}
  43.407 + *  (ISR.r/w are set by the callers)
  43.408 + */
  43.409 +u64
  43.410 +set_isr_for_na_inst(VCPU *vcpu, int op)
  43.411 +{
  43.412 +    ISR visr;
  43.413 +    visr.val = 0;
  43.414 +    switch (op) {
  43.415 +        case IA64_INST_TPA:
  43.416 +            visr.na = 1;
  43.417 +            visr.code = 0;
  43.418 +            break;
  43.419 +        case IA64_INST_TAK:
  43.420 +            visr.na = 1;
  43.421 +            visr.code = 3;
  43.422 +            break;
  43.423 +    }
  43.424 +    return visr.val;
  43.425 +}
  43.426 +
  43.427 +
  43.428 +
  43.429 +/*
  43.430 + * Set up ISR for register NaT consumption fault
  43.431 + * Parameters:
  43.432 + *  inst: if non-zero, the no-access instruction that faulted;
  43.433 + *  read/write: if 1, indicates a read/write access, respectively;
  43.434 + */
  43.435 +void
  43.436 +set_rnat_consumption_isr (VCPU *vcpu,int inst,int read,int write)
  43.437 +{
  43.438 +    ISR visr;
  43.439 +    u64 value;
  43.440 +    /* Need to set up ISR: code, ei, ni, na, r/w */
  43.441 +    visr.val = 0;
  43.442 +
  43.443 +    /* ISR.code{7:4} =1,
  43.444 +     * Set up ISR.code{3:0}, ISR.na
  43.445 +     */
  43.446 +    visr.code = (1 << 4);
  43.447 +    if (inst) {
  43.448 +
  43.449 +        value = set_isr_for_na_inst (vcpu,inst);
  43.450 +        visr.val = visr.val | value;
  43.451 +    }
  43.452 +
  43.453 +    /* Set up ISR.r/w */
  43.454 +    visr.r = read;
  43.455 +    visr.w = write;
  43.456 +
  43.457 +    /* Set up ei/ni */
  43.458 +    value = set_isr_ei_ni (vcpu);
  43.459 +    visr.val = visr.val | value;
  43.460 +
  43.461 +    vmx_vcpu_set_isr (vcpu,visr.val);
  43.462 +}
  43.463 +
  43.464 +
  43.465 +
  43.466 +/*
  43.467 + * Set up ISR for break fault
  43.468 + */
  43.469 +void set_break_isr (VCPU *vcpu)
  43.470 +{
  43.471 +    ISR visr;
  43.472 +    u64 value;
  43.473 +
  43.474 +    /* Need to set up ISR: ei, ni */
  43.475 +
  43.476 +    visr.val = 0;
  43.477 +
  43.478 +    /* Set up ei/ni */
  43.479 +    value = set_isr_ei_ni (vcpu);
  43.480 +    visr.val = visr.val | value;
  43.481 +
  43.482 +    vmx_vcpu_set_isr(vcpu, visr.val);
  43.483 +}
  43.484 +
  43.485 +
  43.486 +
  43.487 +
  43.488 +
  43.489 +
  43.490 +/*
  43.491 + * Set up ISR for Privileged Operation fault
  43.492 + */
  43.493 +void set_privileged_operation_isr (VCPU *vcpu,int inst)
  43.494 +{
  43.495 +    ISR visr;
  43.496 +    u64 value;
  43.497 +
  43.498 +    /* Need to set up ISR: code, ei, ni, na */
  43.499 +
  43.500 +    visr.val = 0;
  43.501 +
  43.502 +    /* Set up na, code{3:0} for no-access instruction */
  43.503 +    value = set_isr_for_na_inst (vcpu, inst);
  43.504 +    visr.val = visr.val | value;
  43.505 +
  43.506 +
  43.507 +    /* ISR.code{7:4} =1 */
  43.508 +    visr.code = (1 << 4) | visr.code;
  43.509 +
  43.510 +    /* Set up ei/ni */
  43.511 +    value = set_isr_ei_ni (vcpu);
  43.512 +    visr.val = visr.val | value;
  43.513 +
  43.514 +    vmx_vcpu_set_isr (vcpu, visr.val);
  43.515 +}
  43.516 +
  43.517 +
  43.518 +
  43.519 +
  43.520 +/*
  43.521 + * Set up ISR for Privileged Register fault
  43.522 + */
  43.523 +void set_privileged_reg_isr (VCPU *vcpu, int inst)
  43.524 +{
  43.525 +    ISR visr;
  43.526 +    u64 value;
  43.527 +
  43.528 +    /* Need to set up ISR: code, ei, ni */
  43.529 +
  43.530 +    visr.val = 0;
  43.531 +
  43.532 +    /* ISR.code{7:4} =2 */
  43.533 +    visr.code = 2 << 4;
  43.534 +
  43.535 +    /* Set up ei/ni */
  43.536 +    value = set_isr_ei_ni (vcpu);
  43.537 +    visr.val = visr.val | value;
  43.538 +
  43.539 +    vmx_vcpu_set_isr (vcpu, visr.val);
  43.540 +}
  43.541 +
  43.542 +
  43.543 +
  43.544 +
  43.545 +
  43.546 +/*
  43.547 + * Set up ISR for Reserved Register/Field fault
  43.548 + */
  43.549 +void set_rsv_reg_field_isr (VCPU *vcpu)
  43.550 +{
  43.551 +    ISR visr;
  43.552 +    u64 value;
  43.553 +
  43.554 +    /* Need to set up ISR: code, ei, ni */
  43.555 +
  43.556 +    visr.val = 0;
  43.557 +
  43.558 +    /* ISR.code{7:4} = 3 */
  43.559 +    visr.code = 3 << 4;
  43.560 +
  43.561 +    /* Set up ei/ni */
  43.562 +    value = set_isr_ei_ni (vcpu);
  43.563 +    visr.val = visr.val | value;
  43.564 +
  43.565 +    vmx_vcpu_set_isr (vcpu, visr.val);
  43.566 +}
  43.567 +
  43.568 +
  43.569 +
  43.570 +/*
  43.571 + * Set up ISR for Illegal Operation fault
  43.572 + */
  43.573 +void set_illegal_op_isr (VCPU *vcpu)
  43.574 +{
  43.575 +    ISR visr;
  43.576 +    u64 value;
  43.577 +
  43.578 +    /* Need to set up ISR: ei, ni */
  43.579 +
  43.580 +    visr.val = 0;
  43.581 +
  43.582 +    /* Set up ei/ni */
  43.583 +    value = set_isr_ei_ni (vcpu);
  43.584 +    visr.val = visr.val | value;
  43.585 +
  43.586 +    vmx_vcpu_set_isr (vcpu, visr.val);
  43.587 +}
  43.588 +
  43.589 +
  43.590 +void set_isr_reg_nat_consumption(VCPU *vcpu, u64 flag, u64 non_access)
  43.591 +{
  43.592 +    ISR isr;
  43.593 +
  43.594 +    isr.val = 0;
  43.595 +    isr.val = set_isr_ei_ni(vcpu);
  43.596 +    isr.code = IA64_REG_NAT_CONSUMPTION_FAULT | flag;
  43.597 +    isr.na = non_access;
  43.598 +    isr.r = 1;
  43.599 +    isr.w = 0;
  43.600 +    vmx_vcpu_set_isr(vcpu, isr.val);
  43.601 +    return;
  43.602 +}
  43.603 +
  43.604 +void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
  43.605 +{
  43.607 +    ISR isr;
  43.608 +
  43.609 +    isr.val = set_isr_ei_ni(vcpu);
  43.610 +    isr.code = IA64_PRIV_OP_FAULT;
  43.611 +    isr.na = non_access;
  43.612 +    vmx_vcpu_set_isr(vcpu, isr.val);
  43.613 +
  43.614 +    return;
  43.615 +}
  43.616 +
  43.617 +
  43.618 +IA64FAULT check_target_register(VCPU *vcpu, u64 reg_index)
  43.619 +{
  43.620 +    u64 sof;
  43.621 +    REGS *regs;
  43.622 +    regs=vcpu_regs(vcpu);
  43.623 +    sof = regs->cr_ifs & 0x7f;
  43.624 +    if(reg_index >= sof + 32)
  43.625 +        return IA64_FAULT;
  43.626 +    return IA64_NO_FAULT;
  43.627 +}
  43.628 +
  43.629 +
  43.630 +int is_reserved_rr_register(VCPU* vcpu, int reg_index)
  43.631 +{
  43.632 +    return (reg_index >= 8);
  43.633 +}
  43.634 +
  43.635 +#define  ITIR_RSV_MASK		(0x3UL | (((1UL<<32)-1) << 32))
  43.636 +int is_reserved_itir_field(VCPU* vcpu, u64 itir)
  43.637 +{
  43.638 +    if ( itir & ITIR_RSV_MASK ) {
  43.639 +        return 1;
  43.640 +    }
  43.641 +    return 0;
  43.642 +}
  43.643 +
  43.644 +int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
  43.645 +{
  43.646 +    ia64_rr rr;
  43.647 +    rr.rrval = reg_value;
  43.648 +
  43.649 +    if(rr.reserved0 != 0 || rr.reserved1 != 0){
  43.650 +        return 1;
  43.651 +    }
  43.652 +    if(rr.ps < 12 || rr.ps > 28){
  43.653 +        // page too big or small.
  43.654 +        return 1;
  43.655 +    }
  43.656 +    if(rr.ps > 15 && rr.ps % 2 != 0){
  43.657 +        // unsupported page size.
  43.658 +        return 1;
  43.659 +    }
  43.660 +    return 0;
  43.661 +}
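
/* [Editor's sketch] The rr.ps rule above, restated: page sizes run from
 * 4KB (ps=12) to 256MB (ps=28), and above ps=15 only even exponents are
 * accepted. A standalone enumeration of the accepted values:
 */
#include <stdio.h>

static int rr_ps_ok(unsigned ps)
{
    if (ps < 12 || ps > 28)
        return 0;                    /* page too big or small */
    if (ps > 15 && (ps % 2) != 0)
        return 0;                    /* unsupported page size */
    return 1;
}

int main(void)
{
    unsigned ps;
    for (ps = 10; ps <= 30; ps++)
        if (rr_ps_ok(ps))
            printf("ps=%2u  %luKB\n", ps, (1UL << ps) >> 10);
    return 0;
}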
  43.662 +
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Mon May 23 15:29:59 2005 +0000
    44.3 @@ -0,0 +1,436 @@
    44.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    44.5 +/*
    44.6 + * vmx_vcpu.c: handling all virtual cpu related thing.
    44.7 + * Copyright (c) 2005, Intel Corporation.
    44.8 + *
    44.9 + * This program is free software; you can redistribute it and/or modify it
   44.10 + * under the terms and conditions of the GNU General Public License,
   44.11 + * version 2, as published by the Free Software Foundation.
   44.12 + *
   44.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   44.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   44.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   44.16 + * more details.
   44.17 + *
   44.18 + * You should have received a copy of the GNU General Public License along with
   44.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   44.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   44.21 + *
   44.22 + *  Fred yang (fred.yang@intel.com)
   44.23 + *  Arun Sharma (arun.sharma@intel.com)
   44.24 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   44.25 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   44.26 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   44.27 + */
   44.28 +
   44.29 +
   44.30 +
   44.31 +#include <linux/sched.h>
   44.32 +#include <public/arch-ia64.h>
   44.33 +#include <asm/ia64_int.h>
   44.34 +#include <asm/vmx_vcpu.h>
   44.35 +#include <asm/regionreg.h>
   44.36 +#include <asm/tlb.h>
   44.37 +#include <asm/processor.h>
   44.38 +#include <asm/delay.h>
   44.39 +#include <asm/regs.h>
   44.40 +#include <asm/gcc_intrin.h>
   44.41 +#include <asm/vmx_mm_def.h>
   44.42 +#include <asm/vmx.h>
   44.43 +
   44.44 +//u64  fire_itc;
   44.45 +//u64  fire_itc2;
   44.46 +//u64  fire_itm;
   44.47 +//u64  fire_itm2;
   44.48 +/*
   44.49 + * Copyright (c) 2005 Intel Corporation.
   44.50 + *    Anthony Xu (anthony.xu@intel.com)
   44.51 + *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   44.52 + *
   44.53 + * (GPL v2 terms as stated in the file header above.)
   44.54 + */
   44.67 +
   44.68 +/**************************************************************************
   44.69 + VCPU general register access routines
   44.70 +**************************************************************************/
   44.71 +#include <asm/hw_irq.h>
   44.72 +#include <asm/vmx_pal_vsa.h>
   44.73 +#include <asm/kregs.h>
   44.74 +
   44.75 +//unsigned long last_guest_rsm = 0x0;
   44.76 +struct guest_psr_bundle{
   44.77 +	unsigned long ip;
   44.78 +	unsigned long psr;
   44.79 +};
   44.80 +
   44.81 +struct guest_psr_bundle guest_psr_buf[100];
   44.82 +unsigned long guest_psr_index = 0;
   44.83 +
   44.84 +void
   44.85 +vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
   44.86 +{
   44.87 +
   44.88 +    UINT64 mask;
   44.89 +    REGS *regs;
   44.90 +    IA64_PSR old_psr, new_psr;
   44.91 +    old_psr.val=vmx_vcpu_get_psr(vcpu);
   44.92 +
   44.93 +    regs=vcpu_regs(vcpu);
   44.94 +    /* We only support guests with:
   44.95 +     *  vpsr.pk = 0
   44.96 +     *  vpsr.is = 0 (and vpsr.vm = 0)
   44.97 +     * Otherwise panic
   44.98 +     */
   44.99 +    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
  44.100 +        panic ("Setting unsupported guest psr!");
  44.101 +    }
  44.102 +
  44.103 +    /*
  44.104 +     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
  44.105 +     * since these bits become 0 after each successfully executed
  44.106 +     * instruction, we clear them in vpsr and track them in mIA64_PSR only
  44.107 +     */
  44.108 +    VMX_VPD(vcpu,vpsr) = value &
  44.109 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  44.110 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  44.111 +            ));
  44.112 +
  44.113 +    new_psr.val=vmx_vcpu_get_psr(vcpu);
  44.114 +    {
  44.115 +	struct xen_regs *regs = vcpu_regs(vcpu);
  44.116 +	guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
  44.117 +	guest_psr_buf[guest_psr_index].psr = new_psr.val;
  44.118 +	if (++guest_psr_index >= 100)
  44.119 +	    guest_psr_index = 0;
  44.120 +    }
  44.121 +#if 0
  44.122 +    if (old_psr.i != new_psr.i) {
  44.123 +	if (old_psr.i)
  44.124 +		last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
  44.125 +	else
  44.126 +		last_guest_rsm = 0;
  44.127 +    }
  44.128 +#endif
  44.129 +
  44.130 +    /*
  44.131 +     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
  44.132 +     * , except for the following bits:
  44.133 +     *  ic/i/dt/si/rt/mc/it/bn/vm
  44.134 +     */
  44.135 +    mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
  44.136 +        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
  44.137 +        IA64_PSR_VM;
  44.138 +
  44.139 +    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
  44.140 +
  44.141 +    check_mm_mode_switch(vcpu, old_psr, new_psr);
  44.142 +    return;
  44.143 +}
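
/* [Editor's sketch] The core of the update above is a bit-partition:
 * hypervisor-owned PSR bits stay as they are in the machine ipsr, all
 * other bits come from the guest value. Demonstrated with made-up
 * values:
 */
#include <stdio.h>

int main(void)
{
    unsigned long ipsr  = 0xf0f0f0f0UL;  /* machine psr image     */
    unsigned long value = 0x0f0f0f0fUL;  /* guest-requested psr   */
    unsigned long mask  = 0xff00ff00UL;  /* hypervisor-owned bits */
    unsigned long out   = (ipsr & mask) | (value & ~mask);
    printf("%#lx\n", out);               /* 0xf00ff00f            */
    return 0;
}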
  44.144 +
  44.145 +/* Advance the instruction slot in both xen_regs and the vpd, based on
  44.146 + * vpsr.ri, which should have been synchronized with ipsr on entry.
  44.147 + *
  44.148 + * Also clear some PSR bits after successful emulation.
  44.149 + */
  44.150 +IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
  44.151 +{
  44.152 +    // TODO: trap_bounce?? Eddie
  44.153 +    REGS *regs = vcpu_regs(vcpu);
  44.154 +    IA64_PSR vpsr;
  44.155 +    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
  44.156 +
  44.157 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  44.158 +    if (vpsr.ri == 2) {
  44.159 +        vpsr.ri = 0;
  44.160 +        regs->cr_iip += 16;
  44.161 +    } else {
  44.162 +        vpsr.ri++;
  44.163 +    }
  44.164 +
  44.165 +    ipsr->ri = vpsr.ri;
  44.166 +    vpsr.val &=
  44.167 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  44.168 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  44.169 +            ));
  44.170 +
  44.171 +    VMX_VPD(vcpu, vpsr) = vpsr.val;
  44.172 +
  44.173 +    ipsr->val &=
  44.174 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  44.175 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  44.176 +            ));
  44.177 +
  44.178 +    return (IA64_NO_FAULT);
  44.179 +}
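
/* [Editor's sketch] IA-64 bundles are 16 bytes wide and hold three
 * instruction slots; psr.ri names the slot. Stepping past slot 2 wraps
 * to slot 0 of the next bundle, which is exactly the iip/ri update
 * performed above.
 */
#include <stdio.h>

static void advance_ip(unsigned long *iip, unsigned *ri)
{
    if (*ri == 2) {
        *ri = 0;
        *iip += 16;      /* next bundle */
    } else {
        (*ri)++;         /* next slot   */
    }
}

int main(void)
{
    unsigned long iip = 0x1000;
    unsigned ri = 1;
    advance_ip(&iip, &ri);   /* slot 2, same bundle   */
    advance_ip(&iip, &ri);   /* slot 0, iip -> 0x1010 */
    printf("iip=%#lx ri=%u\n", iip, ri);
    return 0;
}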
  44.180 +
  44.181 +
  44.182 +IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
  44.183 +{
  44.184 +    REGS *regs = vcpu_regs(vcpu);
  44.185 +    IA64_PSR vpsr;
  44.186 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  44.187 +
  44.188 +    if(!vpsr.ic)
  44.189 +        VPD_CR(vcpu,ifs) = regs->cr_ifs;
  44.190 +    regs->cr_ifs = IA64_IFS_V;
  44.191 +    return (IA64_NO_FAULT);
  44.192 +}
  44.193 +
  44.194 +
  44.195 +thash_cb_t *
  44.196 +vmx_vcpu_get_vtlb(VCPU *vcpu)
  44.197 +{
  44.198 +    return vcpu->arch.vtlb;
  44.199 +}
  44.200 +
  44.201 +
  44.202 +struct virutal_platform_def *
  44.203 +vmx_vcpu_get_plat(VCPU *vcpu)
  44.204 +{
  44.205 +    return &(vcpu->arch.arch_vmx.vmx_platform);
  44.206 +}
  44.207 +
  44.208 +
  44.209 +ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr)
  44.210 +{
  44.211 +        return (ia64_rr)VMX(vcpu,vrr[vadr>>61]);
  44.212 +}
  44.213 +
  44.214 +
  44.215 +IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  44.216 +{
  44.217 +    extern void set_one_rr(UINT64, UINT64);
  44.218 +    ia64_rr oldrr,newrr;
  44.219 +    thash_cb_t *hcb;
  44.220 +    oldrr=vmx_vcpu_rr(vcpu,reg);
  44.221 +    newrr.rrval=val;
  44.222 +#if 1
  44.223 +    if(oldrr.ps!=newrr.ps){
  44.224 +        hcb = vmx_vcpu_get_vtlb(vcpu);
  44.225 +        thash_purge_all(hcb);
  44.226 +    }
  44.227 +#endif
  44.228 +    VMX(vcpu,vrr[reg>>61]) = val;
  44.229 +    switch((u64)(reg>>61)) {
  44.230 +    case VRN5:
  44.231 +        VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
  44.232 +        break;
  44.233 +    case VRN6:
  44.234 +        VMX(vcpu,mrr6)=vmx_vrrtomrr(vcpu,val);
  44.235 +        break;
  44.236 +    case VRN7:
  44.237 +        VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
  44.238 +        /* Change double mapping for this domain */
  44.239 +        vmx_change_double_mapping(vcpu,
  44.240 +                      vmx_vrrtomrr(vcpu,oldrr.rrval),
  44.241 +                      vmx_vrrtomrr(vcpu,newrr.rrval));
  44.242 +        break;
  44.243 +    default:
  44.244 +        ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
  44.245 +        break;
  44.246 +    }
  44.247 +
  44.248 +    return (IA64_NO_FAULT);
  44.249 +}
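
/* [Editor's sketch] The vrr[] indexing above relies on the top three
 * bits of a 64-bit virtual address selecting the region register:
 */
#include <stdio.h>

int main(void)
{
    unsigned long va = 0xe000000000100000UL;    /* region-7 address */
    printf("vrn = %lu\n", va >> 61);            /* prints 7         */
    return 0;
}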
  44.250 +
  44.251 +
  44.252 +
  44.253 +/**************************************************************************
  44.254 + VCPU protection key register access routines
  44.255 +**************************************************************************/
  44.256 +
  44.257 +IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  44.258 +{
  44.259 +    UINT64 val = (UINT64)ia64_get_pkr(reg);
  44.260 +    *pval = val;
  44.261 +    return (IA64_NO_FAULT);
  44.262 +}
  44.263 +
  44.264 +IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
  44.265 +{
  44.266 +    ia64_set_pkr(reg,val);
  44.267 +    return (IA64_NO_FAULT);
  44.268 +}
  44.269 +
  44.270 +#if 0
  44.271 +int tlb_debug=0;
  44.272 +check_entry(u64 va, u64 ps, char *str)
  44.273 +{
  44.274 +     va &= ~ (PSIZE(ps)-1);
  44.275 +     if ( va == 0x2000000002908000UL ||
  44.276 +	  va == 0x600000000000C000UL ) {
  44.277 +	stop();
  44.278 +     }
  44.279 +     if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
  44.280 +}
  44.281 +#endif
  44.282 +
  44.283 +
  44.284 +u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
  44.285 +{
  44.286 +    ia64_rr rr,rr1;
  44.287 +    rr=vmx_vcpu_rr(vcpu,ifa);
  44.288 +    rr1.rrval=0;
  44.289 +    rr1.ps=rr.ps;
  44.290 +    rr1.rid=rr.rid;
  44.291 +    return (rr1.rrval);
  44.292 +}
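
/* [Editor's sketch] On a fault, the synthesized itir carries only the
 * page size and rid of the covering region register; every other field
 * reads as zero. The bitfield layout below (ve bit 0, ps bits 7:2, rid
 * bits 31:8) is an assumption for illustration, consistent with the
 * ITIR_RSV_MASK defined earlier.
 */
#include <stdio.h>

union mini_rr {
    unsigned long rrval;
    struct {
        unsigned long ve   : 1;
        unsigned long rsv0 : 1;
        unsigned long ps   : 6;
        unsigned long rid  : 24;
        unsigned long rsv1 : 32;
    };
};

int main(void)
{
    union mini_rr rr = { .rrval = 0 }, itir = { .rrval = 0 };
    rr.ps = 16;            /* 64KB pages      */
    rr.rid = 0x1234;       /* region id       */
    itir.ps = rr.ps;       /* copy ps and rid */
    itir.rid = rr.rid;
    printf("itir = %#lx\n", itir.rrval);
    return 0;
}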
  44.293 +
  44.294 +
  44.295 +
  44.296 +
  44.297 +IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
  44.298 +{
  44.299 +    // TODO: Only allowed for current vcpu
  44.300 +    UINT64 ifs, psr;
  44.301 +    REGS *regs = vcpu_regs(vcpu);
  44.302 +    psr = VPD_CR(vcpu,ipsr);
  44.303 +    vmx_vcpu_set_psr(vcpu,psr);
  44.304 +    ifs=VPD_CR(vcpu,ifs);
  44.305 +    if((ifs>>63)&&(ifs<<1)){
  44.306 +        ifs=(regs->cr_ifs)&0x7f;
  44.307 +        regs->rfi_pfs = (ifs<<7)|ifs;
  44.308 +        regs->cr_ifs = VPD_CR(vcpu,ifs);
  44.309 +    }
  44.310 +    regs->cr_iip = VPD_CR(vcpu,iip);
  44.311 +    return (IA64_NO_FAULT);
  44.312 +}
  44.313 +
  44.314 +
  44.315 +UINT64
  44.316 +vmx_vcpu_get_psr(VCPU *vcpu)
  44.317 +{
  44.318 +    return VMX_VPD(vcpu,vpsr);
  44.319 +}
  44.320 +
  44.321 +
  44.322 +IA64FAULT
  44.323 +vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
  44.324 +{
  44.325 +    IA64_PSR vpsr;
  44.326 +
  44.327 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  44.328 +    if ( vpsr.bn ) {
  44.329 +        *val=VMX_VPD(vcpu,vgr[reg-16]);
  44.330 +        // Check NAT bit
  44.331 +        if ( VMX_VPD(vcpu,vnat) & (1UL<<(reg-16)) ) {
  44.332 +            // TODO
  44.333 +            //panic ("NAT consumption fault\n");
  44.334 +            return IA64_FAULT;
  44.335 +        }
  44.336 +
  44.337 +    }
  44.338 +    else {
  44.339 +        *val=VMX_VPD(vcpu,vbgr[reg-16]);
  44.340 +        if ( VMX_VPD(vcpu,vbnat) & (1UL<<reg) ) {
  44.341 +            //panic ("NAT consumption fault\n");
  44.342 +            return IA64_FAULT;
  44.343 +        }
  44.344 +
  44.345 +    }
  44.346 +    return IA64_NO_FAULT;
  44.347 +}
  44.348 +
  44.349 +IA64FAULT
  44.350 +vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
  44.351 +{
  44.352 +    IA64_PSR vpsr;
  44.353 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  44.354 +    if ( vpsr.bn ) {
  44.355 +        VMX_VPD(vcpu,vgr[reg-16]) = val;
  44.356 +        if(nat){
  44.357 +            VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg-16) );
  44.358 +        }else{
  44.359 +            VMX_VPD(vcpu,vnat) &= ~( 1UL<<(reg-16) );
  44.360 +        }
  44.361 +    }
  44.362 +    else {
  44.363 +        VMX_VPD(vcpu,vbgr[reg-16]) = val;
  44.364 +        if(nat){
  44.365 +            VMX_VPD(vcpu,vbnat) |= ( 1UL<<(reg) );
  44.366 +        }else{
  44.367 +            VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg) );
  44.368 +        }
  44.369 +    }
  44.370 +    return IA64_NO_FAULT;
  44.371 +}
  44.372 +
  44.373 +
  44.374 +
  44.375 +IA64FAULT
  44.376 +vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
  44.377 +{
  44.378 +    REGS *regs=vcpu_regs(vcpu);
  44.379 +    u64 nat;
  44.380 +    //TODO, Eddie
  44.381 +    if (!regs) return 0;
  44.382 +    if (reg >= 16 && reg < 32) {
  44.383 +        return vmx_vcpu_get_bgr(vcpu,reg,val);
  44.384 +    }
  44.385 +    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
  44.386 +    if(nat){
  44.387 +        return IA64_FAULT;
  44.388 +    }
  44.389 +    return IA64_NO_FAULT;
  44.390 +}
  44.391 +
  44.392 +// returns:
  44.393 +//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
  44.394 +//   IA64_NO_FAULT otherwise
  44.395 +
  44.396 +IA64FAULT
  44.397 +vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
  44.398 +{
  44.399 +    REGS *regs = vcpu_regs(vcpu);
  44.400 +    long sof;
  44.401 +    //TODO Eddie
  44.402 +    if (!regs) return IA64_ILLOP_FAULT;
  44.403 +    sof = (regs->cr_ifs) & 0x7f;
  44.404 +    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
  44.405 +    if ( reg >= 16 && reg < 32 ) {
  44.406 +        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
  44.407 +    }
  44.408 +    setreg(reg,value,nat,regs);
  44.409 +    return IA64_NO_FAULT;
  44.410 +}
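
/* [Editor's sketch] The frame check above: cr.ifs.sof is the current
 * register frame size, so the valid stacked GRs are r32..r(31+sof);
 * r16..r31 detour through the banked-register path. A standalone
 * restatement of the bound:
 */
#include <stdio.h>

int main(void)
{
    unsigned long sof = 0x60;   /* frame of 96 registers     */
    unsigned reg = 130;         /* candidate target register */
    if (reg >= sof + 32)
        printf("r%u -> illegal operation fault\n", reg);
    else
        printf("r%u is inside the frame\n", reg);
    return 0;
}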
  44.411 +
  44.412 +
  44.413 +IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
  44.414 +{
  44.415 +    UINT64 vpsr;
  44.416 +    vpsr = vmx_vcpu_get_psr(vcpu);
  44.417 +    vpsr &= (~imm24);
  44.418 +    vmx_vcpu_set_psr(vcpu, vpsr);
  44.419 +    return IA64_NO_FAULT;
  44.420 +}
  44.421 +
  44.422 +
  44.423 +IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
  44.424 +{
  44.425 +    UINT64 vpsr;
  44.426 +    vpsr = vmx_vcpu_get_psr(vcpu);
  44.427 +    vpsr |= imm24;
  44.428 +    vmx_vcpu_set_psr(vcpu, vpsr);
  44.429 +    return IA64_NO_FAULT;
  44.430 +}
  44.431 +
  44.432 +
  44.433 +IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
  44.434 +{
  44.435 +    vmx_vcpu_set_psr(vcpu, val);
  44.436 +    return IA64_NO_FAULT;
  44.437 +}
  44.438 +
  44.439 +
    45.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.2 +++ b/xen/arch/ia64/vmx_virt.c	Mon May 23 15:29:59 2005 +0000
    45.3 @@ -0,0 +1,1501 @@
    45.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    45.5 +/*
    45.6 + * vmx_virt.c:
    45.7 + * Copyright (c) 2005, Intel Corporation.
    45.8 + *
    45.9 + * This program is free software; you can redistribute it and/or modify it
   45.10 + * under the terms and conditions of the GNU General Public License,
   45.11 + * version 2, as published by the Free Software Foundation.
   45.12 + *
   45.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   45.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   45.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   45.16 + * more details.
   45.17 + *
   45.18 + * You should have received a copy of the GNU General Public License along with
   45.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   45.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   45.21 + *
   45.22 + *  Fred yang (fred.yang@intel.com)
   45.23 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   45.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   45.25 + */
   45.26 +
   45.27 +
   45.28 +
   45.29 +#include <asm/privop.h>
   45.30 +#include <asm/vmx_vcpu.h>
   45.31 +#include <asm/processor.h>
   45.32 +#include <asm/delay.h>	// Debug only
   45.33 +#include <asm/vmmu.h>
   45.34 +#include <asm/vmx_mm_def.h>
   45.35 +#include <asm/smp.h>
   45.36 +
   45.37 +#include <asm/virt_event.h>
   45.38 +extern UINT64 privop_trace;
   45.39 +
   45.40 +void
   45.41 +ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64  * cause)
   45.42 +{
   45.43 +    *cause=0;
   45.44 +    switch (slot_type) {
   45.45 +        case M:
   45.46 +        if (inst.generic.major==0){
   45.47 +            if(inst.M28.x3==0){
   45.48 +                if(inst.M44.x4==6){
   45.49 +                    *cause=EVENT_SSM;
   45.50 +                }else if(inst.M44.x4==7){
   45.51 +                    *cause=EVENT_RSM;
   45.52 +                }else if(inst.M30.x4==8&&inst.M30.x2==2){
   45.53 +                    *cause=EVENT_MOV_TO_AR_IMM;
   45.54 +                }
   45.55 +            }
   45.56 +        }
   45.57 +        else if(inst.generic.major==1){
   45.58 +            if(inst.M28.x3==0){
   45.59 +                if(inst.M32.x6==0x2c){
   45.60 +                    *cause=EVENT_MOV_TO_CR;
   45.61 +                }else if(inst.M33.x6==0x24){
   45.62 +                    *cause=EVENT_MOV_FROM_CR;
   45.63 +                }else if(inst.M35.x6==0x2d){
   45.64 +                    *cause=EVENT_MOV_TO_PSR;
   45.65 +                }else if(inst.M36.x6==0x25){
   45.66 +                    *cause=EVENT_MOV_FROM_PSR;
   45.67 +                }else if(inst.M29.x6==0x2A){
   45.68 +                    *cause=EVENT_MOV_TO_AR;
   45.69 +                }else if(inst.M31.x6==0x22){
   45.70 +                    *cause=EVENT_MOV_FROM_AR;
   45.71 +                }else if(inst.M45.x6==0x09){
   45.72 +                    *cause=EVENT_PTC_L;
   45.73 +                }else if(inst.M45.x6==0x0A){
   45.74 +                    *cause=EVENT_PTC_G;
   45.75 +                }else if(inst.M45.x6==0x0B){
   45.76 +                    *cause=EVENT_PTC_GA;
   45.77 +                }else if(inst.M45.x6==0x0C){
   45.78 +                    *cause=EVENT_PTR_D;
   45.79 +                }else if(inst.M45.x6==0x0D){
   45.80 +                    *cause=EVENT_PTR_I;
   45.81 +                }else if(inst.M46.x6==0x1A){
   45.82 +                    *cause=EVENT_THASH;
   45.83 +                }else if(inst.M46.x6==0x1B){
   45.84 +                    *cause=EVENT_TTAG;
   45.85 +                }else if(inst.M46.x6==0x1E){
   45.86 +                    *cause=EVENT_TPA;
   45.87 +                }else if(inst.M46.x6==0x1F){
   45.88 +                    *cause=EVENT_TAK;
   45.89 +                }else if(inst.M47.x6==0x34){
   45.90 +                    *cause=EVENT_PTC_E;
   45.91 +                }else if(inst.M41.x6==0x2E){
   45.92 +                    *cause=EVENT_ITC_D;
   45.93 +                }else if(inst.M41.x6==0x2F){
   45.94 +                    *cause=EVENT_ITC_I;
   45.95 +                }else if(inst.M42.x6==0x00){
   45.96 +                    *cause=EVENT_MOV_TO_RR;
   45.97 +                }else if(inst.M42.x6==0x01){
   45.98 +                    *cause=EVENT_MOV_TO_DBR;
   45.99 +                }else if(inst.M42.x6==0x02){
  45.100 +                    *cause=EVENT_MOV_TO_IBR;
  45.101 +                }else if(inst.M42.x6==0x03){
  45.102 +                    *cause=EVENT_MOV_TO_PKR;
  45.103 +                }else if(inst.M42.x6==0x04){
  45.104 +                    *cause=EVENT_MOV_TO_PMC;
  45.105 +                }else if(inst.M42.x6==0x05){
  45.106 +                    *cause=EVENT_MOV_TO_PMD;
  45.107 +                }else if(inst.M42.x6==0x0E){
  45.108 +                    *cause=EVENT_ITR_D;
  45.109 +                }else if(inst.M42.x6==0x0F){
  45.110 +                    *cause=EVENT_ITR_I;
  45.111 +                }else if(inst.M43.x6==0x10){
  45.112 +                    *cause=EVENT_MOV_FROM_RR;
  45.113 +                }else if(inst.M43.x6==0x11){
  45.114 +                    *cause=EVENT_MOV_FROM_DBR;
  45.115 +                }else if(inst.M43.x6==0x12){
  45.116 +                    *cause=EVENT_MOV_FROM_IBR;
  45.117 +                }else if(inst.M43.x6==0x13){
  45.118 +                    *cause=EVENT_MOV_FROM_PKR;
  45.119 +                }else if(inst.M43.x6==0x14){
  45.120 +                    *cause=EVENT_MOV_FROM_PMC;
  45.121 +/*
  45.122 +                }else if(inst.M43.x6==0x15){
  45.123 +                    *cause=EVENT_MOV_FROM_PMD;
  45.124 +*/
  45.125 +                }else if(inst.M43.x6==0x17){
  45.126 +                    *cause=EVENT_MOV_FROM_CPUID;
  45.127 +                }
  45.128 +            }
  45.129 +        }
  45.130 +        break;
  45.131 +        case B:
  45.132 +        if(inst.generic.major==0){
  45.133 +            if(inst.B8.x6==0x02){
  45.134 +                *cause=EVENT_COVER;
  45.135 +            }else if(inst.B8.x6==0x08){
  45.136 +                *cause=EVENT_RFI;
  45.137 +            }else if(inst.B8.x6==0x0c){
  45.138 +                *cause=EVENT_BSW_0;
  45.139 +            }else if(inst.B8.x6==0x0d){
  45.140 +                *cause=EVENT_BSW_1;
  45.141 +            }
  45.142 +        }
  45.143 +    }
  45.144 +}
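
/* [Editor's sketch] The decoder above receives one 41-bit instruction
 * already sliced from its bundle and dispatches on the 4-bit major
 * opcode (bits 40:37) plus the various x3/x4/x6 extension fields. A
 * sketch of the slicing step, assuming the standard IA-64 bundle
 * layout (5-bit template, then three 41-bit slots); this uses the GCC
 * __uint128_t extension for brevity.
 */
#include <stdio.h>

static unsigned long get_slot(const unsigned long bundle[2], int slot)
{
    /* slot0 = bits 45:5, slot1 = 86:46, slot2 = 127:87 */
    unsigned start = 5 + 41 * slot;
    __uint128_t b = ((__uint128_t)bundle[1] << 64) | bundle[0];
    return (unsigned long)((b >> start) & ((1UL << 41) - 1));
}

int main(void)
{
    unsigned long bundle[2] = { 0x0123456789abcdefUL, 0xfedcba9876543210UL };
    unsigned long inst = get_slot(bundle, 1);
    printf("slot1 = %#lx, major opcode = %lu\n", inst, inst >> 37);
    return 0;
}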
  45.145 +
  45.146 +IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
  45.147 +{
  45.148 +    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
  45.149 +    return vmx_vcpu_reset_psr_sm(vcpu,imm24);
  45.150 +}
  45.151 +
  45.152 +IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
  45.153 +{
  45.154 +    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
  45.155 +    return vmx_vcpu_set_psr_sm(vcpu,imm24);
  45.156 +}
  45.157 +
  45.158 +unsigned long last_guest_psr = 0x0;
  45.159 +IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
  45.160 +{
  45.161 +    UINT64 tgt = inst.M33.r1;
  45.162 +    UINT64 val;
  45.163 +    IA64FAULT fault;
  45.164 +
  45.165 +/*
  45.166 +    if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
  45.167 +        return vmx_vcpu_set_gr(vcpu, tgt, val);
  45.168 +    else return fault;
  45.169 +    */
  45.170 +    val = vmx_vcpu_get_psr(vcpu);
  45.171 +    val = (val & MASK(0, 32)) | (val & MASK(35, 2));
  45.172 +    last_guest_psr = val;
  45.173 +    return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
  45.174 +}
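
/* [Editor's sketch] The masking above implements the architected
 * reader-visible PSR subset: mov-from-psr returns PSR{31:0} and
 * PSR{36:35} (the mc and it bits); everything else reads as zero to
 * the guest. MASK semantics assumed as earlier:
 */
#include <stdio.h>

#define MASK(first_bit, nbits) ((((1UL << (nbits)) - 1)) << (first_bit))

int main(void)
{
    unsigned long vpsr = ~0UL;
    unsigned long val = (vpsr & MASK(0, 32)) | (vpsr & MASK(35, 2));
    printf("%#lx\n", val);    /* 0x18ffffffff */
    return 0;
}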
  45.175 +
  45.176 +/**
  45.177 + * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
  45.178 + */
  45.179 +IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
  45.180 +{
  45.181 +    UINT64 val;
  45.182 +    IA64FAULT fault;
  45.183 +    if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
  45.184 +        panic(" get_psr nat bit fault\n");
  45.185 +
  45.186 +    val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32));
  45.187 +#if 0
  45.188 +	if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
  45.189 +		while(1);
  45.190 +	else
  45.191 +		last_mov_from_psr = 0;
  45.192 +#endif
  45.193 +    return vmx_vcpu_set_psr_l(vcpu,val);
  45.194 +}
  45.195 +
  45.196 +
  45.197 +/**************************************************************************
  45.198 +Privileged operation emulation routines
  45.199 +**************************************************************************/
  45.200 +
  45.201 +IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
  45.202 +{
  45.203 +    IA64_PSR  vpsr;
  45.204 +    REGS *regs;
  45.205 +#ifdef  CHECK_FAULT
  45.206 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.207 +    if ( vpsr.cpl != 0) {
  45.208 +        /* Inject Privileged Operation fault into guest */
  45.209 +        set_privileged_operation_isr (vcpu, 0);
  45.210 +        privilege_op (vcpu);
  45.211 +        return IA64_FAULT;
  45.212 +    }
  45.213 +#endif // CHECK_FAULT
  45.214 +    regs=vcpu_regs(vcpu);
  45.215 +    vpsr.val=regs->cr_ipsr;
  45.216 +    if ( vpsr.is == 1 ) {
  45.217 +        panic ("We do not support IA32 instructions yet");
  45.218 +    }
  45.219 +
  45.220 +    return vmx_vcpu_rfi(vcpu);
  45.221 +}
  45.222 +
  45.223 +IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
  45.224 +{
  45.225 +#ifdef  CHECK_FAULT
  45.226 +    IA64_PSR  vpsr;
  45.227 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.228 +    if ( vpsr.cpl != 0) {
  45.229 +        /* Inject Privileged Operation fault into guest */
  45.230 +        set_privileged_operation_isr (vcpu, 0);
  45.231 +        privilege_op (vcpu);
  45.232 +        return IA64_FAULT;
  45.233 +    }
  45.234 +#endif // CHECK_FAULT
  45.235 +   return vmx_vcpu_bsw0(vcpu);
  45.236 +}
  45.237 +
  45.238 +IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
  45.239 +{
  45.240 +#ifdef  CHECK_FAULT
  45.241 +    IA64_PSR  vpsr;
  45.242 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.243 +    if ( vpsr.cpl != 0) {
  45.244 +        /* Inject Privileged Operation fault into guest */
  45.245 +        set_privileged_operation_isr (vcpu, 0);
  45.246 +        privilege_op (vcpu);
  45.247 +        return IA64_FAULT;
  45.248 +    }
  45.249 +#endif // CHECK_FAULT
  45.250 +    return vmx_vcpu_bsw1(vcpu);
  45.251 +}
  45.252 +
  45.253 +IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
  45.254 +{
  45.255 +    return vmx_vcpu_cover(vcpu);
  45.256 +}
  45.257 +
  45.258 +IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
  45.259 +{
  45.260 +    u64 r2,r3;
  45.261 +    ISR isr;
  45.262 +    IA64_PSR  vpsr;
  45.263 +
  45.264 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.265 +    if ( vpsr.cpl != 0) {
  45.266 +        /* Inject Privileged Operation fault into guest */
  45.267 +        set_privileged_operation_isr (vcpu, 0);
  45.268 +        privilege_op (vcpu);
  45.269 +        return IA64_FAULT;
  45.270 +    }
  45.271 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
  45.272 +#ifdef  VMAL_NO_FAULT_CHECK
  45.273 +        set_isr_reg_nat_consumption(vcpu,0,0);
  45.274 +        rnat_comsumption(vcpu);
  45.275 +        return IA64_FAULT;
  45.276 +#endif // VMAL_NO_FAULT_CHECK
  45.277 +    }
  45.278 +#ifdef  VMAL_NO_FAULT_CHECK
  45.279 +    if (unimplemented_gva(vcpu,r3) ) {
  45.280 +        isr.val = set_isr_ei_ni(vcpu);
  45.281 +        isr.code = IA64_RESERVED_REG_FAULT;
  45.282 +        vcpu_set_isr(vcpu, isr.val);
  45.283 +        unimpl_daddr(vcpu);
  45.284 +        return IA64_FAULT;
  45.285 +   }
  45.286 +#endif // VMAL_NO_FAULT_CHECK
  45.287 +    return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
  45.288 +}
  45.289 +
  45.290 +IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
  45.291 +{
  45.292 +    u64 r3;
  45.293 +    ISR isr;
  45.294 +    IA64_PSR  vpsr;
  45.295 +
  45.296 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.297 +#ifdef  VMAL_NO_FAULT_CHECK
  45.298 +    if ( vpsr.cpl != 0) {
  45.299 +        /* Inject Privileged Operation fault into guest */
  45.300 +        set_privileged_operation_isr (vcpu, 0);
  45.301 +        privilege_op (vcpu);
  45.302 +        return IA64_FAULT;
  45.303 +    }
  45.304 +#endif // VMAL_NO_FAULT_CHECK
  45.305 +    if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
  45.306 +#ifdef  VMAL_NO_FAULT_CHECK
  45.307 +        set_isr_reg_nat_consumption(vcpu,0,0);
  45.308 +        rnat_comsumption(vcpu);
  45.309 +        return IA64_FAULT;
  45.310 +#endif // VMAL_NO_FAULT_CHECK
  45.311 +    }
  45.312 +    return vmx_vcpu_ptc_e(vcpu,r3);
  45.313 +}
  45.314 +
  45.315 +IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
  45.316 +{
  45.317 +    return vmx_emul_ptc_l(vcpu, inst);
  45.318 +}
  45.319 +
  45.320 +IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
  45.321 +{
  45.322 +    return vmx_emul_ptc_l(vcpu, inst);
  45.323 +}
  45.324 +
  45.325 +IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
  45.326 +{
  45.327 +    ISR isr;
  45.328 +    IA64FAULT	ret1, ret2;
  45.329 +
  45.330 +#ifdef  VMAL_NO_FAULT_CHECK
  45.331 +    IA64_PSR  vpsr;
  45.332 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.333 +    if ( vpsr.cpl != 0) {
  45.334 +        /* Inject Privileged Operation fault into guest */
  45.335 +        set_privileged_operation_isr (vcpu, 0);
  45.336 +        privilege_op (vcpu);
  45.337 +        return IA64_FAULT;
  45.338 +    }
  45.339 +#endif // VMAL_NO_FAULT_CHECK
  45.340 +    ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
  45.341 +    ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
  45.342 +#ifdef  VMAL_NO_FAULT_CHECK
  45.343 +    if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
  45.344 +        set_isr_reg_nat_consumption(vcpu,0,0);
  45.345 +        rnat_comsumption(vcpu);
  45.346 +        return IA64_FAULT;
  45.347 +    }
  45.348 +    if (unimplemented_gva(vcpu, *pr3) ) {
  45.349 +        isr.val = set_isr_ei_ni(vcpu);
  45.350 +        isr.code = IA64_RESERVED_REG_FAULT;
  45.351 +        vcpu_set_isr(vcpu, isr.val);
  45.352 +        unimpl_daddr(vcpu);
  45.353 +        return IA64_FAULT;
  45.354 +   }
  45.355 +#endif // VMAL_NO_FAULT_CHECK
  45.356 +   return IA64_NO_FAULT;
  45.357 +}
  45.358 +
  45.359 +IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
  45.360 +{
  45.361 +    u64 r2,r3;
  45.362 +    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
  45.363 +    	return IA64_FAULT;
  45.364 +    return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
  45.365 +}
  45.366 +
  45.367 +IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
  45.368 +{
  45.369 +    u64 r2,r3;
  45.370 +    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
  45.371 +    	return IA64_FAULT;
  45.372 +    return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
  45.373 +}
  45.374 +
  45.375 +
  45.376 +IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
  45.377 +{
  45.378 +    u64 r1,r3;
  45.379 +    ISR visr;
  45.380 +    IA64_PSR vpsr;
  45.381 +#ifdef  CHECK_FAULT
  45.382 +    if(check_target_register(vcpu, inst.M46.r1)){
  45.383 +        set_illegal_op_isr(vcpu);
  45.384 +        illegal_op(vcpu);
  45.385 +        return IA64_FAULT;
  45.386 +    }
  45.387 +#endif //CHECK_FAULT
  45.388 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  45.389 +#ifdef  CHECK_FAULT
  45.390 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  45.391 +        return IA64_NO_FAULT;
  45.392 +#endif  //CHECK_FAULT
  45.393 +    }
  45.394 +#ifdef  CHECK_FAULT
  45.395 +    if(unimplemented_gva(vcpu, r3)){
  45.396 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  45.397 +        return IA64_NO_FAULT;
  45.398 +    }
  45.399 +#endif  //CHECK_FAULT
  45.400 +    vmx_vcpu_thash(vcpu, r3, &r1);
  45.401 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  45.402 +    return(IA64_NO_FAULT);
  45.403 +}
  45.404 +
  45.405 +
  45.406 +IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
  45.407 +{
  45.408 +    u64 r1,r3;
  45.409 +    ISR visr;
  45.410 +    IA64_PSR vpsr;
  45.411 +#ifdef  CHECK_FAULT
  45.412 +    if(check_target_register(vcpu, inst.M46.r1)){
  45.413 +        set_illegal_op_isr(vcpu);
  45.414 +        illegal_op(vcpu);
  45.415 +        return IA64_FAULT;
  45.416 +    }
  45.417 +#endif //CHECK_FAULT
  45.418 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  45.419 +#ifdef  CHECK_FAULT
  45.420 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  45.421 +        return IA64_NO_FAULT;
  45.422 +#endif  //CHECK_FAULT
  45.423 +    }
  45.424 +#ifdef  CHECK_FAULT
  45.425 +    if(unimplemented_gva(vcpu, r3)){
  45.426 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  45.427 +        return IA64_NO_FAULT;
  45.428 +    }
  45.429 +#endif  //CHECK_FAULT
  45.430 +    vmx_vcpu_ttag(vcpu, r3, &r1);
  45.431 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  45.432 +    return(IA64_NO_FAULT);
  45.433 +}
  45.434 +
  45.435 +
  45.436 +IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
  45.437 +{
  45.438 +    u64 r1,r3;
  45.439 +    ISR visr;
  45.440 +#ifdef  CHECK_FAULT
  45.441 +    if(check_target_register(vcpu, inst.M46.r1)){
  45.442 +        set_illegal_op_isr(vcpu);
  45.443 +        illegal_op(vcpu);
  45.444 +        return IA64_FAULT;
  45.445 +    }
  45.446 +    IA64_PSR vpsr;
  45.447 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.448 +    if(vpsr.cpl!=0){
  45.449 +        visr.val=0;
  45.450 +        vcpu_set_isr(vcpu, visr.val);
  45.451 +        return IA64_FAULT;
  45.452 +    }
  45.453 +#endif  //CHECK_FAULT
  45.454 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  45.455 +#ifdef  CHECK_FAULT
  45.456 +        set_isr_reg_nat_consumption(vcpu,0,1);
  45.457 +        rnat_comsumption(vcpu);
  45.458 +        return IA64_FAULT;
  45.459 +#endif  //CHECK_FAULT
  45.460 +    }
  45.461 +#ifdef  CHECK_FAULT
  45.462 +    if (unimplemented_gva(vcpu,r3) ) {
  45.463 +        // inject unimplemented_data_address_fault
  45.464 +        visr.val = set_isr_ei_ni(vcpu);
  45.465 +        visr.code = IA64_RESERVED_REG_FAULT;
  45.466 +        vcpu_set_isr(vcpu, visr.val);
  45.467 +        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
  45.468 +        unimpl_daddr(vcpu);
  45.469 +        return IA64_FAULT;
  45.470 +   }
  45.471 +#endif  //CHECK_FAULT
  45.472 +
  45.473 +    if(vmx_vcpu_tpa(vcpu, r3, &r1)){
  45.474 +        return IA64_FAULT;
  45.475 +    }
  45.476 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  45.477 +    return(IA64_NO_FAULT);
  45.478 +}
  45.479 +
  45.480 +IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
  45.481 +{
  45.482 +    u64 r1,r3;
  45.483 +    ISR visr;
  45.484 +    IA64_PSR vpsr;
  45.485 +    int fault=IA64_NO_FAULT;
  45.486 +#ifdef  CHECK_FAULT
  45.487 +    visr.val=0;
  45.488 +    if(check_target_register(vcpu, inst.M46.r1)){
  45.489 +        set_illegal_op_isr(vcpu);
  45.490 +        illegal_op(vcpu);
  45.491 +        return IA64_FAULT;
  45.492 +    }
  45.493 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.494 +    if(vpsr.cpl!=0){
  45.495 +        vcpu_set_isr(vcpu, visr.val);
  45.496 +        return IA64_FAULT;
  45.497 +    }
  45.498 +#endif
  45.499 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  45.500 +#ifdef  CHECK_FAULT
  45.501 +        set_isr_reg_nat_consumption(vcpu,0,1);
  45.502 +        rnat_comsumption(vcpu);
  45.503 +        return IA64_FAULT;
  45.504 +#endif
  45.505 +    }
  45.506 +    if(vmx_vcpu_tak(vcpu, r3, &r1)){
  45.507 +        return IA64_FAULT;
  45.508 +    }
  45.509 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  45.510 +    return(IA64_NO_FAULT);
  45.511 +}
  45.512 +
  45.513 +
  45.514 +/************************************
  45.515 + * Insert translation register/cache
  45.516 +************************************/
  45.517 +
  45.518 +IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
  45.519 +{
  45.520 +    UINT64 fault, itir, ifa, pte, slot;
  45.521 +    ISR isr;
  45.522 +    IA64_PSR  vpsr;
  45.523 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.524 +    if ( vpsr.ic ) {
  45.525 +        set_illegal_op_isr(vcpu);
  45.526 +        illegal_op(vcpu);
  45.527 +        return IA64_FAULT;
  45.528 +    }
  45.529 +#ifdef  VMAL_NO_FAULT_CHECK
  45.530 +    if ( vpsr.cpl != 0) {
  45.531 +        /* Inject Privileged Operation fault into guest */
  45.532 +        set_privileged_operation_isr (vcpu, 0);
  45.533 +        privilege_op (vcpu);
  45.534 +        return IA64_FAULT;
  45.535 +    }
  45.536 +#endif // VMAL_NO_FAULT_CHECK
  45.537 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  45.538 +#ifdef  VMAL_NO_FAULT_CHECK
  45.539 +        set_isr_reg_nat_consumption(vcpu,0,0);
  45.540 +        rnat_comsumption(vcpu);
  45.541 +        return IA64_FAULT;
  45.542 +#endif // VMAL_NO_FAULT_CHECK
  45.543 +    }
  45.544 +#ifdef  VMAL_NO_FAULT_CHECK
  45.545 +    if(is_reserved_rr_register(vcpu, slot)){
  45.546 +        set_illegal_op_isr(vcpu);
  45.547 +        illegal_op(vcpu);
  45.548 +        return IA64_FAULT;
  45.549 +    }
  45.550 +#endif // VMAL_NO_FAULT_CHECK
  45.551 +
  45.552 +    if (vmx_vcpu_get_itir(vcpu,&itir)){
  45.553 +        return(IA64_FAULT);
  45.554 +    }
  45.555 +    if (vmx_vcpu_get_ifa(vcpu,&ifa)){
  45.556 +        return(IA64_FAULT);
  45.557 +    }
  45.558 +#ifdef  VMAL_NO_FAULT_CHECK
  45.559 +    if (is_reserved_itir_field(vcpu, itir)) {
  45.560 +    	// TODO
  45.561 +    	return IA64_FAULT;
  45.562 +    }
  45.563 +    if (unimplemented_gva(vcpu,ifa) ) {
  45.564 +        isr.val = set_isr_ei_ni(vcpu);
  45.565 +        isr.code = IA64_RESERVED_REG_FAULT;
  45.566 +        vcpu_set_isr(vcpu, isr.val);
  45.567 +        unimpl_daddr(vcpu);
  45.568 +        return IA64_FAULT;
  45.569 +   }
  45.570 +#endif // VMAL_NO_FAULT_CHECK
  45.571 +
  45.572 +    return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot));
  45.573 +}
  45.574 +
  45.575 +IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
  45.576 +{
  45.577 +    UINT64 fault, itir, ifa, pte, slot;
  45.578 +    ISR isr;
  45.579 +    IA64_PSR  vpsr;
  45.580 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  45.581 +    if ( vpsr.ic ) {
  45.582 +        set_illegal_op_isr(vcpu);
  45.583 +        illegal_op(vcpu);
  45.584 +        return IA64_FAULT;
  45.585 +    }
  45.586 +#ifdef  VMAL_NO_FAULT_CHECK
  45.587 +    if ( vpsr.cpl != 0) {
  45.588 +        /* Inject Privileged Operation fault into guest */
  45.589 +        set_privileged_operation_isr (vcpu, 0);
  45.590 +        privilege_op (vcpu);
  45.591 +        return IA64_FAULT;
  45.592 +    }
  45.593 +#endif // VMAL_NO_FAULT_CHECK
  45.594 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  45.595 +#ifdef  VMAL_NO_FAULT_CHECK
  45.596 +        set_isr_reg_nat_consumption(vcpu,0,0);
  45.597 +        rnat_comsumption(vcpu);
  45.598 +        return IA64_FAULT;
  45.599 +#endif // VMAL_NO_FAULT_CHECK
  45.600 +    }
  45.601 +#ifdef  VMAL_NO_FAULT_CHECK
  45.602 +    if(is_reserved_rr_register(vcpu, slot)){
  45.603 +        set_illegal_op_isr(vcpu);
  45.604 +        illegal_op(vcpu);
  45.605 +        return IA64_FAULT;
  45.606 +    }
  45.607 +#endif // VMAL_NO_FAULT_CHECK
  45.608 +
  45.609 +    if (vmx_vcpu_get_itir(vcpu,&itir)){
  45.610 +        return(IA64_FAULT);
  45.611 +    }
  45.612 +    if (vmx_vcpu_get_ifa(vcpu,&ifa)){
  45.613 +        return(IA64_FAULT);
  45.614 +    }
  45.615 +#ifdef  VMAL_NO_FAULT_CHECK
  45.616 +    if (is_reserved_itir_field(vcpu, itir)) {
  45.617 +