ia64/xen-unstable

changeset 5051:541012edd6e5

bitkeeper revision 1.1472.2.3 (428e1d27PlLcEe8kqMtV2NBLhophjg)

Merge
author adsharma@linux-t08.sc.intel.com
date Fri May 20 17:23:51 2005 +0000 (2005-05-20)
parents 035a3dd3409a 0554a6615257
children 2ea8c80c91eb
files .rootkeys xen/arch/ia64/Makefile xen/arch/ia64/Rules.mk xen/arch/ia64/acpi.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/mm_init.c xen/arch/ia64/mmio.c xen/arch/ia64/patch/linux-2.6.11/entry.S xen/arch/ia64/patch/linux-2.6.11/entry.h xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h xen/arch/ia64/patch/linux-2.6.11/head.S xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h xen/arch/ia64/patch/linux-2.6.11/ia64regs.h xen/arch/ia64/patch/linux-2.6.11/interrupt.h xen/arch/ia64/patch/linux-2.6.11/io.h xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.11/kregs.h xen/arch/ia64/patch/linux-2.6.11/mca_asm.h xen/arch/ia64/patch/linux-2.6.11/page.h xen/arch/ia64/patch/linux-2.6.11/pal.S xen/arch/ia64/patch/linux-2.6.11/pal.h xen/arch/ia64/patch/linux-2.6.11/processor.h xen/arch/ia64/patch/linux-2.6.11/ptrace.h xen/arch/ia64/patch/linux-2.6.11/setup.c xen/arch/ia64/patch/linux-2.6.11/system.h xen/arch/ia64/patch/linux-2.6.11/unaligned.c xen/arch/ia64/process.c xen/arch/ia64/tools/README.RunVT xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vcpu.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_entry.S xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_interrupt.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_minstate.h xen/arch/ia64/vmx_phy_mode.c xen/arch/ia64/vmx_process.c xen/arch/ia64/vmx_utility.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vmx_vsa.S xen/arch/ia64/vtlb.c xen/arch/ia64/xenmem.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c xen/common/elf.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/mmu_context.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/regs.h xen/include/asm-ia64/serial.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/virt_event.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_mm_def.h xen/include/asm-ia64/vmx_pal.h xen/include/asm-ia64/vmx_pal_vsa.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_ptrace.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/vtm.h xen/include/asm-ia64/xenprocessor.h xen/include/asm-ia64/xensystem.h xen/include/xen/sched.h
line diff
     1.1 --- a/.rootkeys	Thu May 19 21:22:49 2005 +0000
     1.2 +++ b/.rootkeys	Fri May 20 17:23:51 2005 +0000
     1.3 @@ -1116,12 +1116,16 @@ 421098b3ys5GAr4z6_H1jD33oem82g xen/arch/
     1.4  4272a8e4lavI6DrTvqaIhXeR5RuKBw xen/arch/ia64/ivt.S
     1.5  421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
     1.6  421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
     1.7 +428b9f38Gp0KcPokG9Nq5v1rGk2FkA xen/arch/ia64/mmio.c
     1.8  425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
     1.9  425ae516cGqvMzGtihTEsQXAXsuOhQ xen/arch/ia64/patch/linux-2.6.11/efi.c
    1.10  425ae516Y1A4q4_Kfre3qnDj7lbHJg xen/arch/ia64/patch/linux-2.6.11/entry.S
    1.11 +428bb037eJ4qs48I-tUdhht5_95obA xen/arch/ia64/patch/linux-2.6.11/entry.h
    1.12 +428bb037jPbybWNkNymaqkFr83vT6Q xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h
    1.13  425ae516txAP-owjzpTJ7ThfzWR8nw xen/arch/ia64/patch/linux-2.6.11/hardirq.h
    1.14  425ae516PDO1ESDHXHVeDNvlqUfmdQ xen/arch/ia64/patch/linux-2.6.11/head.S
    1.15  425ae516JR7HWvt1zxJ-wLvEWmJGgg xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
    1.16 +428bb037UxfxIhZaslk-qHazO4w0yg xen/arch/ia64/patch/linux-2.6.11/ia64regs.h
    1.17  425ae516AHRNmaVuZjJY-9YjmKRDqg xen/arch/ia64/patch/linux-2.6.11/interrupt.h
    1.18  425ae516U2wFUzrUJQUpy3z38jZHsQ xen/arch/ia64/patch/linux-2.6.11/io.h
    1.19  425ae516GGRmXijPBLC5ii6yWOn0rg xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
    1.20 @@ -1131,8 +1135,10 @@ 425ae5160-9wHxh0tOnIjavEjt6W0A xen/arch/
    1.21  425ae516N7SaORdbodDr90tmtCzYXw xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
    1.22  425ae516WDLrfEA4zr40d00z0VIWPg xen/arch/ia64/patch/linux-2.6.11/page.h
    1.23  425ae516pVQ75NhdItT593SiWI0lbQ xen/arch/ia64/patch/linux-2.6.11/pal.S
    1.24 +428bb037THuiyhERFP8RhRgapNkWXg xen/arch/ia64/patch/linux-2.6.11/pal.h
    1.25  425ae516QfmjiF_a-mabAXqV8Imzkg xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
    1.26  425ae516EWaNOBEnc1xnphTbRmNZsw xen/arch/ia64/patch/linux-2.6.11/processor.h
    1.27 +428bb037KSxe7_UyqseK5bWhGe3KwA xen/arch/ia64/patch/linux-2.6.11/ptrace.h
    1.28  425ae516LecDyXlwh3NLBtHZKXmMcA xen/arch/ia64/patch/linux-2.6.11/series
    1.29  425ae516RFiPn2CGkpJ21LM-1lJcQg xen/arch/ia64/patch/linux-2.6.11/setup.c
    1.30  425ae516FX_10YaKGMU8Ysf7kkdm_A xen/arch/ia64/patch/linux-2.6.11/swiotlb.c
    1.31 @@ -1190,6 +1196,7 @@ 41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/
    1.32  41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
    1.33  421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
    1.34  421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
    1.35 +428b9f38JJDW35iDn5DlfXTu700rkQ xen/arch/ia64/tools/README.RunVT
    1.36  421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
    1.37  42376c6dfyY0eq8MS2dK3BW2rFuEGg xen/arch/ia64/tools/README.xenia64linux
    1.38  421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
    1.39 @@ -1200,6 +1207,20 @@ 4252ace7H2dIMPFeFwczAVoP4yAHxA xen/arch/
    1.40  4252ace74lKUPFnO8PmF0Dtpk7Xkng xen/arch/ia64/tools/privify/privify_elf64.c
    1.41  41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
    1.42  421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
    1.43 +428b9f38PglyXM-mJJfo19ycuQrEhw xen/arch/ia64/vlsapic.c
    1.44 +428b9f38EmpBsMHL3WbOZoieteBGdQ xen/arch/ia64/vmmu.c
    1.45 +428b9f38hU-X5aX0MIY3EU0Yw4PjcA xen/arch/ia64/vmx_entry.S
    1.46 +428b9f38S76bWI96g7uPLmE-uAcmdg xen/arch/ia64/vmx_init.c
    1.47 +428b9f385AMSyCRYBsckQClQY4ZgHA xen/arch/ia64/vmx_interrupt.c
    1.48 +428b9f380IOjPmj0N6eelH-WJjl1xg xen/arch/ia64/vmx_ivt.S
    1.49 +428b9f38Y7tp9uyNRdru3lPDXLjOCA xen/arch/ia64/vmx_minstate.h
    1.50 +428b9f38H9Pz0ZhRUT0-11A6jceE1w xen/arch/ia64/vmx_phy_mode.c
    1.51 +428b9f38pXU56r2OjoFW2Z8H1XY17w xen/arch/ia64/vmx_process.c
    1.52 +428b9f38GmZxD-GMDnQB3m7tOoukTA xen/arch/ia64/vmx_utility.c
    1.53 +428b9f38Pflg6Z4CtXeVGv7dyEOM4g xen/arch/ia64/vmx_vcpu.c
    1.54 +428b9f38Y7p7hXHWx9QF_oYUjdD__g xen/arch/ia64/vmx_virt.c
    1.55 +428b9f38EL7qKbbKkhBNr0KzMLS4Gg xen/arch/ia64/vmx_vsa.S
    1.56 +428b9f3805WejQ1E-OqAPANPAu8vPw xen/arch/ia64/vtlb.c
    1.57  41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
    1.58  4272adaeit9raZ9KnjO_wR4Ii9LJNQ xen/arch/ia64/xenirq.c
    1.59  427162263zDUiPmTj-lP4eGyXs5eIg xen/arch/ia64/xenmem.c
    1.60 @@ -1339,7 +1360,21 @@ 421098b7GkWOnlzSmPvNAhByOSZ1Dw xen/inclu
    1.61  421098b7FK3xgShpnH0I0Ou3O4fJ2Q xen/include/asm-ia64/tlb.h
    1.62  421098b78IGdFOGUlPmpS7h_QBmoFg xen/include/asm-ia64/vcpu.h
    1.63  421098b7PiAencgmBFGAqALU-V5rqQ xen/include/asm-ia64/vhpt.h
    1.64 +428b9f38_b0DgWwkJcBEsTdEmO9WNQ xen/include/asm-ia64/virt_event.h
    1.65 +428b9f38B0KbUj3o2FBQJ5tmIIMDHg xen/include/asm-ia64/vmmu.h
    1.66 +428b9f38ewjoJ-RL-2lsXFT04H2aag xen/include/asm-ia64/vmx.h
    1.67 +428b9f38coGlYeXx-7hpvfCTAPOd7w xen/include/asm-ia64/vmx_mm_def.h
    1.68 +428b9f387tov0OtOEeF8fVWSR2v5Pg xen/include/asm-ia64/vmx_pal.h
    1.69 +428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
    1.70 +428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
    1.71 +428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
    1.72 +428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
    1.73 +428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
    1.74 +428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
    1.75 +428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
    1.76 +428e120a-H-bqn10zOlnhlzlVEuW8A xen/include/asm-ia64/xenprocessor.h
    1.77  421098b7LfwIHQ2lRYWhO4ruEXqIuQ xen/include/asm-ia64/xenserial.h
    1.78 +428e120esS-Tp1mX5VoUrsGJDNY_ow xen/include/asm-ia64/xensystem.h
    1.79  40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
    1.80  3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
    1.81  3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
     2.1 --- a/xen/arch/ia64/Makefile	Thu May 19 21:22:49 2005 +0000
     2.2 +++ b/xen/arch/ia64/Makefile	Fri May 20 17:23:51 2005 +0000
     2.3 @@ -10,6 +10,12 @@ OBJS = xensetup.o setup.o time.o irq.o i
     2.4  	extable.o linuxextable.o xenirq.o xentime.o \
     2.5  	regionreg.o entry.o unaligned.o privop.o vcpu.o \
     2.6  	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
     2.7 +
     2.8 +ifeq ($(CONFIG_VTI),y)
     2.9 +OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
    2.10 +	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
    2.11 +	vtlb.o mmio.o vlsapic.o
    2.12 +endif
    2.13  # perfmon.o
    2.14  # unwind.o needed for kernel unwinding (rare)
    2.15  
    2.16 @@ -38,6 +44,7 @@ ia64lib.o:
    2.17  clean:
    2.18  	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
    2.19  	rm -f lib/*.o
    2.20 +	$(MAKE) -C lib clean
    2.21  
    2.22  # setup.o contains bits of compile.h so it must be blown away
    2.23  delete-unfresh-files:
     3.1 --- a/xen/arch/ia64/Rules.mk	Thu May 19 21:22:49 2005 +0000
     3.2 +++ b/xen/arch/ia64/Rules.mk	Fri May 20 17:23:51 2005 +0000
     3.3 @@ -1,6 +1,7 @@
     3.4  ########################################
     3.5  # ia64-specific definitions
     3.6  
     3.7 +CONFIG_VTI	?= n
     3.8  ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
     3.9  CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
    3.10  endif
    3.11 @@ -17,4 +18,7 @@ CFLAGS  += -Wno-pointer-arith -Wredundan
    3.12  CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
    3.13  CFLAGS	+= -ffixed-r13 -mfixed-range=f12-f15,f32-f127
    3.14  CFLAGS	+= -w -g
    3.15 +ifeq ($(CONFIG_VTI),y)
    3.16 +CFLAGS  += -DCONFIG_VTI
    3.17 +endif
    3.18  LDFLAGS := -g
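
The two build hunks above work as a pair: Rules.mk defaults CONFIG_VTI to "n" and, when it is set to "y", adds -DCONFIG_VTI to CFLAGS, while the Makefile links in the vmx_*/vtlb/mmio/vlsapic objects under the same test. A minimal standalone sketch of how that define then gates C code (illustrative only, not part of the changeset; the function name is hypothetical):

    /* build with:  cc -DCONFIG_VTI demo.c   -- or without the define */
    #include <stdio.h>

    #ifdef CONFIG_VTI
    static void vmx_setup(void) { puts("CONFIG_VTI=y: VMX support compiled in"); }
    #else
    static void vmx_setup(void) { puts("CONFIG_VTI=n: VMX code compiled out"); }
    #endif

    int main(void)
    {
        vmx_setup();
        return 0;
    }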
     4.1 --- a/xen/arch/ia64/acpi.c	Thu May 19 21:22:49 2005 +0000
     4.2 +++ b/xen/arch/ia64/acpi.c	Fri May 20 17:23:51 2005 +0000
     4.3 @@ -1,9 +1,16 @@
     4.4  /*
     4.5   *  acpi.c - Architecture-Specific Low-Level ACPI Support
     4.6   *
     4.7 - *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
     4.8 - *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
     4.9 - *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
    4.10 + *  Copyright (C) 1999 VA Linux Systems
    4.11 + *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
    4.12 + *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
    4.13 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    4.14 + *  Copyright (C) 2000 Intel Corp.
    4.15 + *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
    4.16 + *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
    4.17 + *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
    4.18 + *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
    4.19 + *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
    4.20   *
    4.21   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    4.22   *
    4.23 @@ -19,667 +26,651 @@
    4.24   *
    4.25   *  You should have received a copy of the GNU General Public License
    4.26   *  along with this program; if not, write to the Free Software
    4.27 - *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    4.28 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
    4.29   *
    4.30   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    4.31   */
    4.32  
    4.33 -#include <xen/config.h>
    4.34 -#include <xen/kernel.h>
    4.35 -#include <xen/init.h>
    4.36 -#include <xen/types.h>
    4.37 -/*#include <xen/stddef.h>*/
    4.38 -#include <xen/slab.h>
    4.39 -//#include <xen/pci.h>
    4.40 -/*#include <xen/bootmem.h>*/
    4.41 -#include <xen/irq.h>
    4.42 -#include <xen/acpi.h>
    4.43 -//#include <asm/mpspec.h>
    4.44 +#include <linux/config.h>
    4.45 +#include <linux/module.h>
    4.46 +#include <linux/init.h>
    4.47 +#include <linux/kernel.h>
    4.48 +#include <linux/sched.h>
    4.49 +#include <linux/smp.h>
    4.50 +#include <linux/string.h>
    4.51 +#include <linux/types.h>
    4.52 +#include <linux/irq.h>
    4.53 +#include <linux/acpi.h>
    4.54 +#include <linux/efi.h>
    4.55 +#include <linux/mmzone.h>
    4.56  #include <asm/io.h>
    4.57 -//#include <asm/apic.h>
    4.58 -//#include <asm/apicdef.h>
    4.59 +//#include <asm/iosapic.h>
    4.60 +#include <asm/machvec.h>
    4.61  #include <asm/page.h>
    4.62 -/*#include <asm/pgtable.h>*/
    4.63 -#include <asm/pgalloc.h>
    4.64 -//#include <asm/io_apic.h>
    4.65 -#include <asm/acpi.h>
    4.66 -/*#include <asm/save_state.h>*/
    4.67 -//#include <asm/smpboot.h>
    4.68 +#include <asm/system.h>
    4.69 +#include <asm/numa.h>
    4.70 +#include <asm/sal.h>
    4.71 +//#include <asm/cyclone.h>
    4.72  
    4.73 +#define BAD_MADT_ENTRY(entry, end) (                                        \
    4.74 +		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
    4.75 +		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
    4.76  
    4.77  #define PREFIX			"ACPI: "
    4.78  
    4.79 -int acpi_lapic = 0;
    4.80 -int acpi_ioapic = 0;
    4.81 +void (*pm_idle) (void);
    4.82 +EXPORT_SYMBOL(pm_idle);
    4.83 +void (*pm_power_off) (void);
    4.84  
    4.85 -/* --------------------------------------------------------------------------
    4.86 -                              Boot-time Configuration
    4.87 -   -------------------------------------------------------------------------- */
    4.88 +unsigned char acpi_kbd_controller_present = 1;
    4.89 +unsigned char acpi_legacy_devices;
    4.90 +
    4.91 +const char *
    4.92 +acpi_get_sysname (void)
    4.93 +{
    4.94 +#ifdef CONFIG_IA64_GENERIC
    4.95 +	unsigned long rsdp_phys;
    4.96 +	struct acpi20_table_rsdp *rsdp;
    4.97 +	struct acpi_table_xsdt *xsdt;
    4.98 +	struct acpi_table_header *hdr;
    4.99 +
   4.100 +	rsdp_phys = acpi_find_rsdp();
   4.101 +	if (!rsdp_phys) {
   4.102 +		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
   4.103 +		return "dig";
   4.104 +	}
   4.105 +
   4.106 +	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
   4.107 +	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
   4.108 +		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
   4.109 +		return "dig";
   4.110 +	}
   4.111 +
   4.112 +	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
   4.113 +	hdr = &xsdt->header;
   4.114 +	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
   4.115 +		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
   4.116 +		return "dig";
   4.117 +	}
   4.118 +
   4.119 +	if (!strcmp(hdr->oem_id, "HP")) {
   4.120 +		return "hpzx1";
   4.121 +	}
   4.122 +	else if (!strcmp(hdr->oem_id, "SGI")) {
   4.123 +		return "sn2";
   4.124 +	}
   4.125 +
   4.126 +	return "dig";
   4.127 +#else
   4.128 +# if defined (CONFIG_IA64_HP_SIM)
   4.129 +	return "hpsim";
   4.130 +# elif defined (CONFIG_IA64_HP_ZX1)
   4.131 +	return "hpzx1";
   4.132 +# elif defined (CONFIG_IA64_SGI_SN2)
   4.133 +	return "sn2";
   4.134 +# elif defined (CONFIG_IA64_DIG)
   4.135 +	return "dig";
   4.136 +# else
   4.137 +#	error Unknown platform.  Fix acpi.c.
   4.138 +# endif
   4.139 +#endif
   4.140 +}
   4.141  
   4.142  #ifdef CONFIG_ACPI_BOOT
   4.143 -//int acpi_noirq __initdata = 0;  /* skip ACPI IRQ initialization */
   4.144 -int acpi_ht __initdata = 1;     /* enable HT */
   4.145  
   4.146 -enum acpi_irq_model_id		acpi_irq_model;
   4.147 +#define ACPI_MAX_PLATFORM_INTERRUPTS	256
   4.148  
   4.149 +#if 0
   4.150 +/* Array to record platform interrupt vectors for generic interrupt routing. */
   4.151 +int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
   4.152 +	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
   4.153 +};
   4.154 +
   4.155 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
   4.156  
   4.157  /*
   4.158 - * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
   4.159 - * to map the target physical address. The problem is that set_fixmap()
   4.160 - * provides a single page, and it is possible that the page is not
   4.161 - * sufficient.
   4.162 - * By using this area, we can map up to MAX_IO_APICS pages temporarily,
   4.163 - * i.e. until the next __va_range() call.
   4.164 - *
   4.165 - * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
   4.166 - * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
   4.167 - * count idx down while incrementing the phys address.
   4.168 + * Interrupt routing API for device drivers.  Provides interrupt vector for
   4.169 + * a generic platform event.  Currently only CPEI is implemented.
   4.170   */
   4.171 -char *__acpi_map_table(unsigned long phys, unsigned long size)
   4.172 +int
   4.173 +acpi_request_vector (u32 int_type)
   4.174  {
   4.175 -	unsigned long base, offset, mapped_size;
   4.176 -	int idx;
   4.177 -
   4.178 -	if (phys + size < 8*1024*1024) 
   4.179 -		return __va(phys); 
   4.180 +	int vector = -1;
   4.181  
   4.182 -	offset = phys & (PAGE_SIZE - 1);
   4.183 -	mapped_size = PAGE_SIZE - offset;
   4.184 -#ifndef XEN
   4.185 -// where is FIX_ACPI_*? hack for now, FIXME later
   4.186 -	set_fixmap(FIX_ACPI_END, phys);
   4.187 -	base = fix_to_virt(FIX_ACPI_END);
   4.188 -
   4.189 -	/*
   4.190 -	 * Most cases can be covered by the below.
   4.191 -	 */
   4.192 -	idx = FIX_ACPI_END;
   4.193 -	while (mapped_size < size) {
   4.194 -		if (--idx < FIX_ACPI_BEGIN)
   4.195 -			return 0;	/* cannot handle this */
   4.196 -		phys += PAGE_SIZE;
   4.197 -		set_fixmap(idx, phys);
   4.198 -		mapped_size += PAGE_SIZE;
   4.199 -	}
   4.200 +	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
   4.201 +		/* corrected platform error interrupt */
   4.202 +		vector = platform_intr_list[int_type];
   4.203 +	} else
   4.204 +		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
   4.205 +	return vector;
   4.206 +}
   4.207  #endif
   4.208 -
   4.209 -	return ((unsigned char *) base + offset);
   4.210 +char *
   4.211 +__acpi_map_table (unsigned long phys_addr, unsigned long size)
   4.212 +{
   4.213 +	return __va(phys_addr);
   4.214  }
   4.215  
   4.216 -
   4.217 -#ifdef CONFIG_X86_LOCAL_APIC
   4.218 +/* --------------------------------------------------------------------------
   4.219 +                            Boot-time Table Parsing
   4.220 +   -------------------------------------------------------------------------- */
   4.221  
   4.222 -static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
   4.223 +static int			total_cpus __initdata;
   4.224 +static int			available_cpus __initdata;
   4.225 +struct acpi_table_madt *	acpi_madt __initdata;
   4.226 +static u8			has_8259;
   4.227  
   4.228 -
   4.229 +#if 0
   4.230  static int __init
   4.231 -acpi_parse_madt (
   4.232 -	unsigned long		phys_addr,
   4.233 -	unsigned long		size)
   4.234 +acpi_parse_lapic_addr_ovr (
   4.235 +	acpi_table_entry_header *header, const unsigned long end)
   4.236  {
   4.237 -	struct acpi_table_madt	*madt = NULL;
   4.238 +	struct acpi_table_lapic_addr_ovr *lapic;
   4.239  
   4.240 -	if (!phys_addr || !size)
   4.241 +	lapic = (struct acpi_table_lapic_addr_ovr *) header;
   4.242 +
   4.243 +	if (BAD_MADT_ENTRY(lapic, end))
   4.244  		return -EINVAL;
   4.245  
   4.246 -	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
   4.247 -	if (!madt) {
   4.248 -		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
   4.249 -		return -ENODEV;
   4.250 -	}
   4.251 +	acpi_table_print_madt_entry(header);
   4.252  
   4.253 -	if (madt->lapic_address)
   4.254 -		acpi_lapic_addr = (u64) madt->lapic_address;
   4.255 -
   4.256 -	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
   4.257 -		madt->lapic_address);
   4.258 -
   4.259 -	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
   4.260 -
   4.261 +	if (lapic->address) {
   4.262 +		iounmap((void *) ipi_base_addr);
   4.263 +		ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
   4.264 +	}
   4.265  	return 0;
   4.266  }
   4.267  
   4.268  
   4.269  static int __init
   4.270 -acpi_parse_lapic (
   4.271 -	acpi_table_entry_header *header)
   4.272 +acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
   4.273  {
   4.274 -	struct acpi_table_lapic	*processor = NULL;
   4.275 +	struct acpi_table_lsapic *lsapic;
   4.276  
   4.277 -	processor = (struct acpi_table_lapic*) header;
   4.278 -	if (!processor)
   4.279 +	lsapic = (struct acpi_table_lsapic *) header;
   4.280 +
   4.281 +	if (BAD_MADT_ENTRY(lsapic, end))
   4.282  		return -EINVAL;
   4.283  
   4.284  	acpi_table_print_madt_entry(header);
   4.285  
   4.286 -	mp_register_lapic (
   4.287 -		processor->id,					   /* APIC ID */
   4.288 -		processor->flags.enabled);			  /* Enabled? */
   4.289 +	printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
   4.290  
   4.291 +	if (!lsapic->flags.enabled)
   4.292 +		printk(" disabled");
   4.293 +	else {
   4.294 +		printk(" enabled");
   4.295 +#ifdef CONFIG_SMP
   4.296 +		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
   4.297 +		if (hard_smp_processor_id()
   4.298 +		    == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
   4.299 +			printk(" (BSP)");
   4.300 +#endif
   4.301 +		++available_cpus;
   4.302 +	}
   4.303 +
   4.304 +	printk("\n");
   4.305 +
   4.306 +	total_cpus++;
   4.307  	return 0;
   4.308  }
   4.309  
   4.310  
   4.311  static int __init
   4.312 -acpi_parse_lapic_addr_ovr (
   4.313 -	acpi_table_entry_header *header)
   4.314 +acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
   4.315  {
   4.316 -	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
   4.317 -
   4.318 -	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
   4.319 -	if (!lapic_addr_ovr)
   4.320 -		return -EINVAL;
   4.321 -
   4.322 -	acpi_lapic_addr = lapic_addr_ovr->address;
   4.323 +	struct acpi_table_lapic_nmi *lacpi_nmi;
   4.324  
   4.325 -	return 0;
   4.326 -}
   4.327 +	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
   4.328  
   4.329 -static int __init
   4.330 -acpi_parse_lapic_nmi (
   4.331 -	acpi_table_entry_header *header)
   4.332 -{
   4.333 -	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
   4.334 -
   4.335 -	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
   4.336 -	if (!lapic_nmi)
   4.337 +	if (BAD_MADT_ENTRY(lacpi_nmi, end))
   4.338  		return -EINVAL;
   4.339  
   4.340  	acpi_table_print_madt_entry(header);
   4.341  
   4.342 -	if (lapic_nmi->lint != 1)
   4.343 -		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
   4.344 -
   4.345 -	return 0;
   4.346 -}
   4.347 -
   4.348 -#endif /*CONFIG_X86_LOCAL_APIC*/
   4.349 -
   4.350 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   4.351 -
   4.352 -static int __init
   4.353 -acpi_parse_ioapic (
   4.354 -	acpi_table_entry_header *header)
   4.355 -{
   4.356 -	struct acpi_table_ioapic *ioapic = NULL;
   4.357 -
   4.358 -	ioapic = (struct acpi_table_ioapic*) header;
   4.359 -	if (!ioapic)
   4.360 -		return -EINVAL;
   4.361 - 
   4.362 -	acpi_table_print_madt_entry(header);
   4.363 -
   4.364 -	mp_register_ioapic (
   4.365 -		ioapic->id,
   4.366 -		ioapic->address,
   4.367 -		ioapic->global_irq_base);
   4.368 - 
   4.369 +	/* TBD: Support lapic_nmi entries */
   4.370  	return 0;
   4.371  }
   4.372  
   4.373  
   4.374  static int __init
   4.375 -acpi_parse_int_src_ovr (
   4.376 -	acpi_table_entry_header *header)
   4.377 +acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
   4.378  {
   4.379 -	struct acpi_table_int_src_ovr *intsrc = NULL;
   4.380 +	struct acpi_table_iosapic *iosapic;
   4.381  
   4.382 -	intsrc = (struct acpi_table_int_src_ovr*) header;
   4.383 -	if (!intsrc)
   4.384 +	iosapic = (struct acpi_table_iosapic *) header;
   4.385 +
   4.386 +	if (BAD_MADT_ENTRY(iosapic, end))
   4.387  		return -EINVAL;
   4.388  
   4.389  	acpi_table_print_madt_entry(header);
   4.390  
   4.391 -	mp_override_legacy_irq (
   4.392 -		intsrc->bus_irq,
   4.393 -		intsrc->flags.polarity,
   4.394 -		intsrc->flags.trigger,
   4.395 -		intsrc->global_irq);
   4.396 +	iosapic_init(iosapic->address, iosapic->global_irq_base);
   4.397  
   4.398  	return 0;
   4.399  }
   4.400  
   4.401  
   4.402  static int __init
   4.403 -acpi_parse_nmi_src (
   4.404 -	acpi_table_entry_header *header)
   4.405 +acpi_parse_plat_int_src (
   4.406 +	acpi_table_entry_header *header, const unsigned long end)
   4.407  {
   4.408 -	struct acpi_table_nmi_src *nmi_src = NULL;
   4.409 +	struct acpi_table_plat_int_src *plintsrc;
   4.410 +	int vector;
   4.411  
   4.412 -	nmi_src = (struct acpi_table_nmi_src*) header;
   4.413 -	if (!nmi_src)
   4.414 +	plintsrc = (struct acpi_table_plat_int_src *) header;
   4.415 +
   4.416 +	if (BAD_MADT_ENTRY(plintsrc, end))
   4.417  		return -EINVAL;
   4.418  
   4.419  	acpi_table_print_madt_entry(header);
   4.420  
   4.421 -	/* TBD: Support nimsrc entries? */
   4.422 +	/*
   4.423 +	 * Get vector assignment for this interrupt, set attributes,
   4.424 +	 * and program the IOSAPIC routing table.
   4.425 +	 */
   4.426 +	vector = iosapic_register_platform_intr(plintsrc->type,
   4.427 +						plintsrc->global_irq,
   4.428 +						plintsrc->iosapic_vector,
   4.429 +						plintsrc->eid,
   4.430 +						plintsrc->id,
   4.431 +						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   4.432 +						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
   4.433  
   4.434 +	platform_intr_list[plintsrc->type] = vector;
   4.435  	return 0;
   4.436  }
   4.437  
   4.438 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   4.439 +
   4.440 +static int __init
   4.441 +acpi_parse_int_src_ovr (
   4.442 +	acpi_table_entry_header *header, const unsigned long end)
   4.443 +{
   4.444 +	struct acpi_table_int_src_ovr *p;
   4.445 +
   4.446 +	p = (struct acpi_table_int_src_ovr *) header;
   4.447 +
   4.448 +	if (BAD_MADT_ENTRY(p, end))
   4.449 +		return -EINVAL;
   4.450 +
   4.451 +	acpi_table_print_madt_entry(header);
   4.452 +
   4.453 +	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
   4.454 +				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   4.455 +				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
   4.456 +	return 0;
   4.457 +}
   4.458  
   4.459  
   4.460 -static unsigned long __init
   4.461 -acpi_scan_rsdp (
   4.462 -	unsigned long		start,
   4.463 -	unsigned long		length)
   4.464 +static int __init
   4.465 +acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
   4.466  {
   4.467 -	unsigned long		offset = 0;
   4.468 -	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
   4.469 +	struct acpi_table_nmi_src *nmi_src;
   4.470  
   4.471 -	/*
   4.472 -	 * Scan all 16-byte boundaries of the physical memory region for the
   4.473 -	 * RSDP signature.
   4.474 -	 */
   4.475 -	for (offset = 0; offset < length; offset += 16) {
   4.476 -		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
   4.477 -			continue;
   4.478 -		return (start + offset);
   4.479 +	nmi_src = (struct acpi_table_nmi_src*) header;
   4.480 +
   4.481 +	if (BAD_MADT_ENTRY(nmi_src, end))
   4.482 +		return -EINVAL;
   4.483 +
   4.484 +	acpi_table_print_madt_entry(header);
   4.485 +
    4.486 +	/* TBD: Support nmi_src entries */
   4.487 +	return 0;
   4.488 +}
   4.489 +/* Hook from generic ACPI tables.c */
   4.490 +void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
   4.491 +{
   4.492 +	if (!strncmp(oem_id, "IBM", 3) &&
   4.493 +	    (!strncmp(oem_table_id, "SERMOW", 6))){
   4.494 +
    4.495 +		/* Unfortunately ITC_DRIFT is not yet part of the
   4.496 +		 * official SAL spec, so the ITC_DRIFT bit is not
   4.497 +		 * set by the BIOS on this hardware.
   4.498 +		 */
   4.499 +		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
   4.500 +
   4.501 +		/*Start cyclone clock*/
   4.502 +		cyclone_setup(0);
   4.503  	}
   4.504 +}
   4.505 +
   4.506 +static int __init
   4.507 +acpi_parse_madt (unsigned long phys_addr, unsigned long size)
   4.508 +{
   4.509 +	if (!phys_addr || !size)
   4.510 +		return -EINVAL;
   4.511 +
   4.512 +	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
   4.513 +
   4.514 +	/* remember the value for reference after free_initmem() */
   4.515 +#ifdef CONFIG_ITANIUM
   4.516 +	has_8259 = 1; /* Firmware on old Itanium systems is broken */
   4.517 +#else
   4.518 +	has_8259 = acpi_madt->flags.pcat_compat;
   4.519 +#endif
   4.520 +	iosapic_system_init(has_8259);
   4.521 +
   4.522 +	/* Get base address of IPI Message Block */
   4.523 +
   4.524 +	if (acpi_madt->lapic_address)
   4.525 +		ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
   4.526 +
   4.527 +	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
   4.528 +
   4.529 +	acpi_madt_oem_check(acpi_madt->header.oem_id,
   4.530 +		acpi_madt->header.oem_table_id);
   4.531  
   4.532  	return 0;
   4.533  }
   4.534 +#endif
   4.535  
   4.536 +#ifdef CONFIG_ACPI_NUMA
   4.537 +
   4.538 +#undef SLIT_DEBUG
   4.539 +
   4.540 +#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
   4.541 +
   4.542 +static int __initdata srat_num_cpus;			/* number of cpus */
   4.543 +static u32 __initdata pxm_flag[PXM_FLAG_LEN];
   4.544 +#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
   4.545 +#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
   4.546 +/* maps to convert between proximity domain and logical node ID */
   4.547 +int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
   4.548 +int __initdata nid_to_pxm_map[MAX_NUMNODES];
   4.549 +static struct acpi_table_slit __initdata *slit_table;
   4.550 +
   4.551 +/*
   4.552 + * ACPI 2.0 SLIT (System Locality Information Table)
   4.553 + * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
   4.554 + */
   4.555 +void __init
   4.556 +acpi_numa_slit_init (struct acpi_table_slit *slit)
   4.557 +{
   4.558 +	u32 len;
   4.559 +
   4.560 +	len = sizeof(struct acpi_table_header) + 8
   4.561 +		+ slit->localities * slit->localities;
   4.562 +	if (slit->header.length != len) {
   4.563 +		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
   4.564 +		       len, slit->header.length);
   4.565 +		memset(numa_slit, 10, sizeof(numa_slit));
   4.566 +		return;
   4.567 +	}
   4.568 +	slit_table = slit;
   4.569 +}
   4.570 +
   4.571 +void __init
   4.572 +acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
   4.573 +{
   4.574 +	/* record this node in proximity bitmap */
   4.575 +	pxm_bit_set(pa->proximity_domain);
   4.576 +
   4.577 +	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
   4.578 +	/* nid should be overridden as logical node id later */
   4.579 +	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
   4.580 +	srat_num_cpus++;
   4.581 +}
   4.582 +
   4.583 +void __init
   4.584 +acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
   4.585 +{
   4.586 +	unsigned long paddr, size;
   4.587 +	u8 pxm;
   4.588 +	struct node_memblk_s *p, *q, *pend;
   4.589 +
   4.590 +	pxm = ma->proximity_domain;
   4.591 +
   4.592 +	/* fill node memory chunk structure */
   4.593 +	paddr = ma->base_addr_hi;
   4.594 +	paddr = (paddr << 32) | ma->base_addr_lo;
   4.595 +	size = ma->length_hi;
   4.596 +	size = (size << 32) | ma->length_lo;
   4.597 +
   4.598 +	/* Ignore disabled entries */
   4.599 +	if (!ma->flags.enabled)
   4.600 +		return;
   4.601 +
   4.602 +	/* record this node in proximity bitmap */
   4.603 +	pxm_bit_set(pxm);
   4.604 +
   4.605 +	/* Insertion sort based on base address */
   4.606 +	pend = &node_memblk[num_node_memblks];
   4.607 +	for (p = &node_memblk[0]; p < pend; p++) {
   4.608 +		if (paddr < p->start_paddr)
   4.609 +			break;
   4.610 +	}
   4.611 +	if (p < pend) {
   4.612 +		for (q = pend - 1; q >= p; q--)
   4.613 +			*(q + 1) = *q;
   4.614 +	}
   4.615 +	p->start_paddr = paddr;
   4.616 +	p->size = size;
   4.617 +	p->nid = pxm;
   4.618 +	num_node_memblks++;
   4.619 +}
   4.620 +
   4.621 +void __init
   4.622 +acpi_numa_arch_fixup (void)
   4.623 +{
   4.624 +	int i, j, node_from, node_to;
   4.625 +
   4.626 +	/* If there's no SRAT, fix the phys_id */
   4.627 +	if (srat_num_cpus == 0) {
   4.628 +		node_cpuid[0].phys_id = hard_smp_processor_id();
   4.629 +		return;
   4.630 +	}
   4.631 +
   4.632 +	/* calculate total number of nodes in system from PXM bitmap */
   4.633 +	numnodes = 0;		/* init total nodes in system */
   4.634 +
   4.635 +	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
   4.636 +	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
   4.637 +	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
   4.638 +		if (pxm_bit_test(i)) {
   4.639 +			pxm_to_nid_map[i] = numnodes;
   4.640 +			node_set_online(numnodes);
   4.641 +			nid_to_pxm_map[numnodes++] = i;
   4.642 +		}
   4.643 +	}
   4.644 +
   4.645 +	/* set logical node id in memory chunk structure */
   4.646 +	for (i = 0; i < num_node_memblks; i++)
   4.647 +		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
   4.648 +
   4.649 +	/* assign memory bank numbers for each chunk on each node */
   4.650 +	for (i = 0; i < numnodes; i++) {
   4.651 +		int bank;
   4.652 +
   4.653 +		bank = 0;
   4.654 +		for (j = 0; j < num_node_memblks; j++)
   4.655 +			if (node_memblk[j].nid == i)
   4.656 +				node_memblk[j].bank = bank++;
   4.657 +	}
   4.658 +
   4.659 +	/* set logical node id in cpu structure */
   4.660 +	for (i = 0; i < srat_num_cpus; i++)
   4.661 +		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
   4.662 +
   4.663 +	printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
   4.664 +	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
   4.665 +
   4.666 +	if (!slit_table) return;
   4.667 +	memset(numa_slit, -1, sizeof(numa_slit));
   4.668 +	for (i=0; i<slit_table->localities; i++) {
   4.669 +		if (!pxm_bit_test(i))
   4.670 +			continue;
   4.671 +		node_from = pxm_to_nid_map[i];
   4.672 +		for (j=0; j<slit_table->localities; j++) {
   4.673 +			if (!pxm_bit_test(j))
   4.674 +				continue;
   4.675 +			node_to = pxm_to_nid_map[j];
   4.676 +			node_distance(node_from, node_to) =
   4.677 +				slit_table->entry[i*slit_table->localities + j];
   4.678 +		}
   4.679 +	}
   4.680 +
   4.681 +#ifdef SLIT_DEBUG
   4.682 +	printk("ACPI 2.0 SLIT locality table:\n");
   4.683 +	for (i = 0; i < numnodes; i++) {
   4.684 +		for (j = 0; j < numnodes; j++)
   4.685 +			printk("%03d ", node_distance(i,j));
   4.686 +		printk("\n");
   4.687 +	}
   4.688 +#endif
   4.689 +}
   4.690 +#endif /* CONFIG_ACPI_NUMA */
   4.691 +
   4.692 +#if 0
   4.693 +unsigned int
   4.694 +acpi_register_gsi (u32 gsi, int polarity, int trigger)
   4.695 +{
   4.696 +	return acpi_register_irq(gsi, polarity, trigger);
   4.697 +}
   4.698 +EXPORT_SYMBOL(acpi_register_gsi);
   4.699 +static int __init
   4.700 +acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
   4.701 +{
   4.702 +	struct acpi_table_header *fadt_header;
   4.703 +	struct fadt_descriptor_rev2 *fadt;
   4.704 +
   4.705 +	if (!phys_addr || !size)
   4.706 +		return -EINVAL;
   4.707 +
   4.708 +	fadt_header = (struct acpi_table_header *) __va(phys_addr);
   4.709 +	if (fadt_header->revision != 3)
   4.710 +		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */
   4.711 +
   4.712 +	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
   4.713 +
   4.714 +	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
   4.715 +		acpi_kbd_controller_present = 0;
   4.716 +
   4.717 +	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
   4.718 +		acpi_legacy_devices = 1;
   4.719 +
   4.720 +	acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
   4.721 +	return 0;
   4.722 +}
   4.723 +#endif
   4.724  
   4.725  unsigned long __init
   4.726  acpi_find_rsdp (void)
   4.727  {
   4.728 -	unsigned long		rsdp_phys = 0;
   4.729 +	unsigned long rsdp_phys = 0;
   4.730  
   4.731 -	/*
   4.732 -	 * Scan memory looking for the RSDP signature. First search EBDA (low
   4.733 -	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
   4.734 -	 */
   4.735 -	rsdp_phys = acpi_scan_rsdp (0, 0x400);
   4.736 -	if (!rsdp_phys)
   4.737 -		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
   4.738 -
   4.739 +	if (efi.acpi20)
   4.740 +		rsdp_phys = __pa(efi.acpi20);
   4.741 +	else if (efi.acpi)
   4.742 +		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
   4.743  	return rsdp_phys;
   4.744  }
   4.745  
   4.746 -
   4.747 -/*
   4.748 - * acpi_boot_init()
   4.749 - *  called from setup_arch(), always.
   4.750 - *	1. maps ACPI tables for later use
   4.751 - *	2. enumerates lapics
   4.752 - *	3. enumerates io-apics
   4.753 - *
   4.754 - * side effects:
   4.755 - * 	acpi_lapic = 1 if LAPIC found
   4.756 - *	acpi_ioapic = 1 if IOAPIC found
   4.757 - *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
   4.758 - *	if acpi_blacklisted() acpi_disabled = 1;
   4.759 - *	acpi_irq_model=...
   4.760 - *	...
   4.761 - *
   4.762 - * return value: (currently ignored)
   4.763 - *	0: success
   4.764 - *	!0: failure
   4.765 - */
   4.766 +#if 0
   4.767  int __init
   4.768  acpi_boot_init (void)
   4.769  {
   4.770 -	int			result = 0;
   4.771 -
   4.772 -	if (acpi_disabled && !acpi_ht)
   4.773 -		return(1);
   4.774  
   4.775  	/*
   4.776 -	 * The default interrupt routing model is PIC (8259).  This gets
   4.777 -	 * overriden if IOAPICs are enumerated (below).
   4.778 -	 */
   4.779 -	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
   4.780 -
   4.781 -	/* 
   4.782 -	 * Initialize the ACPI boot-time table parser.
   4.783 -	 */
   4.784 -	result = acpi_table_init();
   4.785 -	if (result) {
   4.786 -#ifndef XEN
   4.787 -// hack for now, FIXME later
   4.788 -		acpi_disabled = 1;
   4.789 -#endif
   4.790 -		return result;
   4.791 -	}
   4.792 -
   4.793 -	result = acpi_blacklisted();
   4.794 -	if (result) {
   4.795 -		printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
   4.796 -#ifndef XEN
   4.797 -// hack for now, FIXME later
   4.798 -		acpi_disabled = 1;
   4.799 -#endif
   4.800 -		return result;
   4.801 -	}
   4.802 -
   4.803 -#ifdef CONFIG_X86_LOCAL_APIC
   4.804 -
   4.805 -	/* 
   4.806  	 * MADT
   4.807  	 * ----
   4.808  	 * Parse the Multiple APIC Description Table (MADT), if exists.
   4.809 -	 * Note that this table provides platform SMP configuration 
   4.810 +	 * Note that this table provides platform SMP configuration
   4.811  	 * information -- the successor to MPS tables.
   4.812  	 */
   4.813  
   4.814 -	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
   4.815 -	if (!result) {
   4.816 -		return 0;
   4.817 -	}
   4.818 -	else if (result < 0) {
   4.819 -		printk(KERN_ERR PREFIX "Error parsing MADT\n");
   4.820 -		return result;
   4.821 -	}
   4.822 -	else if (result > 1) 
   4.823 -		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
   4.824 -
   4.825 -	/* 
   4.826 -	 * Local APIC
   4.827 -	 * ----------
   4.828 -	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
   4.829 -	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
   4.830 -	 */
   4.831 -
   4.832 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
   4.833 -	if (result < 0) {
   4.834 -		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   4.835 -		return result;
   4.836 +	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
   4.837 +		printk(KERN_ERR PREFIX "Can't find MADT\n");
   4.838 +		goto skip_madt;
   4.839  	}
   4.840  
   4.841 -	mp_register_lapic_address(acpi_lapic_addr);
   4.842 -
   4.843 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
   4.844 -	if (!result) { 
   4.845 -		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
   4.846 -		/* TBD: Cleanup to allow fallback to MPS */
   4.847 -		return -ENODEV;
   4.848 -	}
   4.849 -	else if (result < 0) {
   4.850 -		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
   4.851 -		/* TBD: Cleanup to allow fallback to MPS */
   4.852 -		return result;
   4.853 -	}
   4.854 +	/* Local APIC */
   4.855  
   4.856 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
   4.857 -	if (result < 0) {
   4.858 -		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   4.859 -		/* TBD: Cleanup to allow fallback to MPS */
   4.860 -		return result;
   4.861 -	}
   4.862 +	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
   4.863 +		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   4.864  
   4.865 -	acpi_lapic = 1;
   4.866 -
   4.867 -#endif /*CONFIG_X86_LOCAL_APIC*/
   4.868 +	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
   4.869 +		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
   4.870  
   4.871 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   4.872 +	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
   4.873 +		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   4.874  
   4.875 -	/* 
   4.876 -	 * I/O APIC 
   4.877 -	 * --------
   4.878 -	 */
   4.879 +	/* I/O APIC */
   4.880 +
   4.881 +	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
   4.882 +		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
   4.883 +
   4.884 +	/* System-Level Interrupt Routing */
   4.885 +
   4.886 +	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
   4.887 +		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
   4.888 +
   4.889 +	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
   4.890 +		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   4.891 +
   4.892 +	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
   4.893 +		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   4.894 +  skip_madt:
   4.895  
   4.896  	/*
   4.897 -	 * ACPI interpreter is required to complete interrupt setup,
   4.898 -	 * so if it is off, don't enumerate the io-apics with ACPI.
   4.899 -	 * If MPS is present, it will handle them,
   4.900 -	 * otherwise the system will stay in PIC mode
   4.901 -	 */
   4.902 -	if (acpi_disabled || acpi_noirq) {
   4.903 -		return 1;
   4.904 -	}
   4.905 -
   4.906 -	/*
   4.907 -	 * if "noapic" boot option, don't look for IO-APICs
   4.908 +	 * FADT says whether a legacy keyboard controller is present.
   4.909 +	 * The FADT also contains an SCI_INT line, by which the system
   4.910 +	 * gets interrupts such as power and sleep buttons.  If it's not
   4.911 +	 * on a Legacy interrupt, it needs to be setup.
   4.912  	 */
   4.913 -	if (ioapic_setup_disabled()) {
   4.914 -		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
   4.915 -			"due to 'noapic' option.\n");
   4.916 -		return 1;
   4.917 -        }
   4.918 -
   4.919 +	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
   4.920 +		printk(KERN_ERR PREFIX "Can't find FADT\n");
   4.921  
   4.922 -	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
   4.923 -	if (!result) { 
   4.924 -		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
   4.925 -		return -ENODEV;
   4.926 -	}
   4.927 -	else if (result < 0) {
   4.928 -		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
   4.929 -		return result;
   4.930 +#ifdef CONFIG_SMP
   4.931 +	if (available_cpus == 0) {
   4.932 +		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
   4.933 +		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
   4.934 +		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
   4.935 +		available_cpus = 1; /* We've got at least one of these, no? */
   4.936  	}
   4.937 -
   4.938 -	/* Build a default routing table for legacy (ISA) interrupts. */
   4.939 -	mp_config_acpi_legacy_irqs();
   4.940 -
   4.941 -	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
   4.942 -	if (result < 0) {
   4.943 -		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   4.944 -		/* TBD: Cleanup to allow fallback to MPS */
   4.945 -		return result;
   4.946 -	}
   4.947 +	smp_boot_data.cpu_count = available_cpus;
   4.948  
   4.949 -	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
   4.950 -	if (result < 0) {
   4.951 -		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   4.952 -		/* TBD: Cleanup to allow fallback to MPS */
   4.953 -		return result;
   4.954 +	smp_build_cpu_map();
   4.955 +# ifdef CONFIG_ACPI_NUMA
   4.956 +	if (srat_num_cpus == 0) {
   4.957 +		int cpu, i = 1;
   4.958 +		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
   4.959 +			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
   4.960 +				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
   4.961  	}
   4.962 -
   4.963 -	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
   4.964 +	build_cpu_to_node_map();
   4.965 +# endif
   4.966 +#endif
   4.967 +	/* Make boot-up look pretty */
   4.968 +	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
   4.969 +	return 0;
   4.970 +}
   4.971 +int
   4.972 +acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
   4.973 +{
   4.974 +	int vector;
   4.975  
   4.976 -	acpi_irq_balance_set(NULL);
   4.977 -
   4.978 -	acpi_ioapic = 1;
   4.979 +	if (has_8259 && gsi < 16)
   4.980 +		*irq = isa_irq_to_vector(gsi);
   4.981 +	else {
   4.982 +		vector = gsi_to_vector(gsi);
   4.983 +		if (vector == -1)
   4.984 +			return -1;
   4.985  
   4.986 -	if (acpi_lapic && acpi_ioapic)
   4.987 -		smp_found_config = 1;
   4.988 -
   4.989 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   4.990 -
   4.991 +		*irq = vector;
   4.992 +	}
   4.993  	return 0;
   4.994  }
   4.995  
   4.996 -#endif /*CONFIG_ACPI_BOOT*/
   4.997 -
   4.998 -#ifdef	CONFIG_ACPI_BUS
   4.999 -/*
  4.1000 - * "acpi_pic_sci=level" (current default)
  4.1001 - * programs the PIC-mode SCI to Level Trigger.
  4.1002 - * (NO-OP if the BIOS set Level Trigger already)
  4.1003 - *
  4.1004 - * If a PIC-mode SCI is not recogznied or gives spurious IRQ7's
  4.1005 - * it may require Edge Trigger -- use "acpi_pic_sci=edge"
  4.1006 - * (NO-OP if the BIOS set Edge Trigger already)
  4.1007 - *
  4.1008 - * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
  4.1009 - * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
  4.1010 - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
  4.1011 - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
  4.1012 - */
  4.1013 -
  4.1014 -static __initdata int	acpi_pic_sci_trigger;	/* 0: level, 1: edge */
  4.1015 -
  4.1016 -void __init
  4.1017 -acpi_pic_sci_set_trigger(unsigned int irq)
  4.1018 +int
  4.1019 +acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
  4.1020  {
  4.1021 -	unsigned char mask = 1 << (irq & 7);
  4.1022 -	unsigned int port = 0x4d0 + (irq >> 3);
  4.1023 -	unsigned char val = inb(port);
  4.1024 -
  4.1025 -	
  4.1026 -	printk(PREFIX "IRQ%d SCI:", irq);
  4.1027 -	if (!(val & mask)) {
  4.1028 -		printk(" Edge");
  4.1029 -
  4.1030 -		if (!acpi_pic_sci_trigger) {
  4.1031 -			printk(" set to Level");
  4.1032 -			outb(val | mask, port);
  4.1033 -		}
  4.1034 -	} else {
  4.1035 -		printk(" Level");
  4.1036 -
  4.1037 -		if (acpi_pic_sci_trigger) {
  4.1038 -			printk(" set to Edge");
  4.1039 -			outb(val | mask, port);
  4.1040 -		}
  4.1041 -	}
  4.1042 -	printk(" Trigger.\n");
  4.1043 -}
  4.1044 -
  4.1045 -int __init
  4.1046 -acpi_pic_sci_setup(char *str)
  4.1047 -{
  4.1048 -	while (str && *str) {
  4.1049 -		if (strncmp(str, "level", 5) == 0)
  4.1050 -			acpi_pic_sci_trigger = 0;	/* force level trigger */
  4.1051 -		if (strncmp(str, "edge", 4) == 0)
  4.1052 -			acpi_pic_sci_trigger = 1;	/* force edge trigger */
  4.1053 -		str = strchr(str, ',');
  4.1054 -		if (str)
  4.1055 -			str += strspn(str, ", \t");
  4.1056 -	}
  4.1057 -	return 1;
  4.1058 -}
  4.1059 -
  4.1060 -__setup("acpi_pic_sci=", acpi_pic_sci_setup);
  4.1061 -
  4.1062 -#endif /* CONFIG_ACPI_BUS */
  4.1063 -
  4.1064 -
  4.1065 -
  4.1066 -/* --------------------------------------------------------------------------
  4.1067 -                              Low-Level Sleep Support
  4.1068 -   -------------------------------------------------------------------------- */
  4.1069 -
  4.1070 -#ifdef CONFIG_ACPI_SLEEP
  4.1071 -
  4.1072 -#define DEBUG
  4.1073 -
  4.1074 -#ifdef DEBUG
  4.1075 -#include <xen/serial.h>
  4.1076 -#endif
  4.1077 -
  4.1078 -/* address in low memory of the wakeup routine. */
  4.1079 -unsigned long acpi_wakeup_address = 0;
  4.1080 -
  4.1081 -/* new page directory that we will be using */
  4.1082 -static pmd_t *pmd;
  4.1083 -
  4.1084 -/* saved page directory */
  4.1085 -static pmd_t saved_pmd;
  4.1086 -
  4.1087 -/* page which we'll use for the new page directory */
  4.1088 -static pte_t *ptep;
  4.1089 -
  4.1090 -extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
  4.1091 +	if (has_8259 && gsi < 16)
  4.1092 +		return isa_irq_to_vector(gsi);
  4.1093  
  4.1094 -/*
  4.1095 - * acpi_create_identity_pmd
  4.1096 - *
  4.1097 - * Create a new, identity mapped pmd.
  4.1098 - *
  4.1099 - * Do this by creating new page directory, and marking all the pages as R/W
  4.1100 - * Then set it as the new Page Middle Directory.
  4.1101 - * And, of course, flush the TLB so it takes effect.
  4.1102 - *
  4.1103 - * We save the address of the old one, for later restoration.
  4.1104 - */
  4.1105 -static void acpi_create_identity_pmd (void)
  4.1106 -{
  4.1107 -	pgd_t *pgd;
  4.1108 -	int i;
  4.1109 -
  4.1110 -	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
  4.1111 -
  4.1112 -	/* fill page with low mapping */
  4.1113 -	for (i = 0; i < PTRS_PER_PTE; i++)
  4.1114 -		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
  4.1115 -
  4.1116 -	pgd = pgd_offset(current->active_mm, 0);
  4.1117 -	pmd = pmd_alloc(current->mm,pgd, 0);
  4.1118 -
  4.1119 -	/* save the old pmd */
  4.1120 -	saved_pmd = *pmd;
  4.1121 -
  4.1122 -	/* set the new one */
  4.1123 -	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
  4.1124 -
  4.1125 -	/* flush the TLB */
  4.1126 -	local_flush_tlb();
  4.1127 -}
  4.1128 -
  4.1129 -/*
  4.1130 - * acpi_restore_pmd
  4.1131 - *
  4.1132 - * Restore the old pmd saved by acpi_create_identity_pmd and
  4.1133 - * free the page that said function alloc'd
  4.1134 - */
  4.1135 -static void acpi_restore_pmd (void)
  4.1136 -{
  4.1137 -	set_pmd(pmd, saved_pmd);
  4.1138 -	local_flush_tlb();
  4.1139 -	free_page((unsigned long)ptep);
  4.1140 +	return iosapic_register_intr(gsi,
  4.1141 +			(polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
  4.1142 +			(trigger == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
  4.1143  }
  4.1144 -
  4.1145 -/**
  4.1146 - * acpi_save_state_mem - save kernel state
  4.1147 - *
  4.1148 - * Create an identity mapped page table and copy the wakeup routine to
  4.1149 - * low memory.
  4.1150 - */
  4.1151 -int acpi_save_state_mem (void)
  4.1152 -{
  4.1153 -	acpi_create_identity_pmd();
  4.1154 -	acpi_copy_wakeup_routine(acpi_wakeup_address);
  4.1155 -
  4.1156 -	return 0;
  4.1157 -}
  4.1158 -
  4.1159 -/**
  4.1160 - * acpi_save_state_disk - save kernel state to disk
  4.1161 - *
  4.1162 - */
  4.1163 -int acpi_save_state_disk (void)
  4.1164 -{
  4.1165 -	return 1;
  4.1166 -}
  4.1167 -
  4.1168 -/*
  4.1169 - * acpi_restore_state
  4.1170 - */
  4.1171 -void acpi_restore_state_mem (void)
  4.1172 -{
  4.1173 -	acpi_restore_pmd();
  4.1174 -}
  4.1175 -
  4.1176 -/**
  4.1177 - * acpi_reserve_bootmem - do _very_ early ACPI initialisation
  4.1178 - *
  4.1179 - * We allocate a page in low memory for the wakeup
  4.1180 - * routine for when we come back from a sleep state. The
  4.1181 - * runtime allocator allows specification of <16M pages, but not
  4.1182 - * <1M pages.
  4.1183 - */
  4.1184 -void __init acpi_reserve_bootmem(void)
  4.1185 -{
  4.1186 -	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
  4.1187 -	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
  4.1188 -}
  4.1189 -
  4.1190 -void do_suspend_lowlevel_s4bios(int resume)
  4.1191 -{
  4.1192 -	if (!resume) {
  4.1193 -		save_processor_context();
  4.1194 -		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
  4.1195 -		acpi_enter_sleep_state_s4bios();
  4.1196 -		return;
  4.1197 -	}
  4.1198 -acpi_sleep_done:
  4.1199 -	restore_processor_context();
  4.1200 -}
  4.1201 -
  4.1202 -
  4.1203 -#endif /*CONFIG_ACPI_SLEEP*/
  4.1204 -
  4.1205 +EXPORT_SYMBOL(acpi_register_irq);
  4.1206 +#endif
  4.1207 +#endif /* CONFIG_ACPI_BOOT */
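
A recurring pattern in the rewritten acpi.c above is the BAD_MADT_ENTRY() check that every MADT subtable parser now runs before touching an entry: reject a NULL pointer, an entry extending past the end of the table, or an entry whose declared length disagrees with the expected structure size. A self-contained rendering of the same checks (illustrative sketch with simplified stand-in types, not part of the changeset):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint8_t type; uint8_t length; } acpi_table_entry_header;

    #define BAD_MADT_ENTRY(entry, end) (                                      \
            (!entry) || (uintptr_t)(entry) + sizeof(*(entry)) > (end) ||      \
            ((acpi_table_entry_header *)(entry))->length != sizeof(*(entry)))

    /* stand-in for struct acpi_table_lsapic */
    typedef struct { acpi_table_entry_header header; uint8_t id, eid; } lsapic_t;

    static int parse_lsapic(acpi_table_entry_header *header, uintptr_t end)
    {
        lsapic_t *lsapic = (lsapic_t *)header;

        if (BAD_MADT_ENTRY(lsapic, end))     /* bounds + size check */
            return -1;                       /* -EINVAL in the real code */
        printf("CPU (0x%04x)\n", (lsapic->id << 8) | lsapic->eid);
        return 0;
    }

    int main(void)
    {
        lsapic_t e = { { 7, sizeof(lsapic_t) }, 0, 1 };
        return parse_lsapic(&e.header, (uintptr_t)(&e + 1));
    }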
     5.1 --- a/xen/arch/ia64/asm-offsets.c	Thu May 19 21:22:49 2005 +0000
     5.2 +++ b/xen/arch/ia64/asm-offsets.c	Fri May 20 17:23:51 2005 +0000
     5.3 @@ -9,6 +9,9 @@
     5.4  #include <asm/processor.h>
     5.5  #include <asm/ptrace.h>
     5.6  #include <public/xen.h>
     5.7 +#ifdef CONFIG_VTI
     5.8 +#include <asm/tlb.h>
     5.9 +#endif // CONFIG_VTI
    5.10  
    5.11  #define task_struct exec_domain
    5.12  
    5.13 @@ -93,6 +96,24 @@ void foo(void)
    5.14  	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
    5.15  	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
    5.16  	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
    5.17 +#ifdef CONFIG_VTI
    5.18 +	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct xen_regs, r4));
    5.19 +	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct xen_regs, r5));
    5.20 +	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct xen_regs, r6));
    5.21 +	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct xen_regs, r7));
    5.22 +	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct xen_regs, cr_iipa));
    5.23 +	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
    5.24 +	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
    5.25 +	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
    5.26 +	DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
    5.27 +	DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
    5.28 +	DEFINE(RFI_IFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_ifs));
    5.29 +	DEFINE(RFI_PFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_pfs));
    5.30 +	DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr5));
    5.31 +	DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr6));
    5.32 +	DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr7));
    5.33 +	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mpta));
    5.34 +#endif  //CONFIG_VTI
    5.35  	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
    5.36  	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
    5.37  	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
    5.38 @@ -164,6 +185,13 @@ void foo(void)
    5.39  
    5.40  	BLANK();
    5.41  
    5.42 +#ifdef  CONFIG_VTI
    5.43 +	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
    5.44 +	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
    5.45 +	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
    5.46 +
    5.47 +	BLANK();
    5.48 +#endif  //CONFIG_VTI
    5.49  	//DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
    5.50  	//DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
    5.51  	//DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
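
The CONFIG_VTI DEFINE() lines above feed the usual asm-offsets mechanism: this file is only ever compiled to assembly, and the build greps the emitted marker lines into a generated header that the .S files include, so structure offsets never need to be maintained by hand. A minimal sketch of the idiom, assuming the conventional "->" marker format and an illustrative struct:

    /* Sketch of the asm-offsets idiom: compile to .s, then a build
     * script turns each "->SYMBOL value" marker into #define SYMBOL value. */
    #include <stddef.h>

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct pt_regs_demo { unsigned long r16; unsigned long r17; };

    void foo(void)
    {
        DEFINE(DEMO_R16_OFFSET, offsetof(struct pt_regs_demo, r16));
        DEFINE(DEMO_R17_OFFSET, offsetof(struct pt_regs_demo, r17));
    }
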
     6.1 --- a/xen/arch/ia64/dom_fw.c	Thu May 19 21:22:49 2005 +0000
     6.2 +++ b/xen/arch/ia64/dom_fw.c	Fri May 20 17:23:51 2005 +0000
     6.3 @@ -13,6 +13,7 @@
     6.4  #include <asm/io.h>
     6.5  #include <asm/pal.h>
     6.6  #include <asm/sal.h>
     6.7 +#include <xen/acpi.h>
     6.8  
     6.9  #include <asm/dom_fw.h>
    6.10  
    6.11 @@ -297,6 +298,71 @@ void print_md(efi_memory_desc_t *md)
    6.12  #endif
    6.13  }
    6.14  
    6.15 +#define LSAPIC_NUM 16	// TEMP
    6.16 +static u32 lsapic_flag=1;
    6.17 +
     6.18 +/* Provide only one logical processor (LP) to the guest */
    6.19 +static int 
    6.20 +acpi_update_lsapic (acpi_table_entry_header *header)
    6.21 +{
    6.22 +	struct acpi_table_lsapic *lsapic;
    6.23 +
    6.24 +	lsapic = (struct acpi_table_lsapic *) header;
    6.25 +	if (!lsapic)
    6.26 +		return -EINVAL;
    6.27 +
    6.28 +	if (lsapic->flags.enabled && lsapic_flag) {
    6.29 +		printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
     6.30 +		lsapic_flag = 0; /* disable all the following processors */
    6.31 +	} else if (lsapic->flags.enabled) {
    6.32 +		printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
    6.33 +		lsapic->flags.enabled = 0;
    6.34 +	} else
    6.35 +		printk("lsapic entry is already disabled: 0x%lx\n", (u64)lsapic);
    6.36 +
    6.37 +	return 0;
    6.38 +}
    6.39 +
    6.40 +static int
    6.41 +acpi_update_madt_checksum (unsigned long phys_addr, unsigned long size)
    6.42 +{
     6.43 +	u8 checksum = 0;
     6.44 +	u8 *ptr;
     6.45 +	int len;
     6.46 +	struct acpi_table_madt *acpi_madt;
     6.47 +
     6.48 +	if (!phys_addr || !size)
     6.49 +		return -EINVAL;
     6.50 +
     6.51 +	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
     6.52 +	acpi_madt->header.checksum = 0;
     6.53 +
     6.54 +	/* re-calculate MADT checksum */
     6.55 +	ptr = (u8 *)acpi_madt;
     6.56 +	len = acpi_madt->header.length;
     6.57 +	while (len > 0) {
     6.58 +		checksum = (u8)(checksum + (*ptr++));
     6.59 +		len--;
     6.60 +	}
     6.61 +	acpi_madt->header.checksum = 0x0 - checksum;
     6.62 +
     6.63 +	return 0;
     6.64 +}
    6.65 +
     6.66 +/* Tailor the ACPI MADT, in place at its physical address, for the guest */
     6.67 +void touch_acpi_table(void)
     6.68 +{
     6.69 +	int count = 0;
     6.70 +	count = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, NR_CPUS);
     6.71 +	if (count < 1)
     6.72 +		printk("Error parsing MADT - no LSAPIC entries\n");
     6.73 +	printk("Total %d lsapic entries\n", count);
     6.74 +	acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
     6.75 +
     6.76 +	return;
     6.77 +}
    6.78 +
    6.79 +
    6.80  struct ia64_boot_param *
    6.81  dom_fw_init (struct domain *d, char *args, int arglen, char *fw_mem, int fw_mem_size)
    6.82  {
    6.83 @@ -414,6 +480,9 @@ dom_fw_init (struct domain *d, char *arg
    6.84  			printf(" MPS=%0xlx",efi_tables[i].table);
    6.85  			i++;
    6.86  		}
    6.87 +
    6.88 +		touch_acpi_table();
    6.89 +
    6.90  		if (efi.acpi20) {
    6.91  			efi_tables[i].guid = ACPI_20_TABLE_GUID;
    6.92  			efi_tables[i].table = __pa(efi.acpi20);
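
The checksum fixup in acpi_update_madt_checksum above relies on the standard ACPI rule: every byte of a table, including the checksum byte itself, must sum to zero modulo 256, so after zeroing the field the new checksum is simply the negated byte sum. A standalone sketch of the same arithmetic (the function name is illustrative):

    #include <stdint.h>
    #include <stddef.h>

    /* Value the checksum byte must hold so the whole table sums to 0
     * (mod 256); assumes the checksum field has already been zeroed. */
    static uint8_t acpi_table_checksum(const uint8_t *table, size_t len)
    {
        uint8_t sum = 0;
        while (len--)
            sum = (uint8_t)(sum + *table++);
        return (uint8_t)(0x0 - sum);    /* same trick as the code above */
    }
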
     7.1 --- a/xen/arch/ia64/domain.c	Thu May 19 21:22:49 2005 +0000
     7.2 +++ b/xen/arch/ia64/domain.c	Fri May 20 17:23:51 2005 +0000
     7.3 @@ -3,6 +3,11 @@
     7.4   *
     7.5   *  Pentium III FXSR, SSE support
     7.6   *	Gareth Hughes <gareth@valinux.com>, May 2000
     7.7 + *
     7.8 + *  Copyright (C) 2005 Intel Co
     7.9 + *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
    7.10 + *
    7.11 + * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add CONFIG_VTI domain support
    7.12   */
    7.13  
    7.14  #include <xen/config.h>
    7.15 @@ -32,12 +37,22 @@
    7.16  #include <asm/asm-offsets.h>  /* for IA64_THREAD_INFO_SIZE */
    7.17  
    7.18  #include <asm/vcpu.h>   /* for function declarations */
    7.19 +#ifdef CONFIG_VTI
    7.20 +#include <asm/vmx.h>
    7.21 +#include <asm/vmx_vcpu.h>
    7.22 +#endif // CONFIG_VTI
    7.23  
    7.24  #define CONFIG_DOMAIN0_CONTIGUOUS
    7.25  unsigned long dom0_start = -1L;
    7.26 +#ifdef CONFIG_VTI
    7.27  unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
    7.28  //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    7.29 +unsigned long dom0_align = 256*1024*1024;
    7.30 +#else // CONFIG_VTI
    7.31 +unsigned long dom0_size = 256*1024*1024; //FIXME: Should be configurable
    7.32 +//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    7.33  unsigned long dom0_align = 64*1024*1024;
    7.34 +#endif // CONFIG_VTI
    7.35  #ifdef DOMU_BUILD_STAGING
    7.36  unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
    7.37  unsigned long domU_staging_start;
    7.38 @@ -151,6 +166,58 @@ void arch_free_exec_domain_struct(struct
    7.39  	free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
    7.40  }
    7.41  
    7.42 +#ifdef CONFIG_VTI
    7.43 +void arch_do_createdomain(struct exec_domain *ed)
    7.44 +{
    7.45 +	struct domain *d = ed->domain;
    7.46 +	struct thread_info *ti = alloc_thread_info(ed);
    7.47 +
     7.48 +	/* If the domain is a VMX domain, the shared info area is
     7.49 +	 * created by the domain itself, which then notifies the HV via
     7.50 +	 * a specific hypercall. If the domain is xenolinux, the shared
     7.51 +	 * info area is created by the HV.
     7.52 +	 * Since we don't yet know whether the domain is VMX at this
     7.53 +	 * point (dom0 at parse time, domN at build time), postpone the
     7.54 +	 * possible allocation.
     7.55 +	 */
     7.56 +
     7.57 +	/* FIXME: Because the full virtual cpu context is placed in this
     7.58 +	 * area, it is unlikely to fit into a single shared-info page.
     7.59 +	 * Later we need to split the vcpu context out of vcpu_info and
     7.60 +	 * conform to the normal xen convention.
     7.61 +	 */
    7.62 +	d->shared_info = NULL;
    7.63 +	ed->vcpu_info = (void *)alloc_xenheap_page();
    7.64 +	if (!ed->vcpu_info) {
     7.65 +		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
     7.66 +		while (1);
    7.67 +	}
    7.68 +	memset(ed->vcpu_info, 0, PAGE_SIZE);
    7.69 +
    7.70 +	/* Clear thread_info to clear some important fields, like preempt_count */
    7.71 +	memset(ti, 0, sizeof(struct thread_info));
    7.72 +
    7.73 +	/* Allocate per-domain vTLB and vhpt */
    7.74 +	ed->arch.vtlb = init_domain_tlb(ed);
    7.75 +
     7.76 +	/* The physical->machine page table will be allocated at
     7.77 +	 * final setup time, since we don't know the maximum pfn
     7.78 +	 * number at this stage.
     7.79 +	 */
    7.80 +
     7.81 +	/* FIXME: This is the identity-mapped address range for the
     7.82 +	 * xenheap. Do we need it at all?
     7.83 +	 */
    7.84 +	d->xen_vastart = 0xf000000000000000;
    7.85 +	d->xen_vaend = 0xf300000000000000;
    7.86 +	d->breakimm = 0x1000;
    7.87 +
    7.88 +	// stay on kernel stack because may get interrupts!
    7.89 +	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    7.90 +	// to user stack
    7.91 +	ed->arch._thread.on_ustack = 0;
    7.92 +}
    7.93 +#else // CONFIG_VTI
    7.94  void arch_do_createdomain(struct exec_domain *ed)
    7.95  {
    7.96  	struct domain *d = ed->domain;
    7.97 @@ -193,6 +260,7 @@ void arch_do_createdomain(struct exec_do
    7.98  	// to user stack
    7.99  	ed->arch._thread.on_ustack = 0;
   7.100  }
   7.101 +#endif // CONFIG_VTI
   7.102  
   7.103  void arch_do_boot_vcpu(struct exec_domain *p)
   7.104  {
   7.105 @@ -216,6 +284,70 @@ void domain_relinquish_resources(struct 
   7.106  	dummy();
   7.107  }
   7.108  
   7.109 +#ifdef CONFIG_VTI
   7.110 +void new_thread(struct exec_domain *ed,
   7.111 +                unsigned long start_pc,
   7.112 +                unsigned long start_stack,
   7.113 +                unsigned long start_info)
   7.114 +{
   7.115 +	struct domain *d = ed->domain;
   7.116 +	struct switch_stack *sw;
   7.117 +	struct xen_regs *regs;
   7.118 +	struct ia64_boot_param *bp;
   7.119 +	extern char ia64_ret_from_clone;
   7.120 +	extern char saved_command_line[];
   7.121 +	//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
   7.122 +
   7.123 +
   7.124 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   7.125 +	if (d == dom0) start_pc += dom0_start;
   7.126 +#endif
   7.127 +	regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
   7.128 +	sw = (struct switch_stack *) regs - 1;
   7.129 +	/* Sanity Clear */
   7.130 +	memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
   7.131 +
   7.132 +	if (VMX_DOMAIN(ed)) {
   7.133 +		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
   7.134 +		regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
   7.135 +	} else {
    7.136 +		regs->cr_ipsr = (ia64_getreg(_IA64_REG_PSR)
    7.137 +			| IA64_PSR_BITS_TO_SET | IA64_PSR_BN)
    7.138 +			& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   7.139 +		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   7.140 +	}
   7.141 +	regs->cr_iip = start_pc;
   7.142 +	regs->ar_rsc = 0x0;
   7.143 +	regs->cr_ifs = 0x0;
   7.144 +	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   7.145 +	sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET;
   7.146 +	printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   7.147 +		ed,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
   7.148 +	printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
   7.149 +
   7.150 +	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   7.151 +	ed->arch._thread.ksp = (unsigned long) sw - 16;
   7.152 +	printk("new_thread, about to call init_all_rr\n");
   7.153 +	if (VMX_DOMAIN(ed)) {
   7.154 +		vmx_init_all_rr(ed);
   7.155 +	} else
   7.156 +		init_all_rr(ed);
   7.157 +	// set up boot parameters (and fake firmware)
   7.158 +	printk("new_thread, about to call dom_fw_setup\n");
   7.159 +	VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   7.160 +	printk("new_thread, done with dom_fw_setup\n");
   7.161 +
   7.162 +	if (VMX_DOMAIN(ed)) {
   7.163 +		/* Virtual processor context setup */
   7.164 +		VMX_VPD(ed, vpsr) = IA64_PSR_BN;
   7.165 +		VPD_CR(ed, dcr) = 0;
   7.166 +	} else {
   7.167 +		// don't forget to set this!
   7.168 +		ed->vcpu_info->arch.banknum = 1;
   7.169 +	}
   7.170 +}
   7.171 +#else // CONFIG_VTI
   7.172 +
   7.173  // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   7.174  // and linux/arch/ia64/kernel/process.c:kernel_thread()
   7.175  void new_thread(struct exec_domain *ed,
   7.176 @@ -272,6 +404,7 @@ printk("new_thread, done with dom_fw_set
   7.177  	// don't forget to set this!
   7.178  	ed->vcpu_info->arch.banknum = 1;
   7.179  }
   7.180 +#endif // CONFIG_VTI
   7.181  
   7.182  static struct page * map_new_domain0_page(unsigned long mpaddr)
   7.183  {
   7.184 @@ -599,6 +732,214 @@ domU_staging_write_32(unsigned long at, 
   7.185  }
   7.186  #endif
   7.187  
   7.188 +#ifdef CONFIG_VTI
    7.189 +/* Depending on whether the domain is a vmx one, different context
    7.190 + * may be set up here.
    7.191 + */
   7.192 +void
   7.193 +post_arch_do_create_domain(struct exec_domain *ed, int vmx_domain)
   7.194 +{
   7.195 +    struct domain *d = ed->domain;
   7.196 +
   7.197 +    if (!vmx_domain) {
   7.198 +	d->shared_info = (void*)alloc_xenheap_page();
   7.199 +	if (!d->shared_info)
    7.200 +		panic("Allocating shared info for non-vmx domain failed.\n");
   7.201 +	d->shared_info_va = 0xfffd000000000000;
   7.202 +
   7.203 +	printk("Build shared info for non-vmx domain\n");
   7.204 +	build_shared_info(d);
   7.205 +	/* Setup start info area */
   7.206 +    }
   7.207 +}
   7.208 +
    7.209 +/* For a VMX domain, this is invoked when the kernel module in the
    7.210 + * domain actively requests it.
    7.211 + */
   7.212 +void build_shared_info(struct domain *d)
   7.213 +{
   7.214 +    int i;
   7.215 +
   7.216 +    /* Set up shared-info area. */
   7.217 +    update_dom_time(d);
   7.218 +    d->shared_info->domain_time = 0;
   7.219 +
   7.220 +    /* Mask all upcalls... */
   7.221 +    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   7.222 +        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   7.223 +
   7.224 +    /* ... */
   7.225 +}
   7.226 +
   7.227 +extern unsigned long running_on_sim;
   7.228 +unsigned int vmx_dom0 = 0;
   7.229 +int construct_dom0(struct domain *d, 
   7.230 +	               unsigned long image_start, unsigned long image_len, 
   7.231 +	               unsigned long initrd_start, unsigned long initrd_len,
   7.232 +	               char *cmdline)
   7.233 +{
   7.234 +    char *dst;
   7.235 +    int i, rc;
   7.236 +    unsigned long pfn, mfn;
   7.237 +    unsigned long nr_pt_pages;
   7.238 +    unsigned long count;
   7.239 +    unsigned long alloc_start, alloc_end;
   7.240 +    struct pfn_info *page = NULL;
   7.241 +    start_info_t *si;
   7.242 +    struct exec_domain *ed = d->exec_domain[0];
   7.243 +    struct domain_setup_info dsi;
   7.244 +    unsigned long p_start;
   7.245 +    unsigned long pkern_start;
   7.246 +    unsigned long pkern_entry;
   7.247 +    unsigned long pkern_end;
   7.248 +
   7.249 +//printf("construct_dom0: starting\n");
   7.250 +    /* Sanity! */
   7.251 +#ifndef CLONE_DOMAIN0
   7.252 +    if ( d != dom0 ) 
   7.253 +        BUG();
   7.254 +    if ( test_bit(DF_CONSTRUCTED, &d->flags) ) 
   7.255 +        BUG();
   7.256 +#endif
   7.257 +
   7.258 +    printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   7.259 +    memset(&dsi, 0, sizeof(struct domain_setup_info));
   7.260 +
   7.261 +    printk("*** LOADING DOMAIN 0 ***\n");
   7.262 +
   7.263 +    alloc_start = dom0_start;
   7.264 +    alloc_end = dom0_start + dom0_size;
   7.265 +    d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE;
   7.266 +    image_start = __va(ia64_boot_param->initrd_start);
   7.267 +    image_len = ia64_boot_param->initrd_size;
   7.268 +
   7.269 +    dsi.image_addr = (unsigned long)image_start;
   7.270 +    dsi.image_len  = image_len;
   7.271 +    rc = parseelfimage(&dsi);
   7.272 +    if ( rc != 0 )
   7.273 +        return rc;
   7.274 +
   7.275 +    /* Temp workaround */
   7.276 +    if (running_on_sim)
   7.277 +	dsi.xen_elf_image = 1;
   7.278 +
   7.279 +    if ((!vmx_enabled) && !dsi.xen_elf_image) {
   7.280 +	printk("Lack of hardware support for unmodified vmx dom0\n");
   7.281 +	panic("");
   7.282 +    }
   7.283 +
   7.284 +    if (vmx_enabled && !dsi.xen_elf_image) {
   7.285 +	printk("Dom0 is vmx domain!\n");
   7.286 +	vmx_dom0 = 1;
   7.287 +    }
   7.288 +
   7.289 +    p_start = dsi.v_start;
   7.290 +    pkern_start = dsi.v_kernstart;
   7.291 +    pkern_end = dsi.v_kernend;
   7.292 +    pkern_entry = dsi.v_kernentry;
   7.293 +
   7.294 +    printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",
   7.295 +	p_start,pkern_start,pkern_end,pkern_entry);
   7.296 +
   7.297 +    if ( (p_start & (PAGE_SIZE-1)) != 0 )
   7.298 +    {
   7.299 +        printk("Initial guest OS must load to a page boundary.\n");
   7.300 +        return -EINVAL;
   7.301 +    }
   7.302 +
   7.303 +    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
   7.304 +           " Kernel image:  %lx->%lx\n"
   7.305 +           " Entry address: %lx\n"
   7.306 +           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
   7.307 +           pkern_start, pkern_end, pkern_entry);
   7.308 +
   7.309 +    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
   7.310 +    {
   7.311 +        printk("Initial guest OS requires too much space\n"
   7.312 +               "(%luMB is greater than %luMB limit)\n",
   7.313 +               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
   7.314 +        return -ENOMEM;
   7.315 +    }
   7.316 +
   7.317 +    // Other sanity check about Dom0 image
   7.318 +
   7.319 +    /* Construct a frame-allocation list for the initial domain, since these
   7.320 +     * pages are allocated by boot allocator and pfns are not set properly
   7.321 +     */
   7.322 +    for ( mfn = (alloc_start>>PAGE_SHIFT); 
   7.323 +          mfn < (alloc_end>>PAGE_SHIFT); 
   7.324 +          mfn++ )
   7.325 +    {
   7.326 +        page = &frame_table[mfn];
   7.327 +        page_set_owner(page, d);
   7.328 +        page->u.inuse.type_info = 0;
   7.329 +        page->count_info        = PGC_allocated | 1;
   7.330 +        list_add_tail(&page->list, &d->page_list);
   7.331 +
   7.332 +	/* Construct 1:1 mapping */
   7.333 +	machine_to_phys_mapping[mfn] = mfn;
   7.334 +    }
   7.335 +
   7.336 +    post_arch_do_create_domain(ed, vmx_dom0);
   7.337 +
   7.338 +    /* Load Dom0 image to its own memory */
   7.339 +    loaddomainelfimage(d,image_start);
   7.340 +
   7.341 +    /* Copy the initial ramdisk. */
   7.342 +
   7.343 +    /* Sync d/i cache conservatively */
   7.344 +    {
   7.345 +        unsigned long ret;
   7.346 +        unsigned long progress;
   7.347 +        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   7.348 +        if (ret != PAL_STATUS_SUCCESS)
   7.349 +                panic("PAL CACHE FLUSH failed for dom0.\n");
    7.350 +        printk("Sync i/d cache for dom0 image succeeded\n");
   7.351 +    }
    7.352 +    /* Physical mode emulation initialization, including
    7.353 +     * emulation ID allocation and related memory requests
    7.354 +     */
   7.355 +    physical_mode_init(ed);
   7.356 +    /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
   7.357 +     * for dom0
   7.358 +     */
   7.359 +    d->arch.pmt = NULL;
   7.360 +
   7.361 +    /* Give up the VGA console if DOM0 is configured to grab it. */
   7.362 +    if (cmdline != NULL)
   7.363 +    	console_endboot(strstr(cmdline, "tty0") != NULL);
   7.364 +
    7.365 +    /* VMX-specific construction for Dom0, if hardware supports VMX
    7.366 +     * and Dom0 is an unmodified image
    7.367 +     */
   7.368 +    printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   7.369 +    if (vmx_dom0)
   7.370 +	vmx_final_setup_domain(dom0);
   7.371 +    
   7.372 +    /* vpd is ready now */
   7.373 +    vlsapic_reset(ed);
   7.374 +    vtm_init(ed);
   7.375 +    set_bit(DF_CONSTRUCTED, &d->flags);
   7.376 +
   7.377 +    new_thread(ed, pkern_entry, 0, 0);
   7.378 +
   7.379 +    // FIXME: Hack for keyboard input
   7.380 +#ifdef CLONE_DOMAIN0
   7.381 +if (d == dom0)
   7.382 +#endif
   7.383 +    serial_input_init();
   7.384 +    if (d == dom0) {
   7.385 +    	ed->vcpu_info->arch.delivery_mask[0] = -1L;
   7.386 +    	ed->vcpu_info->arch.delivery_mask[1] = -1L;
   7.387 +    	ed->vcpu_info->arch.delivery_mask[2] = -1L;
   7.388 +    	ed->vcpu_info->arch.delivery_mask[3] = -1L;
   7.389 +    }
   7.390 +    else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   7.391 +
   7.392 +    return 0;
   7.393 +}
   7.394 +#else //CONFIG_VTI
   7.395 +
   7.396  int construct_dom0(struct domain *d, 
   7.397  	               unsigned long image_start, unsigned long image_len, 
   7.398  	               unsigned long initrd_start, unsigned long initrd_len,
   7.399 @@ -771,6 +1112,7 @@ if (d == dom0)
   7.400  
   7.401  	return 0;
   7.402  }
   7.403 +#endif // CONFIG_VTI
   7.404  
   7.405  // FIXME: When dom0 can construct domains, this goes away (or is rewritten)
   7.406  int construct_domU(struct domain *d,
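
Since dom0's memory is handed out as one contiguous machine range, the frame loop in the VTI construct_dom0 above can make the guest-physical to machine translation the identity, which is also why d->arch.pmt is left NULL. A toy sketch of the invariant being established (the array and constants are illustrative stand-ins for the real frame_table/machine_to_phys_mapping structures):

    /* Illustrative identity p2m setup for a contiguous dom0 range. */
    #define DOM0_FIRST_MFN 0x10000UL
    #define DOM0_NR_PAGES  0x4000UL

    static unsigned long machine_to_phys[DOM0_FIRST_MFN + DOM0_NR_PAGES];

    static void build_identity_p2m(void)
    {
        unsigned long mfn;
        for (mfn = DOM0_FIRST_MFN; mfn < DOM0_FIRST_MFN + DOM0_NR_PAGES; mfn++)
            machine_to_phys[mfn] = mfn;   /* pfn == mfn for dom0 */
    }
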
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/ia64/mmio.c	Fri May 20 17:23:51 2005 +0000
     8.3 @@ -0,0 +1,325 @@
     8.4 +
     8.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     8.6 +/*
     8.7 + * mmio.c: MMIO emulation components.
     8.8 + * Copyright (c) 2004, Intel Corporation.
     8.9 + *
    8.10 + * This program is free software; you can redistribute it and/or modify it
    8.11 + * under the terms and conditions of the GNU General Public License,
    8.12 + * version 2, as published by the Free Software Foundation.
    8.13 + *
    8.14 + * This program is distributed in the hope it will be useful, but WITHOUT
    8.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    8.17 + * more details.
    8.18 + *
    8.19 + * You should have received a copy of the GNU General Public License along with
    8.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    8.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    8.22 + *
    8.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
    8.24 + *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
    8.25 + */
    8.26 +
    8.27 +#include <linux/sched.h>
    8.28 +#include <asm/tlb.h>
    8.29 +#include <asm/vmx_mm_def.h>
    8.30 +#include <asm/gcc_intrin.h>
    8.31 +#include <xen/interrupt.h>
    8.32 +#include <asm/vmx_vcpu.h>
    8.33 +
    8.34 +struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
    8.35 +{
    8.36 +    int     i;
    8.37 +    for (i=0; mio_base[i].iot != NOT_IO; i++ ) {
    8.38 +        if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end )
    8.39 +            return &mio_base[i];
    8.40 +    }
    8.41 +    return NULL;
    8.42 +}
    8.43 +
    8.44 +
    8.45 +extern void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma);
    8.46 +static inline void mmio_write(VCPU *vcpu, void *src, u64 dest_pa, size_t s, int ma)
    8.47 +{
    8.48 +    struct virutal_platform_def *v_plat;
    8.49 +    struct mmio_list    *mio;
    8.50 +    
    8.51 +    v_plat = vmx_vcpu_get_plat(vcpu);
    8.52 +    mio = lookup_mmio(dest_pa, v_plat->mmio);
    8.53 +    if ( mio == NULL ) 
    8.54 +        panic ("Wrong address for MMIO\n");
    8.55 +    
    8.56 +    switch (mio->iot) {
    8.57 +    case PIB_MMIO:
    8.58 +        pib_write(vcpu, src, dest_pa - v_plat->pib_base, s, ma);
    8.59 +        break;
    8.60 +    case VGA_BUFF:
    8.61 +    case CHIPSET_IO:
    8.62 +    case LOW_MMIO:
    8.63 +    case LEGACY_IO:
    8.64 +    case IO_SAPIC:
    8.65 +    default:
    8.66 +        break;
    8.67 +    }
    8.68 +    return;
    8.69 +}
    8.70 +
    8.71 +static inline void mmio_read(VCPU *vcpu, u64 src_pa, void *dest, size_t s, int ma)
    8.72 +{
    8.73 +    struct virutal_platform_def *v_plat;
    8.74 +    struct mmio_list    *mio;
    8.75 +    
    8.76 +    v_plat = vmx_vcpu_get_plat(vcpu);
    8.77 +    mio = lookup_mmio(src_pa, v_plat->mmio);
    8.78 +    if ( mio == NULL ) 
    8.79 +        panic ("Wrong address for MMIO\n");
    8.80 +    
    8.81 +    switch (mio->iot) {
    8.82 +    case PIB_MMIO:
    8.83 +        pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    8.84 +        break;
    8.85 +    case VGA_BUFF:
    8.86 +    case CHIPSET_IO:
    8.87 +    case LOW_MMIO:
    8.88 +    case LEGACY_IO:
    8.89 +    case IO_SAPIC:
    8.90 +    default:
    8.91 +        break;
    8.92 +    }
    8.93 +    return;
    8.94 +}
    8.95 +
    8.96 +/*
    8.97 + * Read or write data in guest virtual address mode.
    8.98 + */
    8.99 + 
   8.100 +void
   8.101 +memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
   8.102 +{
   8.103 +    uint64_t pa;
   8.104 +
   8.105 +    if (!vtlb->nomap)
   8.106 +        panic("Normal memory write shouldn't go to this point!");
   8.107 +    pa = PPN_2_PA(vtlb->ppn);
   8.108 +    pa += POFFSET((u64)dest, vtlb->ps);
   8.109 +    mmio_write (vcpu, src, pa, s, vtlb->ma);
   8.110 +}
   8.111 +
   8.112 +
   8.113 +void
   8.114 +memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s)
   8.115 +{
   8.116 +    uint64_t pa = (uint64_t)dest;
   8.117 +    int    ma;
   8.118 +
    8.119 +    if ( pa & (1UL << 63) ) {
    8.120 +        // UC
    8.121 +        ma = 4;
    8.122 +        pa <<= 1;
    8.123 +        pa >>= 1;
    8.124 +    }
    8.125 +    else {
    8.126 +        // WBL
    8.127 +        ma = 0;     // using WB for WBL
    8.128 +    }
   8.129 +    mmio_write (vcpu, src, pa, s, ma);
   8.130 +}
   8.131 +
   8.132 +void
   8.133 +memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
   8.134 +{
   8.135 +    uint64_t pa;
   8.136 +
   8.137 +    if (!vtlb->nomap)
    8.138 +        panic("Normal memory read shouldn't go to this point!");
   8.139 +    pa = PPN_2_PA(vtlb->ppn);
   8.140 +    pa += POFFSET((u64)src, vtlb->ps);
   8.141 +    
   8.142 +    mmio_read(vcpu, pa, dest, s, vtlb->ma);
   8.143 +}
   8.144 +
   8.145 +void
   8.146 +memread_p(VCPU *vcpu, void *src, void *dest, size_t s)
   8.147 +{
   8.148 +    uint64_t pa = (uint64_t)src;
   8.149 +    int    ma;
   8.150 +
    8.151 +    if ( pa & (1UL << 63) ) {
    8.152 +        // UC
    8.153 +        ma = 4;
    8.154 +        pa <<= 1;
    8.155 +        pa >>= 1;
    8.156 +    }
    8.157 +    else {
    8.158 +        // WBL
    8.159 +        ma = 0;     // using WB for WBL
    8.160 +    }
   8.161 +    mmio_read(vcpu, pa, dest, s, ma);
   8.162 +}
   8.163 +
    8.164 +#define	PIB_LOW_HALF(ofst)	(!((ofst) & (1 << 20)))
   8.165 +#define PIB_OFST_INTA           0x1E0000
   8.166 +#define PIB_OFST_XTP            0x1E0008
   8.167 +
   8.168 +
    8.169 +/*
    8.170 + * Deliver an IPI message. (Only U-VP is supported now)
    8.171 + *  dm:     delivery mode.
    8.172 + *  vector: interrupt vector.
    8.173 + */
   8.174 +static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
   8.175 +{
   8.176 +#ifdef  IPI_DEBUG
   8.177 +  printf ("deliver_ipi %lx %lx\n",dm,vector);
   8.178 +#endif
   8.179 +    switch ( dm ) {
   8.180 +    case 0:     // INT
   8.181 +        vmx_vcpu_pend_interrupt (vcpu, vector);
   8.182 +        break;
   8.183 +    case 2:     // PMI
   8.184 +        // TODO -- inject guest PMI
   8.185 +        panic ("Inject guest PMI!\n");
   8.186 +        break;
   8.187 +    case 4:     // NMI
   8.188 +        vmx_vcpu_pend_interrupt (vcpu, 2);     
   8.189 +        break;
   8.190 +    case 5:     // INIT
   8.191 +        // TODO -- inject guest INIT
   8.192 +        panic ("Inject guest INIT!\n");
   8.193 +        break;
   8.194 +    case 7:     // ExtINT
   8.195 +        vmx_vcpu_pend_interrupt (vcpu, 0);     
   8.196 +        break;
   8.197 +        
   8.198 +    case 1:
   8.199 +    case 3:
   8.200 +    case 6:
   8.201 +    default:
   8.202 +        panic ("Deliver reserved IPI!\n");
   8.203 +        break;
   8.204 +    }   
   8.205 +}
   8.206 +
   8.207 +/*
   8.208 + * TODO: Use hash table for the lookup.
   8.209 + */
   8.210 +static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
   8.211 +{
   8.212 +	int   i;
   8.213 +	VCPU  *vcpu;
   8.214 +	LID	  lid;
   8.215 +	
   8.216 +	for (i=0; i<MAX_VIRT_CPUS; i++) {
   8.217 +		vcpu = d->exec_domain[i];
   8.218 +		lid.val = VPD_CR(vcpu, lid);
   8.219 +		if ( lid.id == id && lid.eid == eid ) {
   8.220 +		    return vcpu;
   8.221 +		}
   8.222 +	}
   8.223 +	return NULL;
   8.224 +}
   8.225 +
   8.226 +/*
   8.227 + * execute write IPI op.
   8.228 + */
   8.229 +static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
   8.230 +{
   8.231 +    VCPU   *target_cpu;
   8.232 +    
   8.233 +    target_cpu = lid_2_vcpu(vcpu->domain, 
   8.234 +    				((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   8.235 +    if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
   8.236 +    if ( target_cpu == vcpu ) {
   8.237 +    	// IPI to self
   8.238 +        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
   8.239 +                ((ipi_d_t)value).vector);
   8.240 +        return 1;
   8.241 +    }
   8.242 +    else {
   8.243 +    	// TODO: send Host IPI to inject guest SMP IPI interruption
    8.244 +        panic ("SM-VP not supported yet!\n");
   8.245 +        return 0;
   8.246 +    }
   8.247 +}
   8.248 +
   8.249 +void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
   8.250 +{
    8.251 +
   8.252 +    switch (pib_off) {
   8.253 +    case PIB_OFST_INTA:
   8.254 +        panic("Undefined write on PIB INTA\n");
   8.255 +        break;
   8.256 +    case PIB_OFST_XTP:
   8.257 +        if ( s == 1 && ma == 4 /* UC */) {
   8.258 +            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
   8.259 +        }
   8.260 +        else {
   8.261 +            panic("Undefined write on PIB XTP\n");
   8.262 +        }
   8.263 +        break;
   8.264 +    default:
   8.265 +        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
   8.266 +            if ( s != 8 || ma != 0x4 /* UC */ ) {
   8.267 +                panic("Undefined IPI-LHF write!\n");
   8.268 +            }
   8.269 +            else {
   8.270 +                write_ipi(vcpu, pib_off, *(uint64_t *)src);
   8.271 +                // TODO for SM-VP
   8.272 +            }
   8.273 +        }
   8.274 +        else {      // upper half
   8.275 +            printf("IPI-UHF write %lx\n",pib_off);
    8.276 +            panic("Not supported yet for SM-VP\n");
   8.277 +        }
   8.278 +        break;
   8.279 +    }
   8.280 +}
   8.281 +
   8.282 +void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
   8.283 +{
   8.284 +    switch (pib_off) {
   8.285 +    case PIB_OFST_INTA:
   8.286 +        // todo --- emit on processor system bus.
   8.287 +        if ( s == 1 && ma == 4) { // 1 byte load
   8.288 +            // TODO: INTA read from IOSAPIC
   8.289 +        }
   8.290 +        else {
   8.291 +            panic("Undefined read on PIB INTA\n");
   8.292 +        }
   8.293 +        break;
   8.294 +    case PIB_OFST_XTP:
   8.295 +        if ( s == 1 && ma == 4) {
   8.296 +            *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
   8.297 +        }
   8.298 +        else {
   8.299 +            panic("Undefined read on PIB XTP\n");
   8.300 +        }
   8.301 +        break;
   8.302 +    default:
   8.303 +        if ( PIB_LOW_HALF(pib_off) ) {   // lower half
   8.304 +            if ( s != 8 || ma != 4 ) {
   8.305 +                panic("Undefined IPI-LHF read!\n");
   8.306 +            }
   8.307 +            else {
   8.308 +#ifdef  IPI_DEBUG
   8.309 +                printf("IPI-LHF read %lx\n",pib_off);
   8.310 +#endif
   8.311 +                *(uint64_t *)dest = 0;  // TODO for SM-VP
   8.312 +            }
   8.313 +        }
   8.314 +        else {      // upper half
   8.315 +            if ( s != 1 || ma != 4 ) {
   8.316 +                panic("Undefined PIB-UHF read!\n");
   8.317 +            }
   8.318 +            else {
   8.319 +#ifdef  IPI_DEBUG
   8.320 +                printf("IPI-UHF read %lx\n",pib_off);
   8.321 +#endif
   8.322 +                *(uint8_t *)dest = 0;   // TODO for SM-VP
   8.323 +            }
   8.324 +        }
   8.325 +        break;
   8.326 +    }
   8.327 +}
   8.328 +
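
write_ipi() and deliver_ipi() above pull the target processor out of bitfields of the PIB offset (ipi_a_t) and the delivery mode/vector out of the written value (ipi_d_t); those union types live in the VTI headers elsewhere in this changeset. A self-contained sketch of the same decode with explicit shifts — the vector (bits 0-7) and delivery mode (bits 8-10) follow the architected interrupt message format, while the id/eid positions shown are assumptions standing in for the real ipi_a_t layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Hedged sketch of PIB IPI decoding; the dm values match the switch
     * in deliver_ipi() above (0=INT, 2=PMI, 4=NMI, 5=INIT, 7=ExtINT). */
    static void decode_ipi(uint64_t addr, uint64_t value)
    {
        unsigned id     = (addr >> 12) & 0xff;   /* assumed field position */
        unsigned eid    = (addr >> 4)  & 0xff;   /* assumed field position */
        unsigned vector = value & 0xff;
        unsigned dm     = (value >> 8) & 0x7;

        static const char *dm_name[8] = {
            "INT", "resv", "PMI", "resv", "NMI", "INIT", "resv", "ExtINT"
        };
        printf("IPI to (id=%u, eid=%u): dm=%s vector=%u\n",
               id, eid, dm_name[dm], vector);
    }
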
     9.1 --- a/xen/arch/ia64/patch/linux-2.6.11/entry.S	Thu May 19 21:22:49 2005 +0000
     9.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.S	Fri May 20 17:23:51 2005 +0000
     9.3 @@ -1,5 +1,5 @@
     9.4 ---- ../../linux-2.6.11/arch/ia64/kernel/entry.S	2005-03-02 00:37:50.000000000 -0700
     9.5 -+++ arch/ia64/entry.S	2005-04-29 14:54:13.000000000 -0600
     9.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.S	2005-03-01 23:37:50.000000000 -0800
     9.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/entry.S	2005-05-18 12:40:51.000000000 -0700
     9.8  @@ -46,6 +46,7 @@
     9.9   
    9.10   #include "minstate.h"
    9.11 @@ -73,19 +73,23 @@
    9.12   
    9.13   GLOBAL_ENTRY(ia64_ret_from_clone)
    9.14   	PT_REGS_UNWIND_INFO(0)
    9.15 -@@ -604,6 +626,11 @@
    9.16 +@@ -604,6 +626,15 @@
    9.17   	 */
    9.18   	br.call.sptk.many rp=ia64_invoke_schedule_tail
    9.19   }
    9.20  +#ifdef XEN
    9.21  +	// new domains are cloned but not exec'ed so switch to user mode here
    9.22  +	cmp.ne pKStk,pUStk=r0,r0
    9.23 ++#ifdef CONFIG_VTI
    9.24 ++	br.cond.spnt ia64_leave_hypervisor
    9.25 ++#else // CONFIG_VTI
    9.26  +	br.cond.spnt ia64_leave_kernel
    9.27 ++#endif // CONFIG_VTI
    9.28  +#else
    9.29   .ret8:
    9.30   	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
    9.31   	;;
    9.32 -@@ -614,6 +641,7 @@
    9.33 +@@ -614,6 +645,7 @@
    9.34   	;;
    9.35   	cmp.ne p6,p0=r2,r0
    9.36   (p6)	br.cond.spnt .strace_check_retval
    9.37 @@ -93,7 +97,7 @@
    9.38   	;;					// added stop bits to prevent r8 dependency
    9.39   END(ia64_ret_from_clone)
    9.40   	// fall through
    9.41 -@@ -700,19 +728,25 @@
    9.42 +@@ -700,19 +732,25 @@
    9.43   .work_processed_syscall:
    9.44   	adds r2=PT(LOADRS)+16,r12
    9.45   	adds r3=PT(AR_BSPSTORE)+16,r12
    9.46 @@ -119,7 +123,7 @@
    9.47   	;;
    9.48   	// start restoring the state saved on the kernel stack (struct pt_regs):
    9.49   	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
    9.50 -@@ -757,7 +791,11 @@
    9.51 +@@ -757,7 +795,11 @@
    9.52   	;;
    9.53   	ld8.fill r12=[r2]	// restore r12 (sp)
    9.54   	ld8.fill r15=[r3]	// restore r15
    9.55 @@ -131,7 +135,7 @@
    9.56   	;;
    9.57   (pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
    9.58   (pUStk) st1 [r14]=r17
    9.59 -@@ -814,9 +852,18 @@
    9.60 +@@ -814,9 +856,18 @@
    9.61   (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
    9.62   #endif
    9.63   .work_processed_kernel:
    9.64 @@ -150,7 +154,19 @@
    9.65   	adds r21=PT(PR)+16,r12
    9.66   	;;
    9.67   
    9.68 -@@ -838,7 +885,9 @@
    9.69 +@@ -828,17 +879,20 @@
    9.70 + 	ld8 r28=[r2],8		// load b6
    9.71 + 	adds r29=PT(R24)+16,r12
    9.72 + 
    9.73 +-	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
    9.74 ++	ld8.fill r16=[r3]
    9.75 + 	adds r30=PT(AR_CCV)+16,r12
    9.76 + (p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
    9.77 + 	;;
    9.78 ++	adds r3=PT(AR_CSD)-PT(R16),r3
    9.79 + 	ld8.fill r24=[r29]
    9.80 + 	ld8 r15=[r30]		// load ar.ccv
    9.81 + (p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
    9.82   	;;
    9.83   	ld8 r29=[r2],16		// load b7
    9.84   	ld8 r30=[r3],16		// load ar.csd
    9.85 @@ -160,7 +176,7 @@
    9.86   	;;
    9.87   	ld8 r31=[r2],16		// load ar.ssd
    9.88   	ld8.fill r8=[r3],16
    9.89 -@@ -934,7 +983,11 @@
    9.90 +@@ -934,7 +988,11 @@
    9.91   	shr.u r18=r19,16	// get byte size of existing "dirty" partition
    9.92   	;;
    9.93   	mov r16=ar.bsp		// get existing backing store pointer
    9.94 @@ -172,7 +188,7 @@
    9.95   	;;
    9.96   	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
    9.97   (pKStk)	br.cond.dpnt skip_rbs_switch
    9.98 -@@ -1069,6 +1122,7 @@
    9.99 +@@ -1069,6 +1127,7 @@
   9.100   	mov pr=r31,-1		// I0
   9.101   	rfi			// B
   9.102   
   9.103 @@ -180,7 +196,7 @@
   9.104   	/*
   9.105   	 * On entry:
   9.106   	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
   9.107 -@@ -1130,6 +1184,7 @@
   9.108 +@@ -1130,6 +1189,7 @@
   9.109   	ld8 r8=[r2]
   9.110   	ld8 r10=[r3]
   9.111   	br.cond.sptk.many .work_processed_syscall	// re-check
   9.112 @@ -188,7 +204,7 @@
   9.113   
   9.114   END(ia64_leave_kernel)
   9.115   
   9.116 -@@ -1166,6 +1221,7 @@
   9.117 +@@ -1166,6 +1226,7 @@
   9.118   	br.ret.sptk.many rp
   9.119   END(ia64_invoke_schedule_tail)
   9.120   
   9.121 @@ -196,7 +212,7 @@
   9.122   	/*
   9.123   	 * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
   9.124   	 * be set up by the caller.  We declare 8 input registers so the system call
   9.125 -@@ -1264,6 +1320,7 @@
   9.126 +@@ -1264,6 +1325,7 @@
   9.127   	mov ar.unat=r9
   9.128   	br.many b7
   9.129   END(sys_rt_sigreturn)
   9.130 @@ -204,7 +220,7 @@
   9.131   
   9.132   GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
   9.133   	.prologue
   9.134 -@@ -1278,6 +1335,7 @@
   9.135 +@@ -1278,6 +1340,7 @@
   9.136   	br.cond.sptk.many rp				// goes to ia64_leave_kernel
   9.137   END(ia64_prepare_handle_unaligned)
   9.138   
   9.139 @@ -212,7 +228,7 @@
   9.140   	//
   9.141   	// unw_init_running(void (*callback)(info, arg), void *arg)
   9.142   	//
   9.143 -@@ -1585,3 +1643,4 @@
   9.144 +@@ -1585,3 +1648,4 @@
   9.145   	data8 sys_ni_syscall
   9.146   
   9.147   	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.h	Fri May 20 17:23:51 2005 +0000
    10.3 @@ -0,0 +1,37 @@
    10.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/arch/ia64/kernel/entry.h	2005-03-01 23:38:07.000000000 -0800
    10.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/arch/ia64/entry.h	2005-05-18 14:00:53.000000000 -0700
    10.6 +@@ -7,6 +7,12 @@
    10.7 + #define PRED_LEAVE_SYSCALL	1 /* TRUE iff leave from syscall */
    10.8 + #define PRED_KERNEL_STACK	2 /* returning to kernel-stacks? */
    10.9 + #define PRED_USER_STACK		3 /* returning to user-stacks? */
   10.10 ++#ifdef CONFIG_VTI
   10.11 ++#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
   10.12 ++#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
   10.13 ++#define PRED_BN0		6 /* Guest is in bank 0 */
   10.14 ++#define PRED_BN1		7 /* Guest is in bank 1 */
   10.15 ++#endif // CONFIG_VTI
   10.16 + #define PRED_SYSCALL		4 /* inside a system call? */
   10.17 + #define PRED_NON_SYSCALL	5 /* complement of PRED_SYSCALL */
   10.18 + 
   10.19 +@@ -17,12 +23,21 @@
   10.20 + # define pLvSys		PASTE(p,PRED_LEAVE_SYSCALL)
   10.21 + # define pKStk		PASTE(p,PRED_KERNEL_STACK)
   10.22 + # define pUStk		PASTE(p,PRED_USER_STACK)
   10.23 ++#ifdef CONFIG_VTI
   10.24 ++# define pEml		PASTE(p,PRED_EMUL)
   10.25 ++# define pNonEml	PASTE(p,PRED_NON_EMUL)
   10.26 ++# define pBN0		PASTE(p,PRED_BN0)
   10.27 ++# define pBN1		PASTE(p,PRED_BN1)
   10.28 ++#endif // CONFIG_VTI
   10.29 + # define pSys		PASTE(p,PRED_SYSCALL)
   10.30 + # define pNonSys	PASTE(p,PRED_NON_SYSCALL)
   10.31 + #endif
   10.32 + 
   10.33 + #define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
   10.34 + #define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
   10.35 ++#ifdef CONFIG_VTI
   10.36 ++#define VPD(f)      (VPD_##f##_START_OFFSET)
   10.37 ++#endif // CONFIG_VTI
   10.38 + 
   10.39 + #define PT_REGS_SAVES(off)			\
   10.40 + 	.unwabi 3, 'i';				\
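
The pEml/pNonEml/pBN0/pBN1 aliases added above turn into ia64 predicate register names through entry.h's PASTE helper, so PRED_EMUL's value 2 becomes the register p2 that the VTI assembly can test. A minimal sketch of the expansion, assuming the usual two-level paste so that macro arguments are expanded before concatenation:

    /* Two-level token paste: PASTE(p, PRED_EMUL) -> PASTE2(p, 2) -> p2 */
    #define PASTE2(x, y) x##y
    #define PASTE(x, y)  PASTE2(x, y)

    #define PRED_EMUL    2
    #define pEml         PASTE(p, PRED_EMUL)   /* expands to p2 */
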
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/gcc_intrin.h	Fri May 20 17:23:51 2005 +0000
    11.3 @@ -0,0 +1,69 @@
    11.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/gcc_intrin.h	2005-03-01 23:38:08.000000000 -0800
    11.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/gcc_intrin.h	2005-05-18 14:00:53.000000000 -0700
    11.6 +@@ -368,6 +368,66 @@
    11.7 + #define ia64_mf()	asm volatile ("mf" ::: "memory")
    11.8 + #define ia64_mfa()	asm volatile ("mf.a" ::: "memory")
    11.9 + 
   11.10 ++#ifdef CONFIG_VTI
   11.11 ++/*
   11.12 ++ * Flushrs instruction stream.
   11.13 ++ */
   11.14 ++#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
   11.15 ++
   11.16 ++#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
   11.17 ++
   11.18 ++#define ia64_get_rsc()                          \
   11.19 ++({                                  \
   11.20 ++    unsigned long val;                     \
   11.21 ++    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
   11.22 ++    val;                               \
   11.23 ++})
   11.24 ++
   11.25 ++#define ia64_set_rsc(val)                       \
   11.26 ++    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
   11.27 ++
   11.28 ++#define ia64_get_bspstore()     \
   11.29 ++({                                  \
   11.30 ++    unsigned long val;                     \
   11.31 ++    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
   11.32 ++    val;                               \
   11.33 ++})
   11.34 ++
   11.35 ++#define ia64_set_bspstore(val)                       \
   11.36 ++    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
   11.37 ++
   11.38 ++#define ia64_get_rnat()     \
   11.39 ++({                                  \
   11.40 ++    unsigned long val;                     \
   11.41 ++    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
   11.42 ++    val;                               \
   11.43 ++})
   11.44 ++
   11.45 ++#define ia64_set_rnat(val)                       \
   11.46 ++    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
   11.47 ++
   11.48 ++#define ia64_ttag(addr)							\
   11.49 ++({										\
   11.50 ++	__u64 ia64_intri_res;							\
   11.51 ++	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
   11.52 ++	ia64_intri_res;								\
   11.53 ++})
   11.54 ++
   11.55 ++#define ia64_get_dcr()                          \
   11.56 ++({                                      \
   11.57 ++    __u64 result;                               \
   11.58 ++    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
   11.59 ++    result;                                 \
   11.60 ++})
   11.61 ++
   11.62 ++#define ia64_set_dcr(val)                           \
   11.63 ++({                                      \
   11.64 ++    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
   11.65 ++})
   11.66 ++
   11.67 ++#endif // CONFIG_VTI
   11.68 ++
   11.69 ++
   11.70 + #define ia64_invala() asm volatile ("invala" ::: "memory")
   11.71 + 
   11.72 + #define ia64_thash(addr)							\
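
Each of the intrinsics added above is the same GCC extended-asm pattern: a statement expression that moves an application or control register to or from a C variable, with a "memory" clobber so the compiler cannot cache or reorder accesses around the register move. A sketch of how a get/set pair for a hypothetical register ar.foo would look in this style (ar.foo is not a real register — it only illustrates the template):

    #define ia64_get_foo()                                          \
    ({                                                              \
        unsigned long val;                                          \
        asm volatile ("mov %0=ar.foo;;" : "=r"(val) :: "memory");   \
        val;                                                        \
    })

    #define ia64_set_foo(val)                                       \
        asm volatile ("mov ar.foo=%0;;" :: "r"(val) : "memory")
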
    12.1 --- a/xen/arch/ia64/patch/linux-2.6.11/head.S	Thu May 19 21:22:49 2005 +0000
    12.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/head.S	Fri May 20 17:23:51 2005 +0000
    12.3 @@ -1,6 +1,62 @@
    12.4 ---- ../../linux-2.6.11/arch/ia64/kernel/head.S	2005-03-02 00:38:13.000000000 -0700
    12.5 -+++ arch/ia64/head.S	2005-04-28 10:51:19.000000000 -0600
    12.6 -@@ -187,7 +187,11 @@
    12.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/head.S	2005-03-01 23:38:13.000000000 -0800
    12.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/head.S	2005-05-18 12:40:50.000000000 -0700
    12.9 +@@ -76,21 +76,21 @@
   12.10 + 	 * We initialize all of them to prevent inadvertently assuming
   12.11 + 	 * something about the state of address translation early in boot.
   12.12 + 	 */
   12.13 +-	mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.14 ++	movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.15 + 	movl r7=(0<<61)
   12.16 +-	mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.17 ++	movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.18 + 	movl r9=(1<<61)
   12.19 +-	mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.20 ++	movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.21 + 	movl r11=(2<<61)
   12.22 +-	mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.23 ++	movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.24 + 	movl r13=(3<<61)
   12.25 +-	mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.26 ++	movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.27 + 	movl r15=(4<<61)
   12.28 +-	mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.29 ++	movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   12.30 + 	movl r17=(5<<61)
   12.31 +-	mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   12.32 ++	movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   12.33 + 	movl r19=(6<<61)
   12.34 +-	mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   12.35 ++	movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   12.36 + 	movl r21=(7<<61)
   12.37 + 	;;
   12.38 + 	mov rr[r7]=r6
   12.39 +@@ -129,8 +129,13 @@
   12.40 + 	/*
   12.41 + 	 * Switch into virtual mode:
   12.42 + 	 */
   12.43 ++#ifdef CONFIG_VTI
   12.44 ++	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
   12.45 ++		  |IA64_PSR_DI)
   12.46 ++#else // CONFIG_VTI
   12.47 + 	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
   12.48 + 		  |IA64_PSR_DI)
   12.49 ++#endif // CONFIG_VTI
   12.50 + 	;;
   12.51 + 	mov cr.ipsr=r16
   12.52 + 	movl r17=1f
   12.53 +@@ -143,7 +148,11 @@
   12.54 + 1:	// now we are in virtual mode
   12.55 + 
   12.56 + 	// set IVT entry point---can't access I/O ports without it
   12.57 ++#ifdef CONFIG_VTI
   12.58 ++    movl r3=vmx_ia64_ivt
   12.59 ++#else // CONFIG_VTI
   12.60 + 	movl r3=ia64_ivt
   12.61 ++#endif // CONFIG_VTI
   12.62 + 	;;
   12.63 + 	mov cr.iva=r3
   12.64 + 	movl r2=FPSR_DEFAULT
   12.65 +@@ -187,7 +196,11 @@
   12.66   	dep r18=0,r3,0,12
   12.67   	;;
   12.68   	or r18=r17,r18
   12.69 @@ -12,7 +68,23 @@
   12.70   	;;
   12.71   	mov r17=rr[r2]
   12.72   	shr.u r16=r3,IA64_GRANULE_SHIFT
   12.73 -@@ -227,7 +231,11 @@
   12.74 +@@ -207,8 +220,15 @@
   12.75 + 
   12.76 + .load_current:
   12.77 + 	// load the "current" pointer (r13) and ar.k6 with the current task
   12.78 ++#ifdef CONFIG_VTI
   12.79 ++	mov r21=r2		// virtual address
   12.80 ++	;;
   12.81 ++	bsw.1
   12.82 ++	;;
   12.83 ++#else // CONFIG_VTI
   12.84 + 	mov IA64_KR(CURRENT)=r2		// virtual address
   12.85 + 	mov IA64_KR(CURRENT_STACK)=r16
   12.86 ++#endif // CONFIG_VTI
   12.87 + 	mov r13=r2
   12.88 + 	/*
   12.89 + 	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
   12.90 +@@ -227,7 +247,11 @@
   12.91   	;;
   12.92   	mov ar.rsc=0x3		// place RSE in eager mode
   12.93   
   12.94 @@ -24,7 +96,7 @@
   12.95   (isBP)	movl r2=ia64_boot_param
   12.96   	;;
   12.97   (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
   12.98 -@@ -254,7 +262,9 @@
   12.99 +@@ -254,7 +278,9 @@
  12.100   	br.call.sptk.many b0=console_print
  12.101   
  12.102   self:	hint @pause
  12.103 @@ -34,7 +106,7 @@
  12.104   END(_start)
  12.105   
  12.106   GLOBAL_ENTRY(ia64_save_debug_regs)
  12.107 -@@ -850,7 +860,11 @@
  12.108 +@@ -850,7 +876,11 @@
  12.109    * intermediate precision so that we can produce a full 64-bit result.
  12.110    */
  12.111   GLOBAL_ENTRY(sched_clock)
    13.1 --- a/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h	Thu May 19 21:22:49 2005 +0000
    13.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h	Fri May 20 17:23:51 2005 +0000
    13.3 @@ -1,10 +1,5 @@
    13.4 - hpsim_ssc.h |   19 +++++++++++++++++++
    13.5 - 1 files changed, 19 insertions(+)
    13.6 -
    13.7 -Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h
    13.8 -===================================================================
    13.9 ---- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-02 01:38:17.000000000 -0600
   13.10 -+++ linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-19 13:34:01.705520375 -0600
   13.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h	2005-03-01 23:38:17.000000000 -0800
   13.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/hpsim_ssc.h	2005-05-18 12:40:19.000000000 -0700
   13.13  @@ -33,4 +33,23 @@
   13.14    */
   13.15   extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ia64regs.h	Fri May 20 17:23:51 2005 +0000
    14.3 @@ -0,0 +1,38 @@
    14.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ia64regs.h	2005-03-01 23:38:07.000000000 -0800
    14.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ia64regs.h	2005-05-18 14:00:53.000000000 -0700
    14.6 +@@ -87,6 +87,35 @@
    14.7 + #define _IA64_REG_CR_LRR0	4176
    14.8 + #define _IA64_REG_CR_LRR1	4177
    14.9 + 
   14.10 ++#ifdef  CONFIG_VTI
   14.11 ++#define IA64_REG_CR_DCR   0
   14.12 ++#define IA64_REG_CR_ITM   1
   14.13 ++#define IA64_REG_CR_IVA   2
   14.14 ++#define IA64_REG_CR_PTA   8
   14.15 ++#define IA64_REG_CR_IPSR  16
   14.16 ++#define IA64_REG_CR_ISR   17
   14.17 ++#define IA64_REG_CR_IIP   19
   14.18 ++#define IA64_REG_CR_IFA   20
   14.19 ++#define IA64_REG_CR_ITIR  21
   14.20 ++#define IA64_REG_CR_IIPA  22
   14.21 ++#define IA64_REG_CR_IFS   23
   14.22 ++#define IA64_REG_CR_IIM   24
   14.23 ++#define IA64_REG_CR_IHA   25
   14.24 ++#define IA64_REG_CR_LID   64
   14.25 ++#define IA64_REG_CR_IVR   65
   14.26 ++#define IA64_REG_CR_TPR   66
   14.27 ++#define IA64_REG_CR_EOI   67
   14.28 ++#define IA64_REG_CR_IRR0  68
   14.29 ++#define IA64_REG_CR_IRR1  69
   14.30 ++#define IA64_REG_CR_IRR2  70
   14.31 ++#define IA64_REG_CR_IRR3  71
   14.32 ++#define IA64_REG_CR_ITV   72
   14.33 ++#define IA64_REG_CR_PMV   73
   14.34 ++#define IA64_REG_CR_CMCV  74
   14.35 ++#define IA64_REG_CR_LRR0  80
   14.36 ++#define IA64_REG_CR_LRR1  81
   14.37 ++#endif  //  CONFIG_VTI
   14.38 ++
   14.39 + /* Indirect Registers for getindreg() and setindreg() */
   14.40 + 
   14.41 + #define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
    15.1 --- a/xen/arch/ia64/patch/linux-2.6.11/interrupt.h	Thu May 19 21:22:49 2005 +0000
    15.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/interrupt.h	Fri May 20 17:23:51 2005 +0000
    15.3 @@ -1,11 +1,6 @@
    15.4 - interrupt.h |    2 ++
    15.5 - 1 files changed, 2 insertions(+)
    15.6 -
    15.7 -Index: linux-2.6.11/include/linux/interrupt.h
    15.8 -===================================================================
    15.9 ---- linux-2.6.11.orig/include/linux/interrupt.h	2005-03-02 01:38:09.000000000 -0600
   15.10 -+++ linux-2.6.11/include/linux/interrupt.h	2005-03-19 13:41:00.739901125 -0600
   15.11 -@@ -33,6 +33,7 @@ typedef int irqreturn_t;
   15.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/linux/interrupt.h	2005-03-01 23:38:09.000000000 -0800
   15.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/linux/interrupt.h	2005-05-18 12:40:50.000000000 -0700
   15.14 +@@ -33,6 +33,7 @@
   15.15   #define IRQ_HANDLED	(1)
   15.16   #define IRQ_RETVAL(x)	((x) != 0)
   15.17   
   15.18 @@ -13,7 +8,7 @@ Index: linux-2.6.11/include/linux/interr
   15.19   struct irqaction {
   15.20   	irqreturn_t (*handler)(int, void *, struct pt_regs *);
   15.21   	unsigned long flags;
   15.22 -@@ -49,6 +50,7 @@ extern int request_irq(unsigned int,
   15.23 +@@ -49,6 +50,7 @@
   15.24   		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
   15.25   		       unsigned long, const char *, void *);
   15.26   extern void free_irq(unsigned int, void *);
   15.27 @@ -21,3 +16,12 @@ Index: linux-2.6.11/include/linux/interr
   15.28   
   15.29   
   15.30   #ifdef CONFIG_GENERIC_HARDIRQS
   15.31 +@@ -121,7 +123,7 @@
   15.32 + };
   15.33 + 
   15.34 + asmlinkage void do_softirq(void);
   15.35 +-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   15.36 ++//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   15.37 + extern void softirq_init(void);
   15.38 + #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
   15.39 + extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
    16.1 --- a/xen/arch/ia64/patch/linux-2.6.11/io.h	Thu May 19 21:22:49 2005 +0000
    16.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/io.h	Fri May 20 17:23:51 2005 +0000
    16.3 @@ -1,16 +1,11 @@
    16.4 - io.h |    4 ++++
    16.5 - 1 files changed, 4 insertions(+)
    16.6 -
    16.7 -Index: linux-2.6.11/include/asm-ia64/io.h
    16.8 -===================================================================
    16.9 ---- linux-2.6.11.orig/include/asm-ia64/io.h	2005-03-02 01:38:34.000000000 -0600
   16.10 -+++ linux-2.6.11/include/asm-ia64/io.h	2005-03-19 13:42:06.541900818 -0600
   16.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/io.h	2005-03-01 23:38:34.000000000 -0800
   16.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/io.h	2005-05-18 12:40:50.000000000 -0700
   16.13  @@ -23,7 +23,11 @@
   16.14   #define __SLOW_DOWN_IO	do { } while (0)
   16.15   #define SLOW_DOWN_IO	do { } while (0)
   16.16   
   16.17  +#ifdef XEN
   16.18 -+#define __IA64_UNCACHED_OFFSET	0xdffc000000000000UL	/* region 6 */
   16.19 ++#define __IA64_UNCACHED_OFFSET	0xd000000000000000UL	/* region 6 */
   16.20  +#else
   16.21   #define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
   16.22  +#endif
    17.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Thu May 19 21:22:49 2005 +0000
    17.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Fri May 20 17:23:51 2005 +0000
    17.3 @@ -1,5 +1,5 @@
    17.4 ---- ../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c	2005-03-02 00:38:07.000000000 -0700
    17.5 -+++ arch/ia64/irq_ia64.c	2005-04-29 16:05:30.000000000 -0600
    17.6 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c	2005-03-01 23:38:07.000000000 -0800
    17.7 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/irq_ia64.c	2005-05-18 12:40:51.000000000 -0700
    17.8  @@ -106,6 +106,9 @@
    17.9   	unsigned long saved_tpr;
   17.10   
   17.11 @@ -20,3 +20,99 @@
   17.12   			__do_IRQ(local_vector_to_irq(vector), regs);
   17.13   
   17.14   			/*
   17.15 +@@ -167,6 +173,95 @@
   17.16 + 	irq_exit();
   17.17 + }
   17.18 + 
   17.19 ++#ifdef  CONFIG_VTI
   17.20 ++/*
   17.21 ++ * That's where the IVT branches when we get an external
   17.22 ++ * interrupt. This branches to the correct hardware IRQ handler via
   17.23 ++ * function ptr.
   17.24 ++ */
   17.25 ++void
   17.26 ++vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
   17.27 ++{
   17.28 ++	unsigned long saved_tpr;
   17.29 ++	int	wake_dom0 = 0;
   17.30 ++
   17.31 ++
   17.32 ++#if IRQ_DEBUG
   17.33 ++	{
   17.34 ++		unsigned long bsp, sp;
   17.35 ++
   17.36 ++		/*
   17.37 ++		 * Note: if the interrupt happened while executing in
   17.38 ++		 * the context switch routine (ia64_switch_to), we may
   17.39 ++		 * get a spurious stack overflow here.  This is
   17.40 ++		 * because the register and the memory stack are not
   17.41 ++		 * switched atomically.
   17.42 ++		 */
   17.43 ++		bsp = ia64_getreg(_IA64_REG_AR_BSP);
   17.44 ++		sp = ia64_getreg(_IA64_REG_AR_SP);
   17.45 ++
   17.46 ++		if ((sp - bsp) < 1024) {
   17.47 ++			static unsigned char count;
   17.48 ++			static long last_time;
   17.49 ++
   17.50 ++			if (jiffies - last_time > 5*HZ)
   17.51 ++				count = 0;
   17.52 ++			if (++count < 5) {
   17.53 ++				last_time = jiffies;
   17.54 ++				printk("ia64_handle_irq: DANGER: less than "
   17.55 ++				       "1KB of free stack space!!\n"
   17.56 ++				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
   17.57 ++			}
   17.58 ++		}
   17.59 ++	}
   17.60 ++#endif /* IRQ_DEBUG */
   17.61 ++
   17.62 ++	/*
   17.63 ++	 * Always set TPR to limit maximum interrupt nesting depth to
   17.64 ++	 * 16 (without this, it would be ~240, which could easily lead
   17.65 ++	 * to kernel stack overflows).
   17.66 ++	 */
   17.67 ++	irq_enter();
   17.68 ++	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
   17.69 ++	ia64_srlz_d();
   17.70 ++	while (vector != IA64_SPURIOUS_INT_VECTOR) {
   17.71 ++	    if (!IS_RESCHEDULE(vector)) {
   17.72 ++		ia64_setreg(_IA64_REG_CR_TPR, vector);
   17.73 ++		ia64_srlz_d();
   17.74 ++
   17.75 ++		if (vector != IA64_TIMER_VECTOR) {
   17.76 ++			/* FIXME: Leave IRQ re-route later */
   17.77 ++			vmx_vcpu_pend_interrupt(dom0->exec_domain[0],vector);
   17.78 ++			wake_dom0 = 1;
   17.79 ++		}
   17.80 ++		else {	// FIXME: Handle Timer only now
   17.81 ++			__do_IRQ(local_vector_to_irq(vector), regs);
   17.82 ++		}
   17.83 ++		
   17.84 ++		/*
   17.85 ++		 * Disable interrupts and send EOI:
   17.86 ++		 */
   17.87 ++		local_irq_disable();
   17.88 ++		ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
   17.89 ++	    }
   17.90 ++	    else {
   17.91 ++                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
   17.92 ++            }
   17.93 ++	    ia64_eoi();
   17.94 ++	    vector = ia64_get_ivr();
   17.95 ++	}
   17.96 ++	/*
   17.97 ++	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
   17.98 ++	 * handler needs to be able to wait for further keyboard interrupts, which can't
   17.99 ++	 * come through until ia64_eoi() has been done.
  17.100 ++	 */
  17.101 ++	irq_exit();
  17.102 ++	if ( wake_dom0 && current != dom0 ) 
  17.103 ++		domain_wake(dom0->exec_domain[0]);
  17.104 ++}
  17.105 ++#endif
  17.106 ++
  17.107 ++
  17.108 + #ifdef CONFIG_HOTPLUG_CPU
  17.109 + /*
  17.110 +  * This function emulates a interrupt processing when a cpu is about to be
    18.1 --- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Thu May 19 21:22:49 2005 +0000
    18.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Fri May 20 17:23:51 2005 +0000
    18.3 @@ -1,18 +1,65 @@
    18.4 - kregs.h |    4 ++++
    18.5 - 1 files changed, 4 insertions(+)
    18.6 -
    18.7 -Index: linux-2.6.11/include/asm-ia64/kregs.h
    18.8 -===================================================================
    18.9 ---- linux-2.6.11.orig/include/asm-ia64/kregs.h	2005-03-02 01:37:49.000000000 -0600
   18.10 -+++ linux-2.6.11/include/asm-ia64/kregs.h	2005-03-19 13:44:24.362628092 -0600
   18.11 -@@ -31,6 +31,10 @@
   18.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h	2005-03-01 23:37:49.000000000 -0800
   18.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h	2005-05-18 12:40:50.000000000 -0700
   18.14 +@@ -29,8 +29,20 @@
   18.15 +  */
   18.16 + #define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
   18.17   #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
   18.18 ++#ifdef CONFIG_VTI
   18.19 ++#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
   18.20 ++#endif // CONFIG_VTI
   18.21   #define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
   18.22   #define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
   18.23  +#ifdef XEN
   18.24  +#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   18.25  +#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   18.26 ++#ifdef CONFIG_VTI
   18.27 ++#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
   18.28 ++#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
   18.29 ++#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
   18.30 ++#endif // CONFIG_VTI
   18.31  +#endif
   18.32   
   18.33   /* Processor status register bits: */
   18.34   #define IA64_PSR_BE_BIT		1
   18.35 +@@ -66,6 +78,9 @@
   18.36 + #define IA64_PSR_ED_BIT		43
   18.37 + #define IA64_PSR_BN_BIT		44
   18.38 + #define IA64_PSR_IA_BIT		45
   18.39 ++#ifdef CONFIG_VTI
   18.40 ++#define IA64_PSR_VM_BIT		46
   18.41 ++#endif // CONFIG_VTI
   18.42 + 
   18.43 + /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
   18.44 +    execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
   18.45 +@@ -107,6 +122,9 @@
   18.46 + #define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
   18.47 + #define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
   18.48 + #define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
   18.49 ++#ifdef CONFIG_VTI
   18.50 ++#define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
   18.51 ++#endif // CONFIG_VTI
   18.52 + 
   18.53 + /* User mask bits: */
   18.54 + #define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
   18.55 +@@ -160,4 +178,21 @@
   18.56 + #define IA64_ISR_CODE_LFETCH	4
   18.57 + #define IA64_ISR_CODE_PROBEF	5
   18.58 + 
   18.59 ++#ifdef CONFIG_VTI
   18.60 ++/* Interruption Function State */
   18.61 ++#define IA64_IFS_V_BIT		63
   18.62 ++#define IA64_IFS_V	(__IA64_UL(1) << IA64_IFS_V_BIT)
   18.63 ++
   18.64 ++/* Page Table Address */
   18.65 ++#define IA64_PTA_VE_BIT 0
   18.66 ++#define IA64_PTA_SIZE_BIT 2
   18.67 ++#define IA64_PTA_VF_BIT 8
   18.68 ++#define IA64_PTA_BASE_BIT 15
   18.69 ++
   18.70 ++#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
   18.71 ++#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
   18.72 ++#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
   18.73 ++#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
   18.74 ++#endif // CONFIG_VTI
   18.75 ++
   18.76 + #endif /* _ASM_IA64_kREGS_H */
    19.1 --- a/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h	Thu May 19 21:22:49 2005 +0000
    19.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/mca_asm.h	Fri May 20 17:23:51 2005 +0000
    19.3 @@ -1,10 +1,5 @@
    19.4 - mca_asm.h |   11 +++++++++++
    19.5 - 1 files changed, 11 insertions(+)
    19.6 -
    19.7 -Index: linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h
    19.8 -===================================================================
    19.9 ---- linux-2.6.11-xendiffs.orig/include/asm-ia64/mca_asm.h	2005-03-02 01:38:38.000000000 -0600
   19.10 -+++ linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h	2005-04-06 22:41:57.392411032 -0500
   19.11 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/mca_asm.h	2005-03-01 23:38:38.000000000 -0800
   19.12 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/mca_asm.h	2005-05-18 12:40:19.000000000 -0700
   19.13  @@ -26,8 +26,13 @@
   19.14    * direct mapped to physical addresses.
   19.15    *	1. Lop off bits 61 thru 63 in the virtual address
    20.1 --- a/xen/arch/ia64/patch/linux-2.6.11/page.h	Thu May 19 21:22:49 2005 +0000
    20.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/page.h	Fri May 20 17:23:51 2005 +0000
    20.3 @@ -1,6 +1,14 @@
    20.4 ---- ../../linux-2.6.11/include/asm-ia64/page.h	2005-03-02 00:37:48.000000000 -0700
    20.5 -+++ include/asm-ia64/page.h	2005-05-02 11:25:33.000000000 -0600
    20.6 -@@ -95,9 +95,15 @@
    20.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/page.h	2005-03-01 23:37:48.000000000 -0800
    20.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/page.h	2005-05-20 09:36:02.000000000 -0700
    20.9 +@@ -32,6 +32,7 @@
   20.10 + #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
   20.11 + 
   20.12 + #define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
   20.13 ++
   20.14 + #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
   20.15 + 
   20.16 + #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
   20.17 +@@ -95,9 +96,15 @@
   20.18   #endif
   20.19   
   20.20   #ifndef CONFIG_DISCONTIGMEM
   20.21 @@ -16,7 +24,7 @@
   20.22   #else
   20.23   extern struct page *vmem_map;
   20.24   extern unsigned long max_low_pfn;
   20.25 -@@ -109,6 +115,11 @@
   20.26 +@@ -109,6 +116,11 @@
   20.27   #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   20.28   #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   20.29   
   20.30 @@ -28,7 +36,7 @@
   20.31   typedef union ia64_va {
   20.32   	struct {
   20.33   		unsigned long off : 61;		/* intra-region offset */
   20.34 -@@ -124,8 +135,23 @@
   20.35 +@@ -124,8 +136,23 @@
   20.36    * expressed in this way to ensure they result in a single "dep"
   20.37    * instruction.
   20.38    */
   20.39 @@ -52,7 +60,7 @@
   20.40   
   20.41   #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
   20.42   #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
   20.43 -@@ -197,7 +223,11 @@
   20.44 +@@ -197,7 +224,11 @@
   20.45   # define __pgprot(x)	(x)
   20.46   #endif /* !STRICT_MM_TYPECHECKS */
   20.47   
    21.1 --- a/xen/arch/ia64/patch/linux-2.6.11/pal.S	Thu May 19 21:22:49 2005 +0000
    21.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.S	Fri May 20 17:23:51 2005 +0000
    21.3 @@ -1,11 +1,6 @@
    21.4 - pal.S |    8 ++++++++
    21.5 - 1 files changed, 8 insertions(+)
    21.6 -
    21.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S
    21.8 -===================================================================
    21.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/pal.S	2005-03-02 01:38:33.000000000 -0600
   21.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S	2005-04-06 22:43:53.817885390 -0500
   21.11 -@@ -166,7 +166,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
   21.12 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/pal.S	2005-03-01 23:38:33.000000000 -0800
   21.13 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/pal.S	2005-05-18 12:40:19.000000000 -0700
   21.14 +@@ -166,7 +166,11 @@
   21.15   	adds r8  = 1f-1b,r8		// calculate return address for call
   21.16   	;;
   21.17   	mov loc4=ar.rsc			// save RSE configuration
   21.18 @@ -17,7 +12,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k
   21.19   	tpa r8=r8			// convert rp to physical
   21.20   	;;
   21.21   	mov b7 = loc2			// install target to branch reg
   21.22 -@@ -225,7 +229,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
   21.23 +@@ -225,7 +229,11 @@
   21.24   	mov loc3 = psr		// save psr
   21.25   	;;
   21.26   	mov loc4=ar.rsc			// save RSE configuration
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/pal.h	Fri May 20 17:23:51 2005 +0000
    22.3 @@ -0,0 +1,12 @@
    22.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/pal.h	2005-03-01 23:38:13.000000000 -0800
    22.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/pal.h	2005-05-18 14:00:53.000000000 -0700
    22.6 +@@ -1559,6 +1559,9 @@
    22.7 + 	return iprv.status;
    22.8 + }
    22.9 + 
   22.10 ++#ifdef CONFIG_VTI
   22.11 ++#include <asm/vmx_pal.h>
   22.12 ++#endif // CONFIG_VTI
   22.13 + #endif /* __ASSEMBLY__ */
   22.14 + 
   22.15 + #endif /* _ASM_IA64_PAL_H */
    23.1 --- a/xen/arch/ia64/patch/linux-2.6.11/processor.h	Thu May 19 21:22:49 2005 +0000
    23.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/processor.h	Fri May 20 17:23:51 2005 +0000
    23.3 @@ -1,17 +1,30 @@
    23.4 - processor.h |    4 ++++
    23.5 - 1 files changed, 4 insertions(+)
    23.6 -
    23.7 -Index: linux-2.6.11/include/asm-ia64/processor.h
    23.8 -===================================================================
    23.9 ---- linux-2.6.11.orig/include/asm-ia64/processor.h	2005-03-02 01:37:58.000000000 -0600
   23.10 -+++ linux-2.6.11/include/asm-ia64/processor.h	2005-03-19 14:26:01.062135543 -0600
   23.11 -@@ -408,12 +408,16 @@ extern void ia64_setreg_unknown_kr (void
   23.12 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/processor.h	2005-03-01 23:37:58.000000000 -0800
   23.13 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/processor.h	2005-05-20 09:36:02.000000000 -0700
   23.14 +@@ -94,7 +94,11 @@
   23.15 + #ifdef CONFIG_NUMA
   23.16 + #include <asm/nodedata.h>
   23.17 + #endif
   23.18 ++#ifdef XEN
   23.19 ++#include <asm/xenprocessor.h>
   23.20 ++#endif
   23.21 + 
   23.22 ++#ifndef XEN
   23.23 + /* like above but expressed as bitfields for more efficient access: */
   23.24 + struct ia64_psr {
   23.25 + 	__u64 reserved0 : 1;
   23.26 +@@ -133,6 +137,7 @@
   23.27 + 	__u64 bn : 1;
   23.28 + 	__u64 reserved4 : 19;
   23.29 + };
   23.30 ++#endif
   23.31 + 
   23.32 + /*
   23.33 +  * CPU type, hardware bug flags, and per-CPU state.  Frequently used
   23.34 +@@ -408,12 +413,14 @@
   23.35    */
   23.36   
   23.37   /* Return TRUE if task T owns the fph partition of the CPU we're running on. */
   23.38 -+#ifdef XEN
   23.39 -+#define ia64_is_local_fpu_owner(t) 0
   23.40 -+#else
   23.41 ++#ifndef XEN
   23.42   #define ia64_is_local_fpu_owner(t)								\
   23.43   ({												\
   23.44   	struct task_struct *__ia64_islfo_task = (t);						\
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h	Fri May 20 17:23:51 2005 +0000
    24.3 @@ -0,0 +1,20 @@
    24.4 +--- /home/adsharma/disk2/xen-ia64/test3.bk/xen/../../linux-2.6.11/include/asm-ia64/ptrace.h	2005-03-01 23:38:38.000000000 -0800
    24.5 ++++ /home/adsharma/disk2/xen-ia64/test3.bk/xen/include/asm-ia64/ptrace.h	2005-05-18 14:00:53.000000000 -0700
    24.6 +@@ -95,6 +95,9 @@
    24.7 +  * (because the memory stack pointer MUST ALWAYS be aligned this way)
    24.8 +  *
    24.9 +  */
   24.10 ++#ifdef CONFIG_VTI
   24.11 ++#include "vmx_ptrace.h"
   24.12 ++#else  //CONFIG_VTI
   24.13 + struct pt_regs {
   24.14 + 	/* The following registers are saved by SAVE_MIN: */
   24.15 + 	unsigned long b6;		/* scratch */
   24.16 +@@ -170,6 +173,7 @@
   24.17 + 	struct ia64_fpreg f10;		/* scratch */
   24.18 + 	struct ia64_fpreg f11;		/* scratch */
   24.19 + };
   24.20 ++#endif // CONFIG_VTI
   24.21 + 
   24.22 + /*
   24.23 +  * This structure contains the addition registers that need to
    25.1 --- a/xen/arch/ia64/patch/linux-2.6.11/setup.c	Thu May 19 21:22:49 2005 +0000
    25.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/setup.c	Fri May 20 17:23:51 2005 +0000
    25.3 @@ -1,6 +1,16 @@
    25.4 ---- ../../linux-2.6.11/arch/ia64/kernel/setup.c	2005-03-02 00:37:49.000000000 -0700
    25.5 -+++ arch/ia64/setup.c	2005-05-02 10:04:03.000000000 -0600
    25.6 -@@ -127,7 +127,16 @@
    25.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/setup.c	2005-03-01 23:37:49.000000000 -0800
    25.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/setup.c	2005-05-18 12:40:50.000000000 -0700
    25.9 +@@ -51,6 +51,9 @@
   25.10 + #include <asm/smp.h>
   25.11 + #include <asm/system.h>
   25.12 + #include <asm/unistd.h>
   25.13 ++#ifdef CONFIG_VTI
   25.14 ++#include <asm/vmx.h>
   25.15 ++#endif // CONFIG_VTI
   25.16 + 
   25.17 + #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
   25.18 + # error "struct cpuinfo_ia64 too big!"
   25.19 +@@ -127,7 +130,16 @@
   25.20   		range_end   = min(end, rsvd_region[i].start);
   25.21   
   25.22   		if (range_start < range_end)
   25.23 @@ -17,7 +27,7 @@
   25.24   
   25.25   		/* nothing more available in this segment */
   25.26   		if (range_end == end) return 0;
   25.27 -@@ -185,7 +194,12 @@
   25.28 +@@ -185,7 +197,12 @@
   25.29   	n++;
   25.30   
   25.31   	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
   25.32 @@ -30,7 +40,7 @@
   25.33   	n++;
   25.34   
   25.35   #ifdef CONFIG_BLK_DEV_INITRD
   25.36 -@@ -299,7 +313,11 @@
   25.37 +@@ -299,7 +316,11 @@
   25.38   }
   25.39   
   25.40   void __init
   25.41 @@ -42,7 +52,7 @@
   25.42   {
   25.43   	unw_init();
   25.44   
   25.45 -@@ -308,8 +326,14 @@
   25.46 +@@ -308,8 +329,14 @@
   25.47   	*cmdline_p = __va(ia64_boot_param->command_line);
   25.48   	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
   25.49   
   25.50 @@ -57,7 +67,7 @@
   25.51   
   25.52   #ifdef CONFIG_IA64_GENERIC
   25.53   	{
   25.54 -@@ -351,8 +375,17 @@
   25.55 +@@ -351,8 +378,18 @@
   25.56   # endif
   25.57   #endif /* CONFIG_APCI_BOOT */
   25.58   
   25.59 @@ -71,11 +81,23 @@
   25.60  +late_setup_arch (char **cmdline_p)
   25.61  +{
   25.62  +#undef CONFIG_ACPI_BOOT
   25.63 ++	acpi_table_init();
   25.64  +#endif
   25.65   	/* process SAL system table: */
   25.66   	ia64_sal_init(efi.sal_systab);
   25.67   
   25.68 -@@ -492,12 +525,14 @@
   25.69 +@@ -360,6 +397,10 @@
   25.70 + 	cpu_physical_id(0) = hard_smp_processor_id();
   25.71 + #endif
   25.72 + 
   25.73 ++#ifdef CONFIG_VTI
   25.74 ++	identify_vmx_feature();
   25.75 ++#endif // CONFIG_VTI
   25.76 ++
   25.77 + 	cpu_init();	/* initialize the bootstrap CPU */
   25.78 + 
   25.79 + #ifdef CONFIG_ACPI_BOOT
   25.80 +@@ -492,12 +533,14 @@
   25.81   {
   25.82   }
   25.83   
   25.84 @@ -90,7 +112,20 @@
   25.85   
   25.86   void
   25.87   identify_cpu (struct cpuinfo_ia64 *c)
   25.88 -@@ -659,7 +694,11 @@
   25.89 +@@ -551,6 +594,12 @@
   25.90 + 	}
   25.91 + 	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
   25.92 + 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
   25.93 ++
   25.94 ++#ifdef CONFIG_VTI
   25.95 ++	/* If vmx feature is on, do necessary initialization for vmx */
   25.96 ++	if (vmx_enabled)
   25.97 ++		vmx_init_env();
   25.98 ++#endif
   25.99 + }
  25.100 + 
  25.101 + void
  25.102 +@@ -659,7 +708,11 @@
  25.103   					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
  25.104   	atomic_inc(&init_mm.mm_count);
  25.105   	current->active_mm = &init_mm;
    26.1 --- a/xen/arch/ia64/patch/linux-2.6.11/system.h	Thu May 19 21:22:49 2005 +0000
    26.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/system.h	Fri May 20 17:23:51 2005 +0000
    26.3 @@ -1,32 +1,38 @@
    26.4 ---- ../../linux-2.6.11/include/asm-ia64/system.h	2005-03-02 00:38:07.000000000 -0700
    26.5 -+++ include/asm-ia64/system.h	2005-05-02 10:18:30.000000000 -0600
    26.6 -@@ -24,8 +24,15 @@
    26.7 +--- /home/adsharma/xeno-unstable-ia64-staging.bk/xen/../../linux-2.6.11/include/asm-ia64/system.h	2005-03-01 23:38:07.000000000 -0800
    26.8 ++++ /home/adsharma/xeno-unstable-ia64-staging.bk/xen/include/asm-ia64/system.h	2005-05-20 09:36:02.000000000 -0700
    26.9 +@@ -18,14 +18,19 @@
   26.10 + #include <asm/page.h>
   26.11 + #include <asm/pal.h>
   26.12 + #include <asm/percpu.h>
   26.13 ++#ifdef XEN
   26.14 ++#include <asm/xensystem.h>
   26.15 ++#endif
   26.16 + 
   26.17 + #define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
   26.18 + /*
   26.19    * 0xa000000000000000+2*PERCPU_PAGE_SIZE
   26.20    * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
   26.21    */
   26.22 -+#ifdef XEN
   26.23 -+#define KERNEL_START		 0xf000000004000000
   26.24 -+#define PERCPU_ADDR		 0xf100000000000000-PERCPU_PAGE_SIZE
   26.25 -+#define SHAREDINFO_ADDR		 0xf100000000000000
   26.26 -+#define VHPT_ADDR		 0xf200000000000000
   26.27 -+#else
   26.28 ++#ifndef XEN
   26.29   #define KERNEL_START		 __IA64_UL_CONST(0xa000000100000000)
   26.30   #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
   26.31  +#endif
   26.32   
   26.33   #ifndef __ASSEMBLY__
   26.34   
   26.35 -@@ -218,9 +225,13 @@
   26.36 +@@ -218,6 +223,7 @@
   26.37   # define PERFMON_IS_SYSWIDE() (0)
   26.38   #endif
   26.39   
   26.40 -+#ifdef XEN
   26.41 -+#define IA64_HAS_EXTRA_STATE(t) 0
   26.42 -+#else
   26.43 ++#ifndef XEN
   26.44   #define IA64_HAS_EXTRA_STATE(t)							\
   26.45   	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
   26.46   	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
   26.47 -+#endif
   26.48 +@@ -230,6 +236,7 @@
   26.49 + 	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
   26.50 + 	(last) = ia64_switch_to((next));							 \
   26.51 + } while (0)
   26.52 ++#endif 
   26.53   
   26.54 - #define __switch_to(prev,next,last) do {							 \
   26.55 - 	if (IA64_HAS_EXTRA_STATE(prev))								 \
   26.56 + #ifdef CONFIG_SMP
   26.57 + /*
    27.1 --- a/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Thu May 19 21:22:49 2005 +0000
    27.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Fri May 20 17:23:51 2005 +0000
    27.3 @@ -1,8 +1,145 @@
    27.4 ---- ../../linux-2.6.11/arch/ia64/kernel/unaligned.c	2005-03-02 00:38:25.000000000 -0700
    27.5 -+++ arch/ia64/unaligned.c	2005-05-10 15:46:09.000000000 -0600
    27.6 -@@ -437,7 +437,11 @@
    27.7 +--- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/arch/ia64/kernel/unaligned.c	2005-03-01 23:38:25.000000000 -0800
    27.8 ++++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/arch/ia64/unaligned.c	2005-05-18 12:40:50.000000000 -0700
    27.9 +@@ -201,7 +201,11 @@
   27.10 + 
   27.11 + 	RPT(r1), RPT(r2), RPT(r3),
   27.12 + 
   27.13 ++#ifdef  CONFIG_VTI
   27.14 ++	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
   27.15 ++#else   //CONFIG_VTI
   27.16 + 	RSW(r4), RSW(r5), RSW(r6), RSW(r7),
   27.17 ++#endif  //CONFIG_VTI
   27.18 + 
   27.19 + 	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
   27.20 + 	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
   27.21 +@@ -291,6 +295,121 @@
   27.22 + 	return reg;
   27.23   }
   27.24   
   27.25 ++#ifdef CONFIG_VTI
   27.26 ++static void
   27.27 ++set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
   27.28 ++{
   27.29 ++	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   27.30 ++	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
   27.31 ++	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   27.32 ++	unsigned long rnats, nat_mask;
   27.33 ++    unsigned long old_rsc,new_rsc;
   27.34 ++	unsigned long on_kbs,rnat;
   27.35 ++	long sof = (regs->cr_ifs) & 0x7f;
   27.36 ++	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
   27.37 ++	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
   27.38 ++	long ridx = r1 - 32;
   27.39 ++
   27.40 ++	if (ridx >= sof) {
   27.41 ++		/* this should never happen, as the "rsvd register fault" has higher priority */
   27.42 ++		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
   27.43 ++		return;
   27.44 ++	}
   27.45 ++
   27.46 ++	if (ridx < sor)
   27.47 ++		ridx = rotate_reg(sor, rrb_gr, ridx);
   27.48 ++
   27.49 ++    old_rsc=ia64_get_rsc();
   27.50 ++    new_rsc=old_rsc&(~0x3);
   27.51 ++    ia64_set_rsc(new_rsc);
   27.52 ++
   27.53 ++    bspstore = ia64_get_bspstore();
   27.54 ++    bsp =kbs + (regs->loadrs >> 19);//16+3
   27.55 ++
   27.56 ++	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
   27.57 ++    nat_mask = 1UL << ia64_rse_slot_num(addr);
   27.58 ++	rnat_addr = ia64_rse_rnat_addr(addr);
   27.59 ++
   27.60 ++    if(addr >= bspstore){
   27.61 ++
   27.62 ++        ia64_flushrs ();
   27.63 ++        ia64_mf ();
   27.64 ++		*addr = val;
   27.65 ++        bspstore = ia64_get_bspstore();
   27.66 ++    	rnat = ia64_get_rnat ();
   27.67 ++        if(bspstore < rnat_addr){
   27.68 ++            rnat=rnat&(~nat_mask);
   27.69 ++        }else{
   27.70 ++            *rnat_addr = (*rnat_addr)&(~nat_mask);
   27.71 ++        }
   27.72 ++        ia64_mf();
   27.73 ++        ia64_loadrs();
   27.74 ++        ia64_set_rnat(rnat);
   27.75 ++    }else{
   27.76 ++
   27.77 ++    	rnat = ia64_get_rnat ();
   27.78 ++		*addr = val;
   27.79 ++        if(bspstore < rnat_addr){
   27.80 ++            rnat=rnat&(~nat_mask);
   27.81 ++        }else{
   27.82 ++            *rnat_addr = (*rnat_addr)&(~nat_mask);
   27.83 ++        }
   27.84 ++        ia64_set_bspstore (bspstore);
   27.85 ++        ia64_set_rnat(rnat);
   27.86 ++    }
   27.87 ++    ia64_set_rsc(old_rsc);
   27.88 ++}
   27.89 ++
   27.90 ++
   27.91 ++static void
   27.92 ++get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
   27.93 ++{
   27.94 ++	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   27.95 ++	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
   27.96 ++	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   27.97 ++	unsigned long rnats, nat_mask;
   27.98 ++	unsigned long on_kbs;
   27.99 ++    unsigned long old_rsc, new_rsc;
  27.100 ++	long sof = (regs->cr_ifs) & 0x7f;
  27.101 ++	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
  27.102 ++	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  27.103 ++	long ridx = r1 - 32;
  27.104 ++
  27.105 ++	if (ridx >= sof) {
   27.106 ++		/* a read of an out-of-frame register should never reach here; treat it as fatal.  */
  27.107 ++		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
  27.108 ++		panic("wrong stack register number");
  27.109 ++	}
  27.110 ++
  27.111 ++	if (ridx < sor)
  27.112 ++		ridx = rotate_reg(sor, rrb_gr, ridx);
  27.113 ++
  27.114 ++    old_rsc=ia64_get_rsc();
  27.115 ++    new_rsc=old_rsc&(~(0x3));
  27.116 ++    ia64_set_rsc(new_rsc);
  27.117 ++
  27.118 ++    bspstore = ia64_get_bspstore();
  27.119 ++    bsp =kbs + (regs->loadrs >> 19); //16+3;
  27.120 ++
  27.121 ++	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
  27.122 ++    nat_mask = 1UL << ia64_rse_slot_num(addr);
  27.123 ++	rnat_addr = ia64_rse_rnat_addr(addr);
  27.124 ++
  27.125 ++    if(addr >= bspstore){
  27.126 ++
  27.127 ++        ia64_flushrs ();
  27.128 ++        ia64_mf ();
  27.129 ++        bspstore = ia64_get_bspstore();
  27.130 ++    }
  27.131 ++	*val=*addr;
  27.132 ++    if(bspstore < rnat_addr){
  27.133 ++        *nat=!!(ia64_get_rnat()&nat_mask);
  27.134 ++    }else{
  27.135 ++        *nat = !!((*rnat_addr)&nat_mask);
  27.136 ++    }
  27.137 ++    ia64_set_rsc(old_rsc);
  27.138 ++}
  27.139 ++#else // CONFIG_VTI
  27.140 + static void
  27.141 + set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
  27.142 + {
  27.143 +@@ -435,9 +554,14 @@
  27.144 + 		*nat = 0;
  27.145 + 	return;
  27.146 + }
  27.147 ++#endif // CONFIG_VTI
  27.148 + 
  27.149   
  27.150  +#ifdef XEN
  27.151  +void
  27.152 @@ -12,7 +149,19 @@
  27.153   setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
  27.154   {
  27.155   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
  27.156 -@@ -522,7 +526,11 @@
  27.157 +@@ -466,7 +590,11 @@
  27.158 + 		unat = &sw->ar_unat;
  27.159 + 	} else {
  27.160 + 		addr = (unsigned long)regs;
  27.161 ++#ifdef CONFIG_VTI
  27.162 ++		unat = &regs->eml_unat;
  27.163 ++#else //CONFIG_VTI
  27.164 + 		unat = &sw->caller_unat;
  27.165 ++#endif  //CONFIG_VTI
  27.166 + 	}
  27.167 + 	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
  27.168 + 	       addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
  27.169 +@@ -522,7 +650,11 @@
  27.170   	 */
  27.171   	if (regnum >= IA64_FIRST_ROTATING_FR) {
  27.172   		ia64_sync_fph(current);
  27.173 @@ -24,7 +173,7 @@
  27.174   	} else {
  27.175   		/*
  27.176   		 * pt_regs or switch_stack ?
  27.177 -@@ -581,7 +589,11 @@
  27.178 +@@ -581,7 +713,11 @@
  27.179   	 */
  27.180   	if (regnum >= IA64_FIRST_ROTATING_FR) {
  27.181   		ia64_flush_fph(current);
  27.182 @@ -36,7 +185,7 @@
  27.183   	} else {
  27.184   		/*
  27.185   		 * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
  27.186 -@@ -611,7 +623,11 @@
  27.187 +@@ -611,7 +747,11 @@
  27.188   }
  27.189   
  27.190   
  27.191 @@ -48,7 +197,19 @@
  27.192   getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
  27.193   {
  27.194   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
  27.195 -@@ -1294,6 +1310,9 @@
  27.196 +@@ -640,7 +780,11 @@
  27.197 + 		unat = &sw->ar_unat;
  27.198 + 	} else {
  27.199 + 		addr = (unsigned long)regs;
  27.200 ++#ifdef  CONFIG_VTI
   27.201 ++		unat = &regs->eml_unat;
  27.202 ++#else   //CONFIG_VTI
  27.203 + 		unat = &sw->caller_unat;
  27.204 ++#endif  //CONFIG_VTI
  27.205 + 	}
  27.206 + 
  27.207 + 	DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
  27.208 +@@ -1294,6 +1438,9 @@
  27.209   void
  27.210   ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
  27.211   {
  27.212 @@ -58,7 +219,7 @@
  27.213   	struct ia64_psr *ipsr = ia64_psr(regs);
  27.214   	mm_segment_t old_fs = get_fs();
  27.215   	unsigned long bundle[2];
  27.216 -@@ -1502,4 +1521,5 @@
  27.217 +@@ -1502,4 +1649,5 @@
  27.218   	si.si_imm = 0;
  27.219   	force_sig_info(SIGBUS, &si, current);
  27.220   	goto done;
    28.1 --- a/xen/arch/ia64/process.c	Thu May 19 21:22:49 2005 +0000
    28.2 +++ b/xen/arch/ia64/process.c	Fri May 20 17:23:51 2005 +0000
    28.3 @@ -64,11 +64,16 @@ long do_iopl(domid_t domain, unsigned in
    28.4  void schedule_tail(struct exec_domain *next)
    28.5  {
    28.6  	unsigned long rr7;
    28.7 -	printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
    28.8 -	printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
    28.9 +	//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
   28.10 +	//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
   28.11 +#ifdef CONFIG_VTI
   28.12 +	/* rr7 will be postponed to last point when resuming back to guest */
   28.13 +	vmx_load_all_rr(current);
   28.14 +#else // CONFIG_VTI
   28.15  	if (rr7 = load_region_regs(current)) {
   28.16  		printk("schedule_tail: change to rr7 not yet implemented\n");
   28.17  	}
   28.18 +#endif // CONFIG_VTI
   28.19  }
   28.20  
   28.21  extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
   28.22 @@ -346,8 +351,8 @@ void ia64_do_page_fault (unsigned long a
   28.23  		// FIXME should validate mpaddr here
   28.24  		if (d == dom0) {
   28.25  			if (address < dom0_start || address >= dom0_start + dom0_size) {
   28.26 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   28.27 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
   28.28 +				//printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   28.29 +				//printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
   28.30  				tdpfoo();
   28.31  			}
   28.32  		}
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/arch/ia64/tools/README.RunVT	Fri May 20 17:23:51 2005 +0000
    29.3 @@ -0,0 +1,59 @@
     29.4 +INSTRUCTIONS FOR RUNNING IPF/Xen ON A VT-ENABLED TIGER4 PLATFORM
    29.5 +
     29.6 +Note: Domain0 must be an unmodified Linux kernel
    29.7 +
     29.8 +1) Follow the steps in README.xenia64 to get a flattened Xen IPF source tree
    29.9 +
   29.10 +2) Build an unmodified Linux 2.6 kernel
   29.11 +	a) tar xvfz  linux-2.6.11.tar.gz
   29.12 +        b) cp arch/ia64/configs/tiger_defconfig .config
    29.13 +	c) Build Linux:
   29.14 +   		1) yes "" | make oldconfig
   29.15 +   		2) make
   29.16 +
    29.17 +3) Build the IPF VT-enabled Xen image
    29.18 +	edit xen/arch/ia64/Rules.mk to set
    29.19 +		CONFIG_VTI	?= y 	to enable the VT-enabled build
    29.20 +4) Set up ELILO.CONF
   29.21 +	image=xen
   29.22 +        	label=xen
   29.23 +        	initrd=vmlinux2.6.11		// unmodified Linux kernel image
   29.24 +        	read-only
   29.25 +        	append="nomca root=/dev/sda3"
   29.26 +
    29.27 +STATUS as of 4/28/05 - Features implemented for Domain0
   29.28 +
    29.29 +0. Runs an unmodified Linux kernel as Domain0
    29.30 +    Validated with Linux 2.6.11 running X Window and a NIC on a UP logical processor
   29.31 +
    29.32 +1. Takes advantage of the VT-enabled processor
    29.33 +   a. The processor intercepts guest privileged instructions and delivers the Opcode/Cause to the hypervisor
    29.34 +   b. One VPD (Virtual Processor Descriptor) per virtual processor
    29.35 +   c. Domains run in a different virtual address space from the hypervisor. Domains have one less VA bit than the hypervisor, which runs at 0xF00000... addresses protected by the processor from Domains.
   29.36 +
   29.37 +2. vTLB and guest_VHPT
    29.38 +   a. The vTLB extends machine TLB entries through a hypervisor-internal data structure.
    29.39 +      The vTLB caches the TRs and TCs installed by Domains, and then installs TCs on their behalf.
    29.40 +      The vTLB implements collision chains.
    29.41 +   b. The processor walks the hypervisor-internal VHPT, not the domain VHPT.  On a TLB miss, the vTLB is consulted first so that a hypervisor-cached entry can be put into the VHPT without injecting a TLB miss into the domain.
   29.42 +
   29.43 +3. Region ID fix-partitioning
    29.44 +   a. Currently hard-partitions the 24-bit RID space into 16 partitions using the top 4 bits (see the sketch in APPENDIX A at the end of this file).
    29.45 +   b. The hypervisor uses the very last partition's RIDs, i.e., the 0xFxxxxx RIDs
    29.46 +   c. Effectively supports Domain0 and 14 other DomainN
   29.47 +
    29.48 +4. The hypervisor is mapped with 2 sets of RIDs at runtime: its own RIDs and the active Domain's RIDs
    29.49 +   a. Domain RIDs are used by the processor to access the guest_VHPT while the Domain runs
    29.50 +   b. Hypervisor RIDs are used while the hypervisor is running
    29.51 +   c. This implies some region register transitions on entering/exiting the hypervisor
   29.52 +
    29.53 +5. Linux-style pt_regs with minor modifications for VT and instruction emulation
    29.54 +   a. Some Domain registers are saved/restored from the VPD
    29.55 +   b. pt_regs is extended to include r4~r7 and the Domain's iipa & isr for possible instruction emulation, so a complete switch_stack need not be saved on IVT entry
   29.56 +
    29.57 +6. Linux-style per-virtual-processor memory/RSE stacks, the same as for a non-VT Domain0
   29.58 +
    29.59 +7. Handles split I/D-cache designs (see the sketch in APPENDIX B)
    29.60 +   Newer IPF processors have split I/D caches.  The design takes this into account when Xen copies Domain0 to its target address for execution
   29.61 +
   29.62 +
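    29.63 +
    29.64 +APPENDIX A - RID partitioning sketch (editorial illustration)
    29.65 +   A minimal sketch of the arithmetic described in item 3 above, assuming
    29.66 +   the 24-bit RID space split on the top 4 bits; the names below are
    29.67 +   hypothetical and do not appear in the tree.
    29.68 +
    29.69 +	#define RID_BITS	24
    29.70 +	#define PART_BITS	4	/* top 4 bits select a partition */
    29.71 +	#define RIDS_PER_PART	(1UL << (RID_BITS - PART_BITS))	/* 0x100000 */
    29.72 +
    29.73 +	/* Domain N (N = 0..14) owns RIDs [N*0x100000, (N+1)*0x100000);
    29.74 +	 * partition 15 (0xF00000-0xFFFFFF) is reserved for the hypervisor. */
    29.75 +	unsigned long part_rid_base(unsigned int part)
    29.76 +	{
    29.77 +		return (unsigned long)part << (RID_BITS - PART_BITS);
    29.78 +	}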
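    29.79 +
    29.80 +APPENDIX B - cache-sync sketch for item 7 (editorial illustration)
    29.81 +   On split I/D-cache processors, code copied to a new address must be made
    29.82 +   visible to the instruction stream before it is executed.  A minimal
    29.83 +   sketch using the standard IA-64 intrinsics, modeled on the kernel's
    29.84 +   flush_icache_range(); the function name is hypothetical.
    29.85 +
    29.86 +	void sync_icache(unsigned long start, unsigned long end)
    29.87 +	{
    29.88 +		unsigned long addr;
    29.89 +
    29.90 +		for (addr = start & ~31UL; addr < end; addr += 32)
    29.91 +			ia64_fc((void *)addr);	/* flush each cache line to memory */
    29.92 +		ia64_sync_i();			/* make the I-cache coherent */
    29.93 +		ia64_srlz_i();			/* serialize the instruction stream */
    29.94 +	}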
    30.1 --- a/xen/arch/ia64/tools/mkbuildtree	Thu May 19 21:22:49 2005 +0000
    30.2 +++ b/xen/arch/ia64/tools/mkbuildtree	Fri May 20 17:23:51 2005 +0000
    30.3 @@ -107,7 +107,7 @@ cp_patch arch/ia64/mm/tlb.c arch/ia64/tl
    30.4  #cp_patch arch/ia64/hp/sim/hpsim_irq.c arch/ia64/hpsim_irq.c hpsim_irq.c
    30.5  
    30.6  softlink arch/ia64/kernel/efi_stub.S arch/ia64/efi_stub.S
    30.7 -softlink arch/ia64/kernel/entry.h arch/ia64/entry.h
    30.8 +cp_patch arch/ia64/kernel/entry.h arch/ia64/entry.h entry.h
    30.9  softlink arch/ia64/kernel/ia64_ksyms.c arch/ia64/ia64_ksyms.c
   30.10  softlink arch/ia64/kernel/irq_lsapic.c arch/ia64/irq_lsapic.c
   30.11  softlink arch/ia64/kernel/machvec.c arch/ia64/machvec.c
   30.12 @@ -172,8 +172,8 @@ cp_patch arch/ia64/hp/sim/hpsim_ssc.h in
   30.13  
   30.14  #cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h
   30.15  softlink include/asm-ia64/current.h include/asm-ia64/current.h
   30.16 -#cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
   30.17 -softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h
   30.18 +cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
   30.19 +#softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h
   30.20  #cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h
   30.21  softlink include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h
   30.22  #cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
   30.23 @@ -217,7 +217,7 @@ softlink include/asm-ia64/errno.h includ
   30.24  softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h
   30.25  softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h
   30.26  #softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h
   30.27 -softlink include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h
   30.28 +cp_patch include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h ia64regs.h
   30.29  softlink include/asm-ia64/intrinsics.h include/asm-ia64/intrinsics.h
   30.30  softlink include/asm-ia64/ioctl.h include/asm-ia64/ioctl.h
   30.31  softlink include/asm-ia64/linkage.h include/asm-ia64/linkage.h
   30.32 @@ -229,7 +229,7 @@ softlink include/asm-ia64/mca.h include/
   30.33  softlink include/asm-ia64/meminit.h include/asm-ia64/meminit.h
   30.34  softlink include/asm-ia64/mman.h include/asm-ia64/mman.h
   30.35  softlink include/asm-ia64/numa.h include/asm-ia64/numa.h
   30.36 -softlink include/asm-ia64/pal.h include/asm-ia64/pal.h
   30.37 +cp_patch include/asm-ia64/pal.h include/asm-ia64/pal.h pal.h
   30.38  softlink include/asm-ia64/param.h include/asm-ia64/param.h
   30.39  softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
   30.40  softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
   30.41 @@ -237,7 +237,7 @@ softlink include/asm-ia64/percpu.h inclu
   30.42  #softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
   30.43  cp_patch include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h pgalloc.h
   30.44  softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
   30.45 -softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
   30.46 +cp_patch include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h ptrace.h
   30.47  softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
   30.48  softlink include/asm-ia64/rse.h include/asm-ia64/rse.h
   30.49  softlink include/asm-ia64/rwsem.h include/asm-ia64/rwsem.h
    31.1 --- a/xen/arch/ia64/vcpu.c	Thu May 19 21:22:49 2005 +0000
    31.2 +++ b/xen/arch/ia64/vcpu.c	Fri May 20 17:23:51 2005 +0000
    31.3 @@ -14,6 +14,9 @@
    31.4  #include <asm/tlb.h>
    31.5  #include <asm/processor.h>
    31.6  #include <asm/delay.h>
    31.7 +#ifdef CONFIG_VTI
    31.8 +#include <asm/vmx_vcpu.h>
    31.9 +#endif // CONFIG_VTI
   31.10  
   31.11  typedef	union {
   31.12  	struct ia64_psr ia64_psr;
   31.13 @@ -523,12 +526,19 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   31.14  		printf("vcpu_pend_interrupt: bad vector\n");
   31.15  		return;
   31.16  	}
   31.17 +#ifdef CONFIG_VTI
   31.18 +    if ( VMX_DOMAIN(vcpu) ) {
   31.19 + 	    set_bit(vector,VPD_CR(vcpu,irr));
   31.20 +    } else
   31.21 +#endif // CONFIG_VTI
   31.22 +    {
   31.23  	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
   31.24  	if (test_bit(vector,PSCBX(vcpu,irr))) {
   31.25  //printf("vcpu_pend_interrupt: overrun\n");
   31.26  	}
   31.27  	set_bit(vector,PSCBX(vcpu,irr));
   31.28  	PSCB(vcpu,pending_interruption) = 1;
   31.29 +    }
   31.30  }
   31.31  
   31.32  void early_tick(VCPU *vcpu)
   31.33 @@ -619,7 +629,8 @@ extern unsigned long privop_trace;
   31.34  //privop_trace=1;
   31.35  	//TODO: Implement this
   31.36  	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
   31.37 -	*pval = 0;
   31.38 +	//*pval = 0;
   31.39 +	*pval = ia64_getreg(_IA64_REG_CR_LID);
   31.40  	return IA64_NO_FAULT;
   31.41  }
   31.42  
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/xen/arch/ia64/vlsapic.c	Fri May 20 17:23:51 2005 +0000
    32.3 @@ -0,0 +1,504 @@
    32.4 +
    32.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    32.6 +/*
    32.7 + * vlsapic.c: virtual lsapic model including ITC timer.
    32.8 + * Copyright (c) 2005, Intel Corporation.
    32.9 + *
   32.10 + * This program is free software; you can redistribute it and/or modify it
   32.11 + * under the terms and conditions of the GNU General Public License,
   32.12 + * version 2, as published by the Free Software Foundation.
   32.13 + *
   32.14 + * This program is distributed in the hope it will be useful, but WITHOUT
   32.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   32.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   32.17 + * more details.
   32.18 + *
   32.19 + * You should have received a copy of the GNU General Public License along with
   32.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   32.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   32.22 + *
   32.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   32.24 + */
   32.25 +
   32.26 +#include <linux/sched.h>
   32.27 +#include <public/arch-ia64.h>
   32.28 +#include <asm/ia64_int.h>
   32.29 +#include <asm/vcpu.h>
   32.30 +#include <asm/regionreg.h>
   32.31 +#include <asm/tlb.h>
   32.32 +#include <asm/processor.h>
   32.33 +#include <asm/delay.h>
   32.34 +#include <asm/vmx_vcpu.h>
   32.35 +#include <asm/vmx_vcpu.h>
   32.36 +#include <asm/regs.h>
   32.37 +#include <asm/gcc_intrin.h>
   32.38 +#include <asm/vmx_mm_def.h>
   32.39 +#include <asm/vmx.h>
   32.40 +#include <asm/hw_irq.h>
   32.41 +#include <asm/vmx_pal_vsa.h>
   32.42 +#include <asm/kregs.h>
   32.43 +
   32.44 +//u64  fire_itc;
   32.45 +//u64  fire_itc2;
   32.46 +//u64  fire_itm;
   32.47 +//u64  fire_itm2;
   32.48 +/*
    32.49 + * Update the cached last_itc value.
   32.50 + */
   32.51 +static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
   32.52 +{
   32.53 +    vtm->last_itc = cur_itc;
   32.54 +}
   32.55 +
   32.56 +/*
    32.57 + * ITC value as seen by the guest (host + offset + drift).
   32.58 + */
   32.59 +static uint64_t now_itc(vtime_t *vtm)
   32.60 +{
   32.61 +        uint64_t guest_itc=vtm->vtm_offset+ia64_get_itc();
   32.62 +        
   32.63 +        if ( vtm->vtm_local_drift ) {
   32.64 +//          guest_itc -= vtm->vtm_local_drift;
   32.65 +        }       
   32.66 +        if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
   32.67 +            return guest_itc;
   32.68 +
   32.69 +        }
   32.70 +        else {
    32.71 +            /* guest ITC went backward after an LP switch */
   32.72 +            return vtm->last_itc;
   32.73 +        }
   32.74 +}
   32.75 +
   32.76 +/*
    32.77 + * Reset the interval timer components.
   32.78 + */
   32.79 +static void vtm_reset(VCPU *vcpu)
   32.80 +{
   32.81 +    uint64_t    cur_itc;
   32.82 +    vtime_t     *vtm;
   32.83 +    
   32.84 +    vtm=&(vcpu->arch.arch_vmx.vtm);
   32.85 +    vtm->vtm_offset = 0;
   32.86 +    vtm->vtm_local_drift = 0;
   32.87 +    VPD_CR(vcpu, itm) = 0;
   32.88 +    VPD_CR(vcpu, itv) = 0x10000;
   32.89 +    cur_itc = ia64_get_itc();
   32.90 +    vtm->last_itc = vtm->vtm_offset + cur_itc;
   32.91 +}
   32.92 +
   32.93 +/* callback function when vtm_timer expires */
   32.94 +static void vtm_timer_fn(unsigned long data)
   32.95 +{
   32.96 +    vtime_t *vtm;
   32.97 +    VCPU    *vcpu = (VCPU*)data;
   32.98 +    u64	    cur_itc,vitm;
   32.99 +
  32.100 +    UINT64  vec;
  32.101 +    
  32.102 +    vec = VPD_CR(vcpu, itv) & 0xff;
  32.103 +    vmx_vcpu_pend_interrupt(vcpu, vec);
  32.104 +
  32.105 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.106 +    cur_itc = now_itc(vtm);
  32.107 +    vitm =VPD_CR(vcpu, itm);
  32.108 + //fire_itc2 = cur_itc;
  32.109 + //fire_itm2 = vitm;
  32.110 +    update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
  32.111 +    vtm->timer_hooked = 0;
  32.112 +}
  32.113 +
  32.114 +void vtm_init(VCPU *vcpu)
  32.115 +{
  32.116 +    vtime_t     *vtm;
  32.117 +    uint64_t    itc_freq;
  32.118 +    
  32.119 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.120 +
  32.121 +    itc_freq = local_cpu_data->itc_freq;
  32.122 +    vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
  32.123 +    vtm->cfg_min_grun=itc_freq*MIN_GUEST_RUNNING_TIME/1000;
  32.124 +    /* set up the actimer */
  32.125 +    init_ac_timer(&(vtm->vtm_timer));
  32.126 +    vtm->timer_hooked = 0;
  32.127 +    vtm->vtm_timer.cpu = 0;     /* Init value for SMP case */
  32.128 +    vtm->vtm_timer.data = (unsigned long)vcpu;
  32.129 +    vtm->vtm_timer.function = vtm_timer_fn;
  32.130 +    vtm_reset(vcpu);
  32.131 +}
  32.132 +
  32.133 +/*
   32.134 + * Action when the guest reads the ITC.
  32.135 + */
  32.136 +uint64_t vtm_get_itc(VCPU *vcpu)
  32.137 +{
  32.138 +    uint64_t    guest_itc, spsr;
  32.139 +    vtime_t    *vtm;
  32.140 +
  32.141 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.142 +    // FIXME: should use local_irq_disable & local_irq_enable ??
  32.143 +    local_irq_save(spsr);
  32.144 +    guest_itc = now_itc(vtm);
  32.145 +    update_last_itc(vtm, guest_itc);
  32.146 +
  32.147 +    local_irq_restore(spsr);
  32.148 +    return guest_itc;
  32.149 +}
  32.150 +
  32.151 +void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
  32.152 +{
  32.153 +    uint64_t    spsr;
  32.154 +    vtime_t     *vtm;
  32.155 +
  32.156 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.157 +    local_irq_save(spsr);
  32.158 +    vtm->vtm_offset = new_itc - ia64_get_itc();
  32.159 +    vtm->last_itc = new_itc;
  32.160 +    vtm_interruption_update(vcpu, vtm);
  32.161 +    local_irq_restore(spsr);
  32.162 +}
  32.163 +
  32.164 +void vtm_set_itv(VCPU *vcpu)
  32.165 +{
  32.166 +    uint64_t    spsr,itv;
  32.167 +    vtime_t     *vtm;
  32.168 +
  32.169 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.170 +    local_irq_save(spsr);
  32.171 +    itv = VPD_CR(vcpu, itv);
  32.172 +    if ( ITV_IRQ_MASK(itv) && vtm->timer_hooked ) {
  32.173 +        rem_ac_timer(&(vtm->vtm_timer));
  32.174 +        vtm->timer_hooked = 0;
  32.175 +    }
  32.176 +    vtm_interruption_update(vcpu, vtm);
  32.177 +    local_irq_restore(spsr);
  32.178 +}
  32.179 +
  32.180 +
  32.181 +/*
  32.182 + * Update interrupt or hook the vtm ac_timer for fire 
  32.183 + * At this point vtm_timer should be removed if itv is masked.
  32.184 + */
  32.185 +/* Interrupt must be disabled at this point */
  32.186 +
  32.187 +extern u64 tick_to_ns(u64 tick);
  32.188 +#define TIMER_SLOP (50*1000) /* ns */	/* copy from ac_timer.c */
  32.189 +void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
  32.190 +{
  32.191 +    uint64_t    cur_itc,vitm,vitv;
  32.192 +    uint64_t    expires;
  32.193 +    long     	diff_now, diff_last;
  32.194 +    uint64_t    spsr;
  32.195 +    
  32.196 +    vitv = VPD_CR(vcpu, itv);
  32.197 +    if ( ITV_IRQ_MASK(vitv) ) {
  32.198 +        return;
  32.199 +    }
  32.200 +    
  32.201 +    vitm =VPD_CR(vcpu, itm);
  32.202 +    local_irq_save(spsr);
  32.203 +    cur_itc =now_itc(vtm);
  32.204 +    diff_last = vtm->last_itc - vitm;
  32.205 +    diff_now = cur_itc - vitm;
  32.206 +    update_last_itc (vtm,cur_itc);
  32.207 +    
  32.208 +    if ( diff_last >= 0 ) {
  32.209 +        // interrupt already fired.
  32.210 +        if ( vtm->timer_hooked ) {
  32.211 +            rem_ac_timer(&(vtm->vtm_timer));
  32.212 +            vtm->timer_hooked = 0;          
  32.213 +        }
  32.214 +    }
  32.215 +    else if ( diff_now >= 0 ) {
  32.216 +        // ITV is fired.
  32.217 +        vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
  32.218 +    }
  32.219 +    /* Both last_itc & cur_itc < itm, wait for fire condition */
  32.220 +    else if ( vtm->timer_hooked ) {
  32.221 +        expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
  32.222 +        mod_ac_timer (&(vtm->vtm_timer), expires);
  32.223 +	printf("mod vtm_timer\n");
  32.224 +//fire_itc = cur_itc;
  32.225 +//fire_itm = vitm;
  32.226 +    }
  32.227 +    else {
  32.228 +        vtm->vtm_timer.expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
  32.229 +        vtm->vtm_timer.cpu = vcpu->processor;
  32.230 +            add_ac_timer(&(vtm->vtm_timer));
  32.231 +            vtm->timer_hooked = 1;
  32.232 +//fire_itc = cur_itc;
  32.233 +//fire_itm = vitm;
  32.234 +    }
  32.235 +    local_irq_restore(spsr);
  32.236 +}
  32.237 +
  32.238 +/*
  32.239 + * Action for vtm when the domain is scheduled out.
  32.240 + * Remove the ac_timer for vtm.
  32.241 + */
  32.242 +void vtm_domain_out(VCPU *vcpu)
  32.243 +{
  32.244 +    vtime_t     *vtm;
  32.245 +    uint64_t    spsr;
  32.246 +    
  32.247 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.248 +    local_irq_save(spsr);
  32.249 +    if ( vtm->timer_hooked ) {
  32.250 +        rem_ac_timer(&(vtm->vtm_timer));
  32.251 +        vtm->timer_hooked = 0;
  32.252 +    }
  32.253 +    local_irq_restore(spsr);
  32.254 +}
  32.255 +
  32.256 +/*
  32.257 + * Action for vtm when the domain is scheduled in.
  32.258 + * Fire vtm IRQ or add the ac_timer for vtm.
  32.259 + */
  32.260 +void vtm_domain_in(VCPU *vcpu)
  32.261 +{
  32.262 +    vtime_t     *vtm;
  32.263 +    
  32.264 +    vtm=&(vcpu->arch.arch_vmx.vtm);
  32.265 +    vtm_interruption_update(vcpu, vtm);
  32.266 +}
  32.267 +
  32.268 +
  32.269 +
  32.270 +/*
   32.271 + * Next: the vLSAPIC model.
  32.272 + */
  32.273 +
  32.274 +#define  NMI_VECTOR         2
  32.275 +#define  ExtINT_VECTOR      0
  32.276 +
  32.277 +#define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
  32.278 +/*
  32.279 + * LID-CR64: Keep in vpd.
  32.280 + * IVR-CR65: (RO) see guest_read_ivr().
  32.281 + * TPR-CR66: Keep in vpd, acceleration enabled.
  32.282 + * EOI-CR67: see guest_write_eoi().
  32.283 + * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[]
  32.284 + *          can move to vpd for optimization.
   32.285 + * ITV: handled by the timer virtualization code (vtm).
  32.286 + * PMV: Keep in vpd initialized as 0x10000.
  32.287 + * CMCV: Keep in vpd initialized as 0x10000.
  32.288 + * LRR0-1: Keep in vpd, initialized as 0x10000.
  32.289 + *
  32.290 + */
  32.291 +
  32.292 +void vlsapic_reset(VCPU *vcpu)
  32.293 +{
  32.294 +    int     i;
  32.295 +    VPD_CR(vcpu, lid) = 0;
  32.296 +    VPD_CR(vcpu, ivr) = 0;
  32.297 +    VPD_CR(vcpu,tpr) = 0x10000;
  32.298 +    VPD_CR(vcpu, eoi) = 0;
  32.299 +    VPD_CR(vcpu, irr[0]) = 0;
  32.300 +    VPD_CR(vcpu, irr[1]) = 0;
  32.301 +    VPD_CR(vcpu, irr[2]) = 0;
  32.302 +    VPD_CR(vcpu, irr[3]) = 0;
  32.303 +    VPD_CR(vcpu, pmv) = 0x10000;
  32.304 +    VPD_CR(vcpu, cmcv) = 0x10000;
  32.305 +    VPD_CR(vcpu, lrr0) = 0x10000;   // default reset value?
  32.306 +    VPD_CR(vcpu, lrr1) = 0x10000;   // default reset value?
  32.307 +    for ( i=0; i<4; i++) {
  32.308 +        VLSAPIC_INSVC(vcpu,i) = 0;
  32.309 +    }
  32.310 +}
  32.311 +
  32.312 +/*
   32.313 + *  Find the highest set bit in 4 words (256 bits total).
   32.314 + *
   32.315 + *  return 0-255: number of the highest set bit.
   32.316 + *          -1 : no bit is set.
  32.317 + */
  32.318 +static __inline__ int highest_bits(uint64_t *dat)
  32.319 +{
  32.320 +    uint64_t  bits, bitnum=-1;
  32.321 +    int i;
  32.322 +    
  32.323 +    /* loop for all 256 bits */
  32.324 +    for ( i=3; i >= 0 ; i -- ) {
  32.325 +        bits = dat[i];
  32.326 +        if ( bits ) {
  32.327 +            bitnum = ia64_fls(bits);
  32.328 +            return i*64+bitnum;
  32.329 +        }
  32.330 +    }
  32.331 +   return -1;
  32.332 +}
  32.333 +
  32.334 +/*
   32.335 + * Return 0-255 for the highest pending irq,
   32.336 + *        or -1 when none is pending.
  32.337 + */
  32.338 +static int highest_pending_irq(VCPU *vcpu)
  32.339 +{
  32.340 +    if ( VPD_CR(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
  32.341 +    if ( VPD_CR(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
  32.342 +    return highest_bits(&VPD_CR(vcpu, irr[0]));
  32.343 +}
  32.344 +
  32.345 +static int highest_inservice_irq(VCPU *vcpu)
  32.346 +{
  32.347 +    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
  32.348 +    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
  32.349 +    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
  32.350 +}
  32.351 +
  32.352 +/*
   32.353 + * Whether the pending irq has higher priority than the in-service one.
  32.354 + *
  32.355 + */
  32.356 +static int is_higher_irq(int pending, int inservice)
  32.357 +{
  32.358 +    return ( (pending >> 4) > (inservice>>4) || 
  32.359 +                ((pending != -1) && (inservice == -1)) );
  32.360 +}
  32.361 +
  32.362 +static int is_higher_class(int pending, int mic)
  32.363 +{
  32.364 +    return ( (pending >> 4) > mic );
  32.365 +}
  32.366 +
  32.367 +static int is_invalid_irq(int vec)
  32.368 +{
  32.369 +    return (vec == 1 || ((vec <= 14 && vec >= 3)));
  32.370 +}
  32.371 +
  32.372 +/* See Table 5-8 in SDM vol2 for the definition */
  32.373 +static int
  32.374 +irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
  32.375 +{
  32.376 +    uint64_t    vtpr;
  32.377 +    
  32.378 +    vtpr = VPD_CR(vcpu, tpr);
  32.379 +
  32.380 +    if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR )
  32.381 +        // Non Maskable Interrupt
  32.382 +        return 0;
  32.383 +
  32.384 +    if ( h_pending == ExtINT_VECTOR && h_inservice >= 16)
  32.385 +        return (vtpr>>16)&1;    // vtpr.mmi
  32.386 +
  32.387 +    if ( !(vtpr&(1UL<<16)) &&
  32.388 +          is_higher_irq(h_pending, h_inservice) &&
  32.389 +          is_higher_class(h_pending, (vtpr>>4)&0xf) )
  32.390 +        return 0;
  32.391 +
  32.392 +    return 1;
  32.393 +}
  32.394 +
  32.395 +void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
  32.396 +{
  32.397 +    uint64_t    spsr;
  32.398 +
  32.399 +    if (vector & ~0xff) {
  32.400 +        printf("vmx_vcpu_pend_interrupt: bad vector\n");
  32.401 +        return;
  32.402 +    }
  32.403 +    local_irq_save(spsr);
  32.404 +    VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
  32.405 +    local_irq_restore(spsr);
  32.406 +}
  32.407 +
  32.408 +/*
  32.409 + * If the new pending interrupt is enabled and not masked, we directly inject 
  32.410 + * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when 
  32.411 + * the interrupt becomes unmasked, it gets injected.
  32.412 + * RETURN:
  32.413 + *  TRUE:   Interrupt is injected.
  32.414 + *  FALSE:  Not injected but may be in VHPI when vac.a_int=1
  32.415 + *
  32.416 + * Optimization: We defer setting the VHPI until the EOI time, if a higher 
  32.417 + *               priority interrupt is in-service. The idea is to reduce the 
  32.418 + *               number of unnecessary calls to inject_vhpi.
  32.419 + */
  32.420 +int vmx_check_pending_irq(VCPU *vcpu)
  32.421 +{
  32.422 +    uint64_t  spsr;
  32.423 +    int     h_pending, h_inservice;
  32.424 +    int injected=0;
  32.425 +    uint64_t    isr;
  32.426 +    IA64_PSR    vpsr;
  32.427 +
  32.428 +    local_irq_save(spsr);
  32.429 +    h_pending = highest_pending_irq(vcpu);
  32.430 +    if ( h_pending == -1 ) goto chk_irq_exit;
  32.431 +    h_inservice = highest_inservice_irq(vcpu);
  32.432 +
  32.433 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  32.434 +    if (  vpsr.i &&
  32.435 +        !irq_masked(vcpu, h_pending, h_inservice) ) {
  32.436 +        //inject_guest_irq(v);
  32.437 +        isr = vpsr.val & IA64_PSR_RI;
  32.438 +        if ( !vpsr.ic )
  32.439 +            panic("Interrupt when IC=0\n");
  32.440 +        vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
  32.441 +        injected = 1;
  32.442 +    }
  32.443 +    else if ( VMX_VPD(vcpu,vac).a_int && 
  32.444 +            is_higher_irq(h_pending,h_inservice) ) {
  32.445 +        vmx_inject_vhpi(vcpu,h_pending);
  32.446 +    }
  32.447 +
  32.448 +chk_irq_exit:
  32.449 +    local_irq_restore(spsr);
  32.450 +    return injected;
  32.451 +}
  32.452 +
  32.453 +void guest_write_eoi(VCPU *vcpu)
  32.454 +{
  32.455 +    int vec;
  32.456 +    uint64_t  spsr;
  32.457 +
  32.458 +    vec = highest_inservice_irq(vcpu);
  32.459 +    if ( vec < 0 ) panic("Wrong vector to EOI\n");
  32.460 +    local_irq_save(spsr);
  32.461 +    VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
  32.462 +    local_irq_restore(spsr);
  32.463 +    VPD_CR(vcpu, eoi)=0;    // overwrite the data
  32.464 +}
  32.465 +
  32.466 +uint64_t guest_read_vivr(VCPU *vcpu)
  32.467 +{
  32.468 +    int vec, next, h_inservice;
  32.469 +    uint64_t  spsr;
  32.470 +
  32.471 +    local_irq_save(spsr);
  32.472 +    vec = highest_pending_irq(vcpu);
  32.473 +    h_inservice = highest_inservice_irq(vcpu);
  32.474 +    if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) {
  32.475 +        local_irq_restore(spsr);
  32.476 +        return IA64_SPURIOUS_INT_VECTOR;
  32.477 +    }
  32.478 + 
  32.479 +    VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
  32.480 +    VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
  32.481 +
  32.482 +    h_inservice = highest_inservice_irq(vcpu);
  32.483 +    next = highest_pending_irq(vcpu);
  32.484 +    if ( VMX_VPD(vcpu,vac).a_int &&
  32.485 +        (is_higher_irq(next, h_inservice) || (next == -1)) )
  32.486 +        vmx_inject_vhpi(vcpu, next);
  32.487 +    local_irq_restore(spsr);
  32.488 +    return (uint64_t)vec;
  32.489 +}
  32.490 +
  32.491 +void vmx_inject_vhpi(VCPU *vcpu, u8 vec)
  32.492 +{
  32.493 +        VMX_VPD(vcpu,vhpi) = vec / 16;
  32.494 +
  32.495 +
  32.496 +        // non-maskable
  32.497 +        if ( vec == NMI_VECTOR ) // NMI
  32.498 +                VMX_VPD(vcpu,vhpi) = 32;
  32.499 +        else if (vec == ExtINT_VECTOR) //ExtINT
  32.500 +                VMX_VPD(vcpu,vhpi) = 16;
   32.501 +        else if (vec == (u8)-1)	/* the caller's -1 arrives truncated to u8 */
  32.502 +                VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */
  32.503 +
  32.504 +        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT, 
  32.505 +            (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
  32.506 +}
  32.507 +
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xen/arch/ia64/vmmu.c	Fri May 20 17:23:51 2005 +0000
    33.3 @@ -0,0 +1,801 @@
    33.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    33.5 +/*
    33.6 + * vmmu.c: virtual memory management unit components.
    33.7 + * Copyright (c) 2005, Intel Corporation.
    33.8 + *
    33.9 + * This program is free software; you can redistribute it and/or modify it
   33.10 + * under the terms and conditions of the GNU General Public License,
   33.11 + * version 2, as published by the Free Software Foundation.
   33.12 + *
   33.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   33.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   33.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   33.16 + * more details.
   33.17 + *
   33.18 + * You should have received a copy of the GNU General Public License along with
   33.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   33.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   33.21 + *
   33.22 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   33.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   33.24 + */
   33.25 +#include <linux/sched.h>
   33.26 +#include <asm/tlb.h>
   33.27 +#include <asm/gcc_intrin.h>
   33.28 +#include <asm/vcpu.h>
   33.29 +#include <xen/interrupt.h>
   33.30 +#include <asm/vmx_vcpu.h>
   33.31 +#include <asm/vmx_mm_def.h>
   33.32 +#include <asm/vmx.h>
   33.33 +#include <asm/hw_irq.h>
   33.34 +#include <asm/vmx_pal_vsa.h>
   33.35 +#include <asm/kregs.h>
   33.36 +
   33.37 +/*
    33.38 + * An architected ppn is in 4KB units, while a Xen
    33.39 + * page may be a different size (1<<PAGE_SHIFT).
   33.40 + */
   33.41 +static inline u64 arch_ppn_to_xen_ppn(u64 appn)
   33.42 +{
   33.43 +    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
   33.44 +}
   33.45 +
   33.46 +static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
   33.47 +{
   33.48 +    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
   33.49 +}
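          +
          +/*
          + * Worked example (illustration only; assumes ARCH_PAGE_SHIFT == 12 and
          + * a 16KB Xen page, i.e. PAGE_SHIFT == 14): one Xen page then spans four
          + * architected 4KB frames, so the conversions divide/multiply by 4:
          + *
          + *   arch_ppn_to_xen_ppn(0x1000) == (0x1000 << 12) >> 14 == 0x400
          + *   xen_ppn_to_arch_ppn(0x400)  == (0x400  << 14) >> 12 == 0x1000
          + */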
   33.50 +
   33.51 +
   33.52 +/*
    33.53 + * Get the machine page frame number for a guest frame
    33.54 + * (both in architected 4KB units).
    33.55 + * Input:
    33.55 + *  domid: domain to translate for (DOMID_SELF for the current domain)
    33.55 + *  gpfn:  guest physical frame number
    33.55 + *  pages: frame count, used only by the commented-out range check below
   33.56 + */
   33.57 +u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
   33.58 +{
   33.59 +    struct domain *d;
   33.60 +    u64    i, xen_gppn, xen_mppn, mpfn;
   33.61 +    
   33.62 +    if ( domid == DOMID_SELF ) {
   33.63 +        d = current->domain;
   33.64 +    }
   33.65 +    else {
   33.66 +        d = find_domain_by_id(domid);
   33.67 +    }
   33.68 +    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
   33.69 +    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
   33.70 +/*
   33.71 +    for (i=0; i<pages; i++) {
   33.72 +        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
   33.73 +            return INVALID_MFN;
   33.74 +        }
   33.75 +    }
   33.76 +*/
   33.77 +    mpfn= xen_ppn_to_arch_ppn(xen_mppn);
   33.78 +    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn);
   33.79 +    return mpfn;
   33.80 +    
   33.81 +}
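          +
          +/*
          + * Hedged example, continuing the 16KB-page assumption above: the low
          + * two bits of gpfn select the 4KB sub-frame within the Xen page, so if
          + * __gpfn_to_mfn() maps the guest frame to Xen machine page 0x400:
          + *
          + *   get_mfn(DOMID_SELF, 0x1002, 1) == (0x400 << 2) | (0x1002 & 0x3)
          + *                                  == 0x1002, in 4KB machine frames
          + */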
   33.82 +
   33.83 +/*
   33.84 + * The VRN bits of va stand for which rr to get.
   33.85 + */
   33.86 +rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
   33.87 +{
   33.88 +    rr_t   vrr;
   33.89 +    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
   33.90 +    return vrr;
   33.91 +}
   33.92 +
   33.93 +
   33.94 +void recycle_message(thash_cb_t *hcb, u64 para)
   33.95 +{
   33.96 +    printk("hcb=%p recycled with %lx\n",hcb,para);
   33.97 +}
   33.98 +
   33.99 +
  33.100 +/*
  33.101 + * Purge all guest TCs in logical processor.
  33.102 + * Instead of purging all LP TCs, we should only purge   
  33.103 + * TCs that belong to this guest.
  33.104 + */
  33.105 +void
  33.106 +purge_machine_tc_by_domid(domid_t domid)
  33.107 +{
  33.108 +#ifndef PURGE_GUEST_TC_ONLY
  33.109 +    // purge all TCs
  33.110 +    struct ia64_pal_retval  result;
  33.111 +    u64 addr;
  33.112 +    u32 count1,count2;
  33.113 +    u32 stride1,stride2;
  33.114 +    u32 i,j;
  33.115 +    u64 psr;
  33.116 +    
  33.117 +
  33.118 +    result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
  33.119 +    if ( result.status != 0 ) {
  33.120 +        panic ("PAL_PTCE_INFO failed\n");
  33.121 +    }
  33.122 +    addr = result.v0;
  33.123 +    count1 = HIGH_32BITS(result.v1);
  33.124 +    count2 = LOW_32BITS (result.v1);
  33.125 +    stride1 = HIGH_32BITS(result.v2);
  33.126 +    stride2 = LOW_32BITS (result.v2);
  33.127 +    
  33.128 +    local_irq_save(psr);
  33.129 +    for (i=0; i<count1; i++) {
  33.130 +        for (j=0; j<count2; j++) {
  33.131 +            ia64_ptce(addr);
  33.132 +            addr += stride2;
  33.133 +        }
  33.134 +        addr += stride1;
  33.135 +    }
  33.136 +    local_irq_restore(psr);
  33.137 +#else
   33.138 +    // purge only TCs belonging to this guest.
  33.139 +#endif
  33.140 +}
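          +
          +/*
          + * Illustrative note (not in the original source): PAL_PTCE_INFO
          + * describes a two-level purge walk.  For example, with count1=4 and
          + * count2=8 the nested loop above issues 4*8 = 32 ptc.e operations,
          + * stepping by stride2 within a row and by stride1 between rows, which
          + * together cover the entire machine TLB.
          + */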
  33.141 +
  33.142 +static thash_cb_t *init_domain_vhpt(struct exec_domain *d)
  33.143 +{
  33.144 +    struct pfn_info *page;
  33.145 +    void   *vbase,*vcur;
  33.146 +    vhpt_special *vs;
  33.147 +    thash_cb_t  *vhpt;
  33.148 +    PTA pta_value;
  33.149 +    
  33.150 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
  33.151 +    if ( page == NULL ) {
  33.152 +        panic("No enough contiguous memory for init_domain_mm\n");
  33.153 +    }
  33.154 +    vbase = page_to_virt(page);
  33.155 +    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
  33.156 +    memset(vbase, 0, VCPU_TLB_SIZE);
  33.157 +    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
  33.158 +    vhpt = --((thash_cb_t*)vcur);
  33.159 +    vhpt->ht = THASH_VHPT;
  33.160 +    vhpt->vcpu = d;
  33.161 +    vhpt->hash_func = machine_thash;
  33.162 +    vs = --((vhpt_special *)vcur);
  33.163 +
  33.164 +    /* Setup guest pta */
  33.165 +    pta_value.val = 0;
  33.166 +    pta_value.ve = 1;
  33.167 +    pta_value.vf = 1;
  33.168 +    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
  33.169 +    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
  33.170 +    d->arch.arch_vmx.mpta = pta_value.val;
  33.171 +   
  33.172 +    vhpt->vs = vs;
  33.173 +    vhpt->vs->get_mfn = get_mfn;
  33.174 +    vhpt->vs->tag_func = machine_ttag;
  33.175 +    vhpt->hash = vbase;
  33.176 +    vhpt->hash_sz = VCPU_TLB_SIZE/2;
  33.177 +    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
  33.178 +    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
  33.179 +    vhpt->recycle_notifier = recycle_message;
  33.180 +    thash_init(vhpt,VCPU_TLB_SHIFT-1);
  33.181 +    return vhpt;
  33.182 +}
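          +
          +/*
          + * Resulting layout of the VCPU_TLB_SIZE allocation (sketch added for
          + * clarity): the control structures are carved downward from the top
          + * while the hash and collision-chain buffers fill the bottom:
          + *
          + *   vbase                                                vcur
          + *   | hash: VCPU_TLB_SIZE/2 | cch_buf ... | vhpt_special | thash_cb_t |
          + */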
  33.183 +
  33.184 +
  33.185 +thash_cb_t *init_domain_tlb(struct exec_domain *d)
  33.186 +{
  33.187 +    struct pfn_info *page;
  33.188 +    void    *vbase,*vcur;
  33.189 +    tlb_special_t  *ts;
  33.190 +    thash_cb_t  *tlb;
  33.191 +    
  33.192 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
  33.193 +    if ( page == NULL ) {
  33.194 +        panic("No enough contiguous memory for init_domain_mm\n");
  33.195 +    }
  33.196 +    vbase = page_to_virt(page);
  33.197 +    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
  33.198 +    memset(vbase, 0, VCPU_TLB_SIZE);
  33.199 +    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
  33.200 +    tlb = --((thash_cb_t*)vcur);
  33.201 +    tlb->ht = THASH_TLB;
  33.202 +    tlb->vcpu = d;
  33.203 +    ts = --((tlb_special_t *)vcur);
  33.204 +    tlb->ts = ts;
  33.205 +    tlb->ts->vhpt = init_domain_vhpt(d);
  33.206 +    tlb->hash_func = machine_thash;
  33.207 +    tlb->hash = vbase;
  33.208 +    tlb->hash_sz = VCPU_TLB_SIZE/2;
  33.209 +    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
  33.210 +    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
  33.211 +    tlb->recycle_notifier = recycle_message;
  33.212 +    thash_init(tlb,VCPU_TLB_SHIFT-1);
  33.213 +    return tlb;
  33.214 +}
  33.215 +
  33.216 +/* Allocate physical to machine mapping table for domN
  33.217 + * FIXME: Later this interface may be removed, if that table is provided
  33.218 + * by control panel. Dom0 has gpfn identical to mfn, which doesn't need
  33.219 + * this interface at all.
  33.220 + */
  33.221 +void
  33.222 +alloc_pmt(struct domain *d)
  33.223 +{
  33.224 +    struct pfn_info *page;
  33.225 +
  33.226 +    /* Only called once */
  33.227 +    ASSERT(d->arch.pmt);
  33.228 +
  33.229 +    page = alloc_domheap_pages(NULL, get_order(d->max_pages));
  33.230 +    ASSERT(page);
  33.231 +
  33.232 +    d->arch.pmt = page_to_virt(page);
  33.233 +    memset(d->arch.pmt, 0x55, d->max_pages * 8);
  33.234 +}
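          +
          +/*
          + * Hypothetical lookup sketch (not in the original source): the table is
          + * one 8-byte machine frame number per guest physical frame, so once the
          + * control panel has populated it a translation would read:
          + *
          + *   u64 mfn = ((u64 *)d->arch.pmt)[gpfn];   // gpfn < d->max_pages
          + *
          + * The 0x55 memset leaves a recognizable pattern in unfilled entries.
          + */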
  33.235 +
  33.236 +/*
  33.237 + * Insert guest TLB to machine TLB.
  33.238 + *  data:   In TLB format
  33.239 + */
  33.240 +void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb)
  33.241 +{
  33.242 +    u64     saved_itir, saved_ifa, saved_rr;
  33.243 +    u64     pages;
  33.244 +    thash_data_t    mtlb;
  33.245 +    rr_t    vrr;
  33.246 +    unsigned int    cl = tlb->cl;
  33.247 +
  33.248 +    mtlb.ifa = tlb->vadr;
  33.249 +    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
  33.250 +    vrr = vmmu_get_rr(d,mtlb.ifa);
  33.251 +    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
  33.252 +    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
  33.253 +    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
  33.254 +    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, pages);
  33.255 +    if (mtlb.ppn == INVALID_MFN)
   33.256 +        panic("Machine tlb insert with invalid mfn number.\n");
  33.257 +
  33.258 +    __asm __volatile("rsm   psr.ic|psr.i;; srlz.i" );
  33.259 +    
  33.260 +    saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
  33.261 +    saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
  33.262 +    saved_rr = ia64_get_rr(mtlb.ifa);
  33.263 +
  33.264 +    ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
  33.265 +    ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
  33.266 +    /* Only access memory stack which is mapped by TR,
  33.267 +     * after rr is switched.
  33.268 +     */
  33.269 +    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
  33.270 +    ia64_srlz_d();
  33.271 +    if ( cl == ISIDE_TLB ) {
  33.272 +        ia64_itci(mtlb.page_flags);
   33.273 +        ia64_srlz_i();
  33.274 +    }
  33.275 +    else {
  33.276 +        ia64_itcd(mtlb.page_flags);
   33.277 +        ia64_srlz_d();
  33.278 +    }
  33.279 +    ia64_set_rr(mtlb.ifa,saved_rr);
  33.280 +    ia64_srlz_d();
  33.281 +    ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
  33.282 +    ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
  33.283 +    __asm __volatile("ssm   psr.ic|psr.i;; srlz.i" );
  33.284 +}
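          +
          +/*
          + * Hedged caller sketch: a guest TLB-miss path would look the entry up
          + * in the vTLB and install it on the current LP, e.g.:
          + *
          + *   thash_data_t *gtlb = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
          + *   if (gtlb)
          + *       machine_tlb_insert(current, gtlb);
          + */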
  33.285 +
  33.286 +u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
  33.287 +{
  33.288 +    u64     saved_pta, saved_rr0;
   33.289 +    u64     hash_addr;
  33.290 +    unsigned long psr;
  33.291 +    struct exec_domain *ed = current;
  33.292 +    rr_t    vrr;
  33.293 +
  33.294 +    
  33.295 +    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  33.296 +    saved_rr0 = ia64_get_rr(0);
  33.297 +    vrr.value = saved_rr0;
  33.298 +    vrr.rid = rid;
  33.299 +    vrr.ps = ps;
  33.300 +
  33.301 +    va = (va << 3) >> 3;    // set VRN to 0.
  33.302 +    // TODO: Set to enforce lazy mode
  33.303 +    local_irq_save(psr);
  33.304 +    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  33.305 +    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
  33.306 +    ia64_srlz_d();
  33.307 +
  33.308 +    hash_addr = ia64_thash(va);
  33.309 +    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  33.310 +
  33.311 +    ia64_set_rr(0, saved_rr0);
  33.312 +    ia64_srlz_d();
  33.313 +    local_irq_restore(psr);
  33.314 +    return hash_addr;
  33.315 +}
  33.316 +
  33.317 +u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
  33.318 +{
  33.319 +    u64     saved_pta, saved_rr0;
   33.320 +    u64     tag;
  33.321 +    u64     psr;
  33.322 +    struct exec_domain *ed = current;
  33.323 +    rr_t    vrr;
  33.324 +
  33.325 +    // TODO: Set to enforce lazy mode    
  33.326 +    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  33.327 +    saved_rr0 = ia64_get_rr(0);
  33.328 +    vrr.value = saved_rr0;
  33.329 +    vrr.rid = rid;
  33.330 +    vrr.ps = ps;
  33.331 +
  33.332 +    va = (va << 3) >> 3;    // set VRN to 0.
  33.333 +    local_irq_save(psr);
  33.334 +    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  33.335 +    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
  33.336 +    ia64_srlz_d();
  33.337 +
  33.338 +    tag = ia64_ttag(va);
  33.339 +    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  33.340 +
  33.341 +    ia64_set_rr(0, saved_rr0);
  33.342 +    ia64_srlz_d();
  33.343 +    local_irq_restore(psr);
  33.344 +    return tag;
  33.345 +}
  33.346 +
  33.347 +/*
  33.348 + *  Purge machine tlb.
  33.349 + *  INPUT
   33.350 + *      rid:    guest rid
   33.351 + *      va:     only bits 0:60 are valid
   33.352 + *      ps:     log2 of the range to purge, i.e. (1<<ps) bytes
  33.353 + *
  33.354 + */
  33.355 +void machine_tlb_purge(u64 rid, u64 va, u64 ps)
  33.356 +{
  33.357 +    u64       saved_rr0;
  33.358 +    u64       psr;
  33.359 +    rr_t      vrr;
  33.360 +
  33.361 +    va = (va << 3) >> 3;    // set VRN to 0.
  33.362 +    saved_rr0 = ia64_get_rr(0);
  33.363 +    vrr.value = saved_rr0;
  33.364 +    vrr.rid = rid;
  33.365 +    vrr.ps = ps;
  33.366 +    local_irq_save(psr);
  33.367 +    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.value) );
  33.368 +    ia64_srlz_d();
  33.369 +    ia64_ptcl(va, ps << 2);
  33.370 +    ia64_set_rr( 0, saved_rr0 );
  33.371 +    ia64_srlz_d();
  33.372 +    local_irq_restore(psr);
  33.373 +}
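          +
          +/*
          + * Hedged usage example: purging one 16KB guest mapping would be
          + *
          + *   machine_tlb_purge(vrr.rid, vadr, 14);   // range of 1 << 14 bytes
          + *
          + * The temporary rr0 swap above is what makes the ptc.l apply to the
          + * guest's rid rather than to Xen's own.
          + */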
  33.374 +
  33.375 +
  33.376 +int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
  33.377 +{
  33.378 +    ia64_rr  vrr;
  33.379 +    PTA   vpta;
  33.380 +    IA64_PSR  vpsr; 
  33.381 +
  33.382 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  33.383 +    vrr = vmx_vcpu_rr(vcpu, vadr);
  33.384 +    vmx_vcpu_get_pta(vcpu,&vpta.val);
  33.385 +
  33.386 +    if ( vrr.ve & vpta.ve ) {
  33.387 +        switch ( ref ) {
  33.388 +        case DATA_REF:
  33.389 +        case NA_REF:
  33.390 +            return vpsr.dt;
  33.391 +        case INST_REF:
  33.392 +            return vpsr.dt && vpsr.it && vpsr.ic;
  33.393 +        case RSE_REF:
  33.394 +            return vpsr.dt && vpsr.rt;
  33.395 +
  33.396 +        }
  33.397 +    }
  33.398 +    return 0;
  33.399 +}
  33.400 +
  33.401 +
  33.402 +int unimplemented_gva(VCPU *vcpu,u64 vadr)
  33.403 +{
  33.404 +    int bit=vcpu->domain->arch.imp_va_msb;
  33.405 +    u64 ladr =(vadr<<3)>>(3+bit);
   33.406 +    if(!ladr||ladr==(1UL<<(61-bit))-1){
  33.407 +        return 0;
  33.408 +    }else{
  33.409 +        return 1;
  33.410 +    }
  33.411 +}
  33.412 +
  33.413 +
  33.414 +/*
   33.415 + * Fetch guest bundle code.
   33.416 + * INPUT:
   33.417 + *  gip:  guest virtual IP to fetch from.
   33.418 + *  code: buffer pointer to hold the dword (8 bytes) read.
   33.418 + * RETURN: 1 on success, 0 if the guest frame is unmapped.
  33.419 + */
  33.420 +int
  33.421 +fetch_code(VCPU *vcpu, u64 gip, u64 *code)
  33.422 +{
  33.423 +    u64     gpip;   // guest physical IP
  33.424 +    u64     mpa;
  33.425 +    thash_data_t    *tlb;
  33.426 +    rr_t    vrr;
  33.427 +    u64     mfn;
  33.428 +    
  33.429 +    if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
  33.430 +        gpip = gip;
  33.431 +    }
  33.432 +    else {
  33.433 +        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
  33.434 +        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 
  33.435 +                vrr.rid, gip, ISIDE_TLB );
  33.436 +        if ( tlb == NULL ) panic("No entry found in ITLB\n");
  33.437 +        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
  33.438 +    }
  33.439 +    mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
  33.440 +    if ( mfn == INVALID_MFN ) return 0;
  33.441 +    
  33.442 +    mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
  33.443 +    *code = *(u64*)__va(mpa);
  33.444 +    return 1;
  33.445 +}
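          +
          +/*
          + * Hedged usage sketch, e.g. from an instruction-emulation path:
          + *
          + *   u64 inst;
          + *   if (!fetch_code(vcpu, regs->cr_iip, &inst))
          + *       ;   // guest IP frame unmapped: inject a fault instead
          + */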
  33.446 +
  33.447 +IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  33.448 +{
  33.449 +
  33.450 +    thash_data_t data, *ovl;
  33.451 +    thash_cb_t  *hcb;
  33.452 +    search_section_t sections;
  33.453 +    rr_t    vrr;
  33.454 +
  33.455 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.456 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  33.457 +    data.itir=itir;
  33.458 +    data.vadr=PAGEALIGN(ifa,data.ps);
  33.459 +    data.section=THASH_TLB_TC;
  33.460 +    data.cl=ISIDE_TLB;
   33.461 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  33.462 +    data.rid = vrr.rid;
  33.463 +    
  33.464 +    sections.v = THASH_SECTION_TR;
  33.465 +
  33.466 +    ovl = thash_find_overlap(hcb, &data, sections);
   33.467 +    if (ovl) {
  33.468 +        // generate MCA.
  33.469 +        panic("Tlb conflict!!");
  33.471 +    }
  33.472 +    sections.v = THASH_SECTION_TC;
  33.473 +    thash_purge_entries(hcb, &data, sections);
  33.474 +    thash_insert(hcb, &data, ifa);
  33.475 +    return IA64_NO_FAULT;
  33.476 +}
  33.477 +
  33.478 +
  33.479 +
  33.480 +
  33.481 +IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  33.482 +{
  33.483 +
  33.484 +    thash_data_t data, *ovl;
  33.485 +    thash_cb_t  *hcb;
  33.486 +    search_section_t sections;
  33.487 +    rr_t    vrr;
  33.488 +
  33.489 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.490 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  33.491 +    data.itir=itir;
  33.492 +    data.vadr=PAGEALIGN(ifa,data.ps);
  33.493 +    data.section=THASH_TLB_TC;
  33.494 +    data.cl=DSIDE_TLB;
   33.495 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  33.496 +    data.rid = vrr.rid;
  33.497 +    sections.v = THASH_SECTION_TR;
  33.498 +
  33.499 +    ovl = thash_find_overlap(hcb, &data, sections);
  33.500 +    if (ovl) {
   33.501 +        // generate MCA.
  33.502 +        panic("Tlb conflict!!");
  33.504 +    }
  33.505 +    sections.v = THASH_SECTION_TC;
  33.506 +    thash_purge_entries(hcb, &data, sections);
  33.507 +    thash_insert(hcb, &data, ifa);
  33.508 +    return IA64_NO_FAULT;
  33.509 +}
  33.510 +
  33.511 +IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
  33.512 +{
  33.513 +
  33.514 +    thash_data_t data, *ovl;
  33.515 +    thash_cb_t  *hcb;
  33.516 +    search_section_t sections;
  33.517 +    rr_t    vrr;
  33.518 +
  33.519 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.520 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  33.521 +    data.itir=0;
  33.522 +    data.ps = ps;
  33.523 +    data.vadr=PAGEALIGN(va,ps);
  33.524 +    data.section=THASH_TLB_FM;
  33.525 +    data.cl=DSIDE_TLB;
   33.526 +    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
  33.527 +    data.rid = vrr.rid;
  33.528 +    sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
  33.529 +
  33.530 +    ovl = thash_find_overlap(hcb, &data, sections);
  33.531 +    if (ovl) {
   33.532 +        // generate MCA.
  33.533 +        panic("Foreignmap Tlb conflict!!");
  33.535 +    }
  33.536 +    thash_insert(hcb, &data, va);
  33.537 +    return IA64_NO_FAULT;
  33.538 +}
  33.539 +
  33.540 +
  33.541 +IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
  33.542 +{
  33.543 +
  33.544 +    thash_data_t data, *ovl;
  33.545 +    thash_cb_t  *hcb;
  33.546 +    search_section_t sections;
  33.547 +    rr_t    vrr;
  33.548 +
  33.549 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.550 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  33.551 +    data.itir=itir;
  33.552 +    data.vadr=PAGEALIGN(ifa,data.ps);
  33.553 +    data.section=THASH_TLB_TR;
  33.554 +    data.cl=ISIDE_TLB;
   33.555 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  33.556 +    data.rid = vrr.rid;
  33.557 +    sections.v = THASH_SECTION_TR;
  33.558 +
  33.559 +    ovl = thash_find_overlap(hcb, &data, sections);
  33.560 +    if (ovl) {
  33.561 +        // generate MCA.
  33.562 +        panic("Tlb conflict!!");
  33.564 +    }
  33.565 +    sections.v=THASH_SECTION_TC;
  33.566 +    thash_purge_entries(hcb, &data, sections);
  33.567 +    thash_tr_insert(hcb, &data, ifa, idx);
  33.568 +    return IA64_NO_FAULT;
  33.569 +}
  33.570 +
  33.571 +IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
  33.572 +{
  33.573 +
  33.574 +    thash_data_t data, *ovl;
  33.575 +    thash_cb_t  *hcb;
  33.576 +    search_section_t sections;
  33.577 +    rr_t    vrr;
  33.578 +
  33.579 +
  33.580 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.581 +    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  33.582 +    data.itir=itir;
  33.583 +    data.vadr=PAGEALIGN(ifa,data.ps);
  33.584 +    data.section=THASH_TLB_TR;
  33.585 +    data.cl=DSIDE_TLB;
   33.586 +    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
  33.587 +    data.rid = vrr.rid;
  33.588 +    sections.v = THASH_SECTION_TR;
  33.589 +
  33.590 +    ovl = thash_find_overlap(hcb, &data, sections);
   33.591 +    if (ovl) {
  33.592 +        // generate MCA.
  33.593 +        panic("Tlb conflict!!");
  33.595 +    }
  33.596 +    sections.v=THASH_SECTION_TC;
  33.597 +    thash_purge_entries(hcb, &data, sections);
  33.598 +    thash_tr_insert(hcb, &data, ifa, idx);
  33.599 +    return IA64_NO_FAULT;
  33.600 +}
  33.601 +
  33.602 +
  33.603 +
  33.604 +IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  33.605 +{
  33.606 +    thash_cb_t  *hcb;
  33.607 +    ia64_rr rr;
  33.608 +    search_section_t sections;
  33.609 +
  33.610 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.611 +    rr=vmx_vcpu_rr(vcpu,vadr);
  33.612 +    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  33.613 +    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
  33.614 +    return IA64_NO_FAULT;
  33.615 +}
  33.616 +
  33.617 +IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  33.618 +{
  33.619 +    thash_cb_t  *hcb;
  33.620 +    ia64_rr rr;
  33.621 +    search_section_t sections;
  33.622 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.623 +    rr=vmx_vcpu_rr(vcpu,vadr);
  33.624 +    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  33.625 +    thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
  33.626 +    return IA64_NO_FAULT;
  33.627 +}
  33.628 +
  33.629 +IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
  33.630 +{
  33.631 +    thash_cb_t  *hcb;
  33.632 +    ia64_rr vrr;
  33.633 +    search_section_t sections;
  33.635 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.636 +    vrr=vmx_vcpu_rr(vcpu,vadr);
  33.637 +    sections.v = THASH_SECTION_TC;
  33.638 +    vadr = PAGEALIGN(vadr, ps);
  33.639 +
  33.640 +    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
  33.641 +    thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,ISIDE_TLB);
  33.642 +    return IA64_NO_FAULT;
  33.643 +}
  33.644 +
  33.645 +
  33.646 +IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  33.647 +{
  33.648 +    thash_cb_t  *hcb;
  33.649 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.650 +    thash_purge_all(hcb);
  33.651 +    return IA64_NO_FAULT;
  33.652 +}
  33.653 +
  33.654 +IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
  33.655 +{
  33.656 +    vmx_vcpu_ptc_l(vcpu, vadr, ps);
  33.657 +    return IA64_ILLOP_FAULT;
  33.658 +}
  33.659 +
  33.660 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps)
  33.661 +{
  33.662 +    vmx_vcpu_ptc_l(vcpu, vadr, ps);
  33.663 +    return IA64_NO_FAULT;
  33.664 +}
  33.665 +
  33.666 +
  33.667 +IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  33.668 +{
  33.669 +    PTA vpta;
  33.670 +    ia64_rr vrr;
   33.671 +    u64 vhpt_offset;
  33.672 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  33.673 +    vrr=vmx_vcpu_rr(vcpu, vadr);
  33.674 +    if(vpta.vf){
  33.675 +        panic("THASH,Don't support long format VHPT");
  33.676 +        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
  33.677 +    }else{
  33.678 +        vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
  33.679 +        *pval = (vadr&VRN_MASK)|
  33.680 +            (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
  33.681 +            vhpt_offset;
  33.682 +    }
  33.683 +    return  IA64_NO_FAULT;
  33.684 +}
  33.685 +
  33.686 +
  33.687 +IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  33.688 +{
  33.689 +    ia64_rr vrr;
  33.690 +    PTA vpta;
  33.691 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  33.692 +    vrr=vmx_vcpu_rr(vcpu, vadr);
  33.693 +    if(vpta.vf){
  33.694 +        panic("THASH,Don't support long format VHPT");
  33.695 +        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
  33.696 +    }else{
  33.697 +        *pval = 1;
  33.698 +    }
  33.699 +    return  IA64_NO_FAULT;
  33.700 +}
  33.701 +
  33.702 +
  33.703 +
  33.704 +IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  33.705 +{
  33.706 +    thash_data_t *data;
  33.707 +    thash_cb_t  *hcb;
  33.708 +    ia64_rr vrr;
  33.709 +    ISR visr,pt_isr;
  33.710 +    REGS *regs;
  33.711 +    u64 vhpt_adr;
  33.712 +    IA64_PSR vpsr;
  33.713 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.714 +    vrr=vmx_vcpu_rr(vcpu,vadr);
  33.715 +    regs=vcpu_regs(vcpu);
  33.716 +    pt_isr.val=regs->cr_isr;
  33.717 +    visr.val=0;
  33.718 +    visr.ei=pt_isr.ei;
  33.719 +    visr.ir=pt_isr.ir;
  33.720 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  33.721 +    if(vpsr.ic==0){
   33.722 +        visr.ni=1;
  33.723 +    }
  33.724 +    visr.na=1;
  33.725 +    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
  33.726 +    if(data){
  33.727 +        if(data->p==0){
  33.728 +            visr.na=1;
  33.729 +            vmx_vcpu_set_isr(vcpu,visr.val);
  33.730 +            page_not_present(vcpu, vadr);
  33.731 +            return IA64_FAULT;
  33.732 +        }else if(data->ma == VA_MATTR_NATPAGE){
  33.733 +            visr.na = 1;
  33.734 +            vmx_vcpu_set_isr(vcpu, visr.val);
  33.735 +            dnat_page_consumption(vcpu, vadr);
  33.736 +            return IA64_FAULT;
  33.737 +        }else{
  33.738 +            *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
  33.739 +            return IA64_NO_FAULT;
  33.740 +        }
  33.741 +    }else{
  33.742 +        if(!vhpt_enabled(vcpu, vadr, NA_REF)){
  33.743 +            if(vpsr.ic){
  33.744 +                vmx_vcpu_set_isr(vcpu, visr.val);
  33.745 +                alt_dtlb(vcpu, vadr);
  33.746 +                return IA64_FAULT;
  33.747 +            }
  33.748 +            else{
  33.749 +                nested_dtlb(vcpu);
  33.750 +                return IA64_FAULT;
  33.751 +            }
  33.752 +        }
  33.753 +        else{
  33.754 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  33.755 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  33.756 +            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
  33.757 +            if(data){
  33.758 +                if(vpsr.ic){
  33.759 +                    vmx_vcpu_set_isr(vcpu, visr.val);
  33.760 +                    dtlb_fault(vcpu, vadr);
  33.761 +                    return IA64_FAULT;
  33.762 +                }
  33.763 +                else{
  33.764 +                    nested_dtlb(vcpu);
  33.765 +                    return IA64_FAULT;
  33.766 +                }
  33.767 +            }
  33.768 +            else{
  33.769 +                if(vpsr.ic){
  33.770 +                    vmx_vcpu_set_isr(vcpu, visr.val);
  33.771 +                    dvhpt_fault(vcpu, vadr);
  33.772 +                    return IA64_FAULT;
  33.773 +                }
  33.774 +                else{
  33.775 +                    nested_dtlb(vcpu);
  33.776 +                    return IA64_FAULT;
  33.777 +                }
  33.778 +            }
  33.779 +        }
  33.780 +    }
  33.781 +}
  33.782 +
  33.783 +IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
  33.784 +{
  33.785 +    thash_data_t *data;
  33.786 +    thash_cb_t  *hcb;
  33.787 +    ia64_rr rr;
  33.788 +    PTA vpta;
  33.789 +    vmx_vcpu_get_pta(vcpu, &vpta.val);
  33.790 +    if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
  33.791 +        *key=1;
  33.792 +        return IA64_NO_FAULT;
  33.793 +    }
  33.794 +    hcb = vmx_vcpu_get_vtlb(vcpu);
  33.795 +    rr=vmx_vcpu_rr(vcpu,vadr);
  33.796 +    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
  33.797 +    if(!data||!data->p){
  33.798 +        *key=1;
  33.799 +    }else{
  33.800 +        *key=data->key;
  33.801 +    }
  33.802 +    return IA64_NO_FAULT;
  33.803 +}
  33.804 +
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xen/arch/ia64/vmx_entry.S	Fri May 20 17:23:51 2005 +0000
    34.3 @@ -0,0 +1,611 @@
    34.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    34.5 +/*
    34.6 + * vmx_entry.S:
    34.7 + * Copyright (c) 2005, Intel Corporation.
    34.8 + *
    34.9 + * This program is free software; you can redistribute it and/or modify it
   34.10 + * under the terms and conditions of the GNU General Public License,
   34.11 + * version 2, as published by the Free Software Foundation.
   34.12 + *
   34.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   34.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   34.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   34.16 + * more details.
   34.17 + *
   34.18 + * You should have received a copy of the GNU General Public License along with
   34.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   34.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   34.21 + *
   34.22 + *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
   34.23 + *  Kun Tian (Kevin Tian) (kevin.tian@intel.com)
   34.24 + */
   34.25 +
   34.26 +#ifndef VCPU_TLB_SHIFT
   34.27 +#define VCPU_TLB_SHIFT	22
   34.28 +#endif
   34.29 +#include <linux/config.h>
   34.30 +#include <asm/asmmacro.h>
   34.31 +#include <asm/cache.h>
   34.32 +#include <asm/kregs.h>
   34.33 +#include <asm/offsets.h>
   34.34 +#include <asm/pgtable.h>
   34.35 +#include <asm/percpu.h>
   34.36 +#include <asm/processor.h>
   34.37 +#include <asm/thread_info.h>
   34.38 +#include <asm/unistd.h>
   34.39 +
   34.40 +#include "vmx_minstate.h"
   34.41 +
   34.42 +/*
   34.43 + * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
   34.44 + *	With Ingo's new scheduler, interrupts are disabled when this routine gets
   34.45 + *	called.  The code starting at .map relies on this.  The rest of the code
   34.46 + *	doesn't care about the interrupt masking status.
   34.47 + *
    34.48 + * Since we allocate the domain stack in the xenheap, there's no need to map the
    34.49 + * new domain's stack, as the whole xenheap is mapped by TR. The other task
    34.50 + * for vmx_ia64_switch_to is to switch to bank 0 and update the current pointer.
   34.51 + */
   34.52 +GLOBAL_ENTRY(vmx_ia64_switch_to)
   34.53 +	.prologue
   34.54 +	alloc r16=ar.pfs,1,0,0,0
   34.55 +	DO_SAVE_SWITCH_STACK
   34.56 +	.body
   34.57 +
   34.58 +	bsw.0	// Switch to bank0, because bank0 r21 is current pointer
   34.59 +	;;
   34.60 +	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
   34.61 +	movl r25=init_task
   34.62 +	adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
   34.63 +	;;
   34.64 +	st8 [r22]=sp			// save kernel stack pointer of old task
   34.65 +	;;
   34.66 +	/*
   34.67 +	 * TR always mapped this task's page, we can skip doing it again.
   34.68 +	 */
   34.69 +	ld8 sp=[r26]			// load kernel stack pointer of new task
   34.70 +	mov r21=in0			// update "current" application register
   34.71 +	mov r8=r13			// return pointer to previously running task
   34.72 +	mov r13=in0			// set "current" pointer
   34.73 +	;;
   34.74 +	bsw.1
   34.75 +	;;
   34.76 +	DO_LOAD_SWITCH_STACK
   34.77 +
   34.78 +#ifdef CONFIG_SMP
   34.79 +	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
   34.80 +#endif
   34.81 +	br.ret.sptk.many rp		// boogie on out in new context
   34.82 +END(vmx_ia64_switch_to)
   34.83 +
   34.84 +GLOBAL_ENTRY(ia64_leave_nested)
   34.85 +	rsm psr.i
   34.86 +	;;
   34.87 +	adds r21=PT(PR)+16,r12
   34.88 +	;;
   34.89 +
   34.90 +	lfetch [r21],PT(CR_IPSR)-PT(PR)
   34.91 +	adds r2=PT(B6)+16,r12
   34.92 +	adds r3=PT(R16)+16,r12
   34.93 +	;;
   34.94 +	lfetch [r21]
   34.95 +	ld8 r28=[r2],8		// load b6
   34.96 +	adds r29=PT(R24)+16,r12
   34.97 +
   34.98 +	ld8.fill r16=[r3]
   34.99 +	adds r3=PT(AR_CSD)-PT(R16),r3
  34.100 +	adds r30=PT(AR_CCV)+16,r12
  34.101 +	;;
  34.102 +	ld8.fill r24=[r29]
  34.103 +	ld8 r15=[r30]		// load ar.ccv
  34.104 +	;;
  34.105 +	ld8 r29=[r2],16		// load b7
  34.106 +	ld8 r30=[r3],16		// load ar.csd
  34.107 +	;;
  34.108 +	ld8 r31=[r2],16		// load ar.ssd
  34.109 +	ld8.fill r8=[r3],16
  34.110 +	;;
  34.111 +	ld8.fill r9=[r2],16
  34.112 +	ld8.fill r10=[r3],PT(R17)-PT(R10)
  34.113 +	;;
  34.114 +	ld8.fill r11=[r2],PT(R18)-PT(R11)
  34.115 +	ld8.fill r17=[r3],16
  34.116 +	;;
  34.117 +	ld8.fill r18=[r2],16
  34.118 +	ld8.fill r19=[r3],16
  34.119 +	;;
  34.120 +	ld8.fill r20=[r2],16
  34.121 +	ld8.fill r21=[r3],16
  34.122 +	mov ar.csd=r30
  34.123 +	mov ar.ssd=r31
  34.124 +	;;
  34.125 +	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
  34.126 +	invala			// invalidate ALAT
  34.127 +	;;
  34.128 +	ld8.fill r22=[r2],24
  34.129 +	ld8.fill r23=[r3],24
  34.130 +	mov b6=r28
  34.131 +	;;
  34.132 +	ld8.fill r25=[r2],16
  34.133 +	ld8.fill r26=[r3],16
  34.134 +	mov b7=r29
  34.135 +	;;
  34.136 +	ld8.fill r27=[r2],16
  34.137 +	ld8.fill r28=[r3],16
  34.138 +	;;
  34.139 +	ld8.fill r29=[r2],16
  34.140 +	ld8.fill r30=[r3],24
  34.141 +	;;
  34.142 +	ld8.fill r31=[r2],PT(F9)-PT(R31)
  34.143 +	adds r3=PT(F10)-PT(F6),r3
  34.144 +	;;
  34.145 +	ldf.fill f9=[r2],PT(F6)-PT(F9)
  34.146 +	ldf.fill f10=[r3],PT(F8)-PT(F10)
  34.147 +	;;
  34.148 +	ldf.fill f6=[r2],PT(F7)-PT(F6)
  34.149 +	;;
  34.150 +	ldf.fill f7=[r2],PT(F11)-PT(F7)
  34.151 +	ldf.fill f8=[r3],32
  34.152 +	;;
  34.153 +	srlz.i			// ensure interruption collection is off
  34.154 +	mov ar.ccv=r15
  34.155 +	;;
  34.156 +	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
  34.157 +	;;
  34.158 +	ldf.fill f11=[r2]
  34.159 +//	mov r18=r13
  34.160 +//    mov r21=r13
  34.161 +	adds r16=PT(CR_IPSR)+16,r12
  34.162 +	adds r17=PT(CR_IIP)+16,r12
  34.163 +	;;
  34.164 +	ld8 r29=[r16],16	// load cr.ipsr
  34.165 +	ld8 r28=[r17],16	// load cr.iip
  34.166 +	;;
  34.167 +	ld8 r30=[r16],16	// load cr.ifs
  34.168 +	ld8 r25=[r17],16	// load ar.unat
  34.169 +	;;
  34.170 +	ld8 r26=[r16],16	// load ar.pfs
  34.171 +	ld8 r27=[r17],16	// load ar.rsc
  34.172 +	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
  34.173 +	;;
  34.174 +	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
  34.175 +	ld8 r23=[r17],16// load ar.bspstore (may be garbage)
  34.176 +	;;
  34.177 +	ld8 r31=[r16],16	// load predicates
  34.178 +	ld8 r22=[r17],16	// load b0
  34.179 +	;;
  34.180 +	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
  34.181 +	ld8.fill r1=[r17],16	// load r1
  34.182 +	;;
  34.183 +	ld8.fill r12=[r16],16
  34.184 +	ld8.fill r13=[r17],16
  34.185 +	;;
  34.186 +	ld8 r20=[r16],16	// ar.fpsr
  34.187 +	ld8.fill r15=[r17],16
  34.188 +	;;
  34.189 +	ld8.fill r14=[r16],16
  34.190 +	ld8.fill r2=[r17]
  34.191 +	;;
  34.192 +	ld8.fill r3=[r16]
  34.193 +	;;
  34.194 +	mov r16=ar.bsp		// get existing backing store pointer
  34.195 +	;;
  34.196 +	mov b0=r22
  34.197 +	mov ar.pfs=r26
  34.198 +	mov cr.ifs=r30
  34.199 +	mov cr.ipsr=r29
  34.200 +	mov ar.fpsr=r20
  34.201 +	mov cr.iip=r28
  34.202 +	;;
  34.203 +	mov ar.rsc=r27
  34.204 +	mov ar.unat=r25
  34.205 +	mov pr=r31,-1
  34.206 +	rfi
  34.207 +END(ia64_leave_nested)
  34.208 +
  34.209 +
  34.210 +
  34.211 +GLOBAL_ENTRY(ia64_leave_hypervisor)
  34.212 +    PT_REGS_UNWIND_INFO(0)
  34.213 +    /*
  34.214 +     * work.need_resched etc. mustn't get changed by this CPU before it returns to
  34.216 +     * user- or fsys-mode, hence we disable interrupts early on:
  34.217 +     */
  34.218 +    rsm psr.i
  34.219 +    ;;
  34.220 +    alloc loc0=ar.pfs,0,1,1,0
  34.221 +    adds out0=16,r12
  34.222 +    ;;
  34.223 +    br.call.sptk.many b0=vmx_deliver_pending_interrupt
  34.224 +    mov ar.pfs=loc0
  34.225 +    adds r8=IA64_VPD_BASE_OFFSET,r13
  34.226 +    ;;
  34.227 +    ld8 r8=[r8]
  34.228 +    ;;
  34.229 +    adds r9=VPD(VPSR),r8
  34.230 +    ;;
  34.231 +    ld8 r9=[r9]
  34.232 +    ;;
  34.233 +    tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT
  34.234 +    ;;
  34.235 +(pBN0) add r7=VPD(VBNAT),r8;
  34.236 +(pBN1) add r7=VPD(VNAT),r8;
  34.237 +    ;;
  34.238 +    ld8 r7=[r7]
  34.239 +    ;;
  34.240 +    mov ar.unat=r7
  34.241 +(pBN0) add r4=VPD(VBGR),r8;
  34.242 +(pBN1) add r4=VPD(VGR),r8;
  34.243 +(pBN0) add r5=VPD(VBGR)+0x8,r8;
  34.244 +(pBN1) add r5=VPD(VGR)+0x8,r8;
  34.245 +    ;;
  34.246 +    ld8.fill r16=[r4],16
  34.247 +    ld8.fill r17=[r5],16
  34.248 +    ;;
  34.249 +    ld8.fill r18=[r4],16
  34.250 +    ld8.fill r19=[r5],16
  34.251 +    ;;
  34.252 +    ld8.fill r20=[r4],16
  34.253 +    ld8.fill r21=[r5],16
  34.254 +    ;;
  34.255 +    ld8.fill r22=[r4],16
  34.256 +    ld8.fill r23=[r5],16
  34.257 +    ;;
  34.258 +    ld8.fill r24=[r4],16
  34.259 +    ld8.fill r25=[r5],16
  34.260 +    ;;
  34.261 +    ld8.fill r26=[r4],16
  34.262 +    ld8.fill r27=[r5],16
  34.263 +    ;;
  34.264 +    ld8.fill r28=[r4],16
  34.265 +    ld8.fill r29=[r5],16
  34.266 +    ;;
  34.267 +    ld8.fill r30=[r4],16
  34.268 +    ld8.fill r31=[r5],16
  34.269 +    ;;
  34.270 +    bsw.0
  34.271 +    ;;
  34.272 +    mov r18=r8      //vpd
  34.273 +    mov r19=r9      //vpsr
  34.274 +    adds r20=PT(PR)+16,r12
  34.275 +    ;;
  34.276 +    lfetch [r20],PT(CR_IPSR)-PT(PR)
  34.277 +    adds r16=PT(B6)+16,r12
  34.278 +    adds r17=PT(B7)+16,r12
  34.279 +    ;;
  34.280 +    lfetch [r20]
  34.281 +    mov r21=r13		// get current
  34.282 +    ;;
  34.283 +    ld8 r30=[r16],16      // load b6
  34.284 +    ld8 r31=[r17],16      // load b7
  34.285 +    add r20=PT(EML_UNAT)+16,r12
  34.286 +    ;;
  34.287 +    ld8 r29=[r20]       //load ar_unat
  34.288 +    mov b6=r30
  34.289 +    mov b7=r31
  34.290 +    ld8 r30=[r16],16    //load ar_csd
  34.291 +    ld8 r31=[r17],16    //load ar_ssd
  34.292 +    ;;
  34.293 +    mov ar.unat=r29
  34.294 +    mov ar.csd=r30
  34.295 +    mov ar.ssd=r31
  34.296 +    ;;
  34.297 +    ld8.fill r8=[r16],16    //load r8
  34.298 +    ld8.fill r9=[r17],16    //load r9
  34.299 +    ;;
  34.300 +    ld8.fill r10=[r16],PT(R1)-PT(R10)    //load r10
  34.301 +    ld8.fill r11=[r17],PT(R12)-PT(R11)    //load r11
  34.302 +    ;;
  34.303 +    ld8.fill r1=[r16],16    //load r1
  34.304 +    ld8.fill r12=[r17],16    //load r12
  34.305 +    ;;
  34.306 +    ld8.fill r13=[r16],16    //load r13
  34.307 +    ld8 r30=[r17],16    //load ar_fpsr
  34.308 +    ;;
  34.309 +    ld8.fill r15=[r16],16    //load r15
  34.310 +    ld8.fill r14=[r17],16    //load r14
  34.311 +    mov ar.fpsr=r30
  34.312 +    ;;
  34.313 +    ld8.fill r2=[r16],16    //load r2
  34.314 +    ld8.fill r3=[r17],16    //load r3
  34.315 +    ;;
  34.316 +/*
  34.317 +(pEml) ld8.fill r4=[r16],16    //load r4
  34.318 +(pEml) ld8.fill r5=[r17],16    //load r5
  34.319 +    ;;
  34.320 +(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
  34.321 +(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
  34.322 +    ;;
  34.323 +(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16
  34.324 +(pNonEml) adds r17=PT(F7)-PT(R5),r17
  34.325 +    ;;
  34.326 +*/
  34.327 +    ld8.fill r4=[r16],16    //load r4
  34.328 +    ld8.fill r5=[r17],16    //load r5
  34.329 +     ;;
  34.330 +    ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
  34.331 +    ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
  34.332 +    ;;
  34.333 +
  34.334 +    ld8 r30=[r16],PT(F6)-PT(AR_CCV)
  34.335 +    rsm psr.i | psr.ic  // initiate turning off of interrupt and interruption collection
  34.336 +    ;;
  34.337 +    srlz.i          // ensure interruption collection is off
  34.338 +    ;;
  34.339 +    invala          // invalidate ALAT
  34.340 +    ;;
  34.341 +    ldf.fill f6=[r16],32
  34.342 +    ldf.fill f7=[r17],32
  34.343 +    ;;
  34.344 +    ldf.fill f8=[r16],32
  34.345 +    ldf.fill f9=[r17],32
  34.346 +    ;;
  34.347 +    ldf.fill f10=[r16]
  34.348 +    ldf.fill f11=[r17]
  34.349 +    ;;
  34.350 +    mov ar.ccv=r30
  34.351 +    adds r16=PT(CR_IPSR)-PT(F10),r16
  34.352 +    adds r17=PT(CR_IIP)-PT(F11),r17
  34.353 +    ;;
  34.354 +    ld8 r31=[r16],16    // load cr.ipsr
  34.355 +    ld8 r30=[r17],16    // load cr.iip
  34.356 +    ;;
  34.357 +    ld8 r29=[r16],16    // load cr.ifs
  34.358 +    ld8 r28=[r17],16    // load ar.unat
  34.359 +    ;;
  34.360 +    ld8 r27=[r16],16    // load ar.pfs
  34.361 +    ld8 r26=[r17],16    // load ar.rsc
  34.362 +    ;;
  34.363 +    ld8 r25=[r16],16    // load ar.rnat (may be garbage)
  34.364 +    ld8 r24=[r17],16// load ar.bspstore (may be garbage)
  34.365 +    ;;
  34.366 +    ld8 r23=[r16],16    // load predicates
  34.367 +    ld8 r22=[r17],PT(RFI_PFS)-PT(B0)    // load b0
  34.368 +    ;;
  34.369 +    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
  34.370 +    ;;
  34.371 +//rbs_switch
  34.372 +    // loadrs has already been shifted
  34.373 +    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
  34.374 +    ;;
  34.375 +    mov ar.rsc=r20
  34.376 +    ;;
  34.377 +    loadrs
  34.378 +    ;;
  34.379 +    mov ar.bspstore=r24
  34.380 +    ;;
  34.381 +    ld8 r24=[r17]       //load rfi_pfs
  34.382 +    mov ar.unat=r28
  34.383 +    mov ar.rnat=r25
  34.384 +    mov ar.rsc=r26
  34.385 +    ;;
  34.386 +    mov cr.ipsr=r31
  34.387 +    mov cr.iip=r30
  34.388 +    mov cr.ifs=r29
  34.389 +    cmp.ne p6,p0=r24,r0
  34.390 +(p6)br.sptk vmx_dorfirfi
  34.391 +    ;;
  34.392 +vmx_dorfirfi_back:
  34.393 +    mov ar.pfs=r27
  34.394 +
  34.395 +//vsa_sync_write_start
  34.396 +    movl r20=__vsa_base
  34.397 +    ;;
  34.398 +    ld8 r20=[r20]       // read entry point
  34.399 +    mov r25=r18
  34.400 +    ;;
  34.401 +    add r16=PAL_VPS_SYNC_WRITE,r20
  34.402 +    movl r24=switch_rr7  // calculate return address
  34.403 +    ;;
  34.404 +    mov b0=r16
  34.405 +    br.cond.sptk b0         // call the service
  34.406 +    ;;
  34.407 +// switch rr7 and rr5
  34.408 +switch_rr7:
  34.409 +    adds r24=SWITCH_MRR5_OFFSET, r21
  34.410 +    adds r26=SWITCH_MRR6_OFFSET, r21
  34.411 +    adds r16=SWITCH_MRR7_OFFSET ,r21
  34.412 +    movl r25=(5<<61)
  34.413 +    movl r27=(6<<61)
  34.414 +    movl r17=(7<<61)
  34.415 +    ;;
  34.416 +    ld8 r24=[r24]
  34.417 +    ld8 r26=[r26]
  34.418 +    ld8 r16=[r16]
  34.419 +    ;;
  34.420 +    mov rr[r25]=r24
  34.421 +    mov rr[r27]=r26
  34.422 +    mov rr[r17]=r16
  34.423 +    ;;
  34.424 +    srlz.i
  34.425 +    ;;
  34.426 +    add r24=SWITCH_MPTA_OFFSET, r21
  34.427 +    ;;
  34.428 +    ld8 r24=[r24]
  34.429 +    ;;
  34.430 +    mov cr.pta=r24
  34.431 +    ;;
  34.432 +    srlz.i
  34.433 +    ;;
  34.434 +// fall through
  34.435 +GLOBAL_ENTRY(ia64_vmm_entry)
  34.436 +/*
  34.437 + *  must be at bank 0
  34.438 + *  parameter:
  34.439 + *  r18:vpd
  34.440 + *  r19:vpsr
  34.441 + *  r20:__vsa_base
  34.442 + *  r22:b0
  34.443 + *  r23:predicate
  34.444 + */
  34.445 +    mov r24=r22
  34.446 +    mov r25=r18
  34.447 +    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
  34.448 +    ;;
  34.449 +    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
  34.450 +    (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
  34.451 +    ;;
  34.452 +    mov pr=r23,-2
  34.453 +    mov b0=r29
  34.454 +    ;;
  34.455 +    br.cond.sptk b0             // call pal service
  34.456 +END(ia64_leave_hypervisor)
  34.457 +
  34.458 +//r24 rfi_pfs
  34.459 +//r17 address of rfi_pfs
  34.460 +GLOBAL_ENTRY(vmx_dorfirfi)
  34.461 +    mov r16=ar.ec
  34.462 +    movl r20 = vmx_dorfirfi_back
  34.463 +	;;
  34.464 +// clean rfi_pfs
  34.465 +    st8 [r17]=r0
  34.466 +    mov b0=r20
  34.467 +// pfs.pec=ar.ec
  34.468 +    dep r24 = r16, r24, 52, 6
  34.469 +    ;;
  34.470 +    mov ar.pfs=r24
  34.471 +	;;
  34.472 +    br.ret.sptk b0
  34.473 +	;;
  34.474 +END(vmx_dorfirfi)
  34.475 +
  34.476 +
  34.477 +#define VMX_PURGE_RR7	0
  34.478 +#define VMX_INSERT_RR7	1
  34.479 +/*
  34.480 + * in0: old rr7
  34.481 + * in1: virtual address of xen image
  34.482 + * in2: virtual address of vhpt table
  34.483 + */
  34.484 +GLOBAL_ENTRY(vmx_purge_double_mapping)
  34.485 +    alloc loc1 = ar.pfs,5,9,0,0
  34.486 +    mov loc0 = rp
  34.487 +    movl r8 = 1f
  34.488 +    ;;
  34.489 +    movl loc4 = KERNEL_TR_PAGE_SHIFT
  34.490 +    movl loc5 = VCPU_TLB_SHIFT
  34.491 +    mov loc6 = psr
  34.492 +    movl loc7 = XEN_RR7_SWITCH_STUB
  34.493 +    mov loc8 = (1<<VMX_PURGE_RR7)
  34.494 +    ;;
  34.495 +    srlz.i
  34.496 +    ;;
  34.497 +    rsm psr.i | psr.ic
  34.498 +    ;;
  34.499 +    srlz.i
  34.500 +    ;;
  34.501 +    mov ar.rsc = 0
  34.502 +    mov b6 = loc7
  34.503 +    mov rp = r8
  34.504 +    ;;
  34.505 +    br.sptk b6
  34.506 +1:
  34.507 +    mov ar.rsc = 3
  34.508 +    mov rp = loc0
  34.509 +    ;;
  34.510 +    mov psr.l = loc6
  34.511 +    ;;
  34.512 +    srlz.i
  34.513 +    ;;
  34.514 +    br.ret.sptk rp
  34.515 +END(vmx_purge_double_mapping)
  34.516 +
  34.517 +/*
  34.518 + * in0: new rr7
  34.519 + * in1: virtual address of xen image
  34.520 + * in2: virtual address of vhpt table
  34.521 + * in3: pte entry of xen image
  34.522 + * in4: pte entry of vhpt table
  34.523 + */
  34.524 +GLOBAL_ENTRY(vmx_insert_double_mapping)
  34.525 +    alloc loc1 = ar.pfs,5,9,0,0
  34.526 +    mov loc0 = rp
  34.527 +    movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image
  34.528 +    ;;
  34.529 +    movl loc3 = IA64_TR_VHPT_IN_DOM	// TR number for vhpt table
  34.530 +    movl r8 = 1f
  34.531 +    movl loc4 = KERNEL_TR_PAGE_SHIFT
  34.532 +    ;;
  34.533 +    movl loc5 = VCPU_TLB_SHIFT
  34.534 +    mov loc6 = psr
  34.535 +    movl loc7 = XEN_RR7_SWITCH_STUB
  34.536 +    ;;
  34.537 +    srlz.i
  34.538 +    ;;
  34.539 +    rsm psr.i | psr.ic
  34.540 +    mov loc8 = (1<<VMX_INSERT_RR7)
  34.541 +    ;;
  34.542 +    srlz.i
  34.543 +    ;;
  34.544 +    mov ar.rsc = 0
  34.545 +    mov b6 = loc7
  34.546 +    mov rp = r8
  34.547 +    ;;
  34.548 +    br.sptk b6
  34.549 +1:
  34.550 +    mov ar.rsc = 3
  34.551 +    mov rp = loc0
  34.552 +    ;;
  34.553 +    mov psr.l = loc6
  34.554 +    ;;
  34.555 +    srlz.i
  34.556 +    ;;
  34.557 +    br.ret.sptk rp
  34.558 +END(vmx_insert_double_mapping)
  34.559 +
  34.560 +    .align PAGE_SIZE
  34.561 +/*
  34.562 + * Stub to add double mapping for new domain, which shouldn't
  34.563 + * access any memory when active. Before reaching this point,
  34.564 + * both psr.i/ic is cleared and rse is set in lazy mode.
  34.565 + *
  34.566 + * in0: new rr7
  34.567 + * in1: virtual address of xen image
  34.568 + * in2: virtual address of vhpt table
  34.569 + * in3: pte entry of xen image
  34.570 + * in4: pte entry of vhpt table
  34.571 + * loc2: TR number for xen image
  34.572 + * loc3: TR number for vhpt table
  34.573 + * loc4: page size for xen image
  34.574 + * loc5: page size of vhpt table
  34.575 + * loc7: free to use
  34.576 + * loc8: purge or insert
  34.577 + * r8: will contain old rid value
  34.578 + */
  34.579 +GLOBAL_ENTRY(vmx_switch_rr7)
  34.580 +    movl loc7 = (7<<61)
  34.581 +    dep.z loc4 = loc4, 2, 6
  34.582 +    dep.z loc5 = loc5, 2, 6
  34.583 +    ;;
  34.584 +    tbit.nz p6,p7=loc8, VMX_INSERT_RR7
  34.585 +    mov r8 = rr[loc7]
  34.586 +    ;;
  34.587 +    mov rr[loc7] = in0
  34.588 +(p6)mov cr.ifa = in1
  34.589 +(p6)mov cr.itir = loc4
  34.590 +    ;;
  34.591 +    srlz.i
  34.592 +    ;;
  34.593 +(p6)itr.i itr[loc2] = in3
  34.594 +(p7)ptr.i in1, loc4
  34.595 +    ;;
  34.596 +(p6)itr.d dtr[loc2] = in3
  34.597 +(p7)ptr.d in1, loc4
  34.598 +    ;;
  34.599 +    srlz.i
  34.600 +    ;;
  34.601 +(p6)mov cr.ifa = in2
  34.602 +(p6)mov cr.itir = loc5
  34.603 +    ;;
  34.604 +(p6)itr.d dtr[loc3] = in4
  34.605 +(p7)ptr.d in2, loc5
  34.606 +    ;;
  34.607 +    srlz.i
  34.608 +    ;;
  34.609 +    mov rr[loc7] = r8
  34.610 +    ;;
  34.611 +    srlz.i
  34.612 +    br.sptk rp
  34.613 +END(vmx_switch_rr7)
  34.614 +    .align PAGE_SIZE
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xen/arch/ia64/vmx_init.c	Fri May 20 17:23:51 2005 +0000
    35.3 @@ -0,0 +1,296 @@
    35.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    35.5 +/*
    35.6 + * vmx_init.c: initialization work for vt specific domain
    35.7 + * Copyright (c) 2005, Intel Corporation.
    35.8 + *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
    35.9 + *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
   35.10 + *	Fred Yang <fred.yang@intel.com>
   35.11 + *
   35.12 + * This program is free software; you can redistribute it and/or modify it
   35.13 + * under the terms and conditions of the GNU General Public License,
   35.14 + * version 2, as published by the Free Software Foundation.
   35.15 + *
   35.16 + * This program is distributed in the hope it will be useful, but WITHOUT
   35.17 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   35.18 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   35.19 + * more details.
   35.20 + *
   35.21 + * You should have received a copy of the GNU General Public License along with
   35.22 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   35.23 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   35.24 + *
   35.25 + */
   35.26 +
   35.27 +/*
   35.28 + * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
    35.29 + * Simplified design in the first step:
   35.30 + *	- One virtual environment
   35.31 + *	- Domain is bound to one LP
   35.32 + * Later to support guest SMP:
   35.33 + *	- Need interface to handle VP scheduled to different LP
   35.34 + */
   35.35 +#include <xen/config.h>
   35.36 +#include <xen/types.h>
   35.37 +#include <xen/sched.h>
   35.38 +#include <asm/pal.h>
   35.39 +#include <asm/page.h>
   35.40 +#include <asm/processor.h>
   35.41 +#include <asm/vmx_vcpu.h>
   35.42 +#include <xen/lib.h>
   35.43 +#include <asm/vmmu.h>
   35.44 +#include <public/arch-ia64.h>
   35.45 +#include <asm/vmx_phy_mode.h>
   35.46 +#include <asm/vmx.h>
   35.47 +
   35.48 +/* Global flag to identify whether Intel vmx feature is on */
   35.49 +u32 vmx_enabled = 0;
   35.50 +static u32 vm_order;
   35.51 +static u64 buffer_size;
   35.52 +static u64 vp_env_info;
   35.53 +static u64 vm_buffer = 0;	/* Buffer required to bring up VMX feature */
   35.54 +u64 __vsa_base = 0;	/* Run-time service base of VMX */
   35.55 +
   35.56 +/* Check whether vt feature is enabled or not. */
   35.57 +void
   35.58 +identify_vmx_feature(void)
   35.59 +{
   35.60 +	pal_status_t ret;
   35.61 +	u64 avail = 1, status = 1, control = 1;
   35.62 +
   35.63 +	vmx_enabled = 0;
   35.64 +	/* Check VT-i feature */
   35.65 +	ret = ia64_pal_proc_get_features(&avail, &status, &control);
   35.66 +	if (ret != PAL_STATUS_SUCCESS) {
   35.67 +		printk("Get proc features failed.\n");
   35.68 +		goto no_vti;
   35.69 +	}
   35.70 +
   35.71 +	/* FIXME: do we need to check status field, to see whether
    35.72 +	 * PSR.vm is actually enabled? If yes, another call to
    35.73 +	 * ia64_pal_proc_set_features may be required then.
   35.74 +	 */
   35.75 +	printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
   35.76 +		avail, status, control, avail & PAL_PROC_VM_BIT);
   35.77 +	if (!(avail & PAL_PROC_VM_BIT)) {
   35.78 +		printk("No VT feature supported.\n");
   35.79 +		goto no_vti;
   35.80 +	}
   35.81 +
   35.82 +	ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
   35.83 +	if (ret != PAL_STATUS_SUCCESS) {
   35.84 +		printk("Get vp environment info failed.\n");
   35.85 +		goto no_vti;
   35.86 +	}
   35.87 +
    35.88 +	/* Does Xen have the ability to decode itself? */
   35.89 +	if (!(vp_env_info & VP_OPCODE))
   35.90 +		printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
   35.91 +	vm_order = get_order(buffer_size);
   35.92 +	printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
   35.93 +
   35.94 +	vmx_enabled = 1;
   35.95 +no_vti:
   35.96 +	return;
   35.97 +}
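          +
          +/*
          + * Hedged boot-flow sketch: the arch setup path would probe once and
          + * gate all later VT-i work on the global flag, e.g.:
          + *
          + *	identify_vmx_feature();
          + *	if (vmx_enabled)
          + *		vmx_init_env();
          + */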
   35.98 +
   35.99 +/*
  35.100 + * Init virtual environment on current LP
   35.101 + * __vsa_base indicates whether this is the first LP to be initialized
   35.102 + * for the current domain.
   35.103 + */
  35.104 +void
  35.105 +vmx_init_env(void)
  35.106 +{
  35.107 +	u64 status, tmp_base;
  35.108 +
  35.109 +	if (!vm_buffer) {
  35.110 +		vm_buffer = alloc_xenheap_pages(vm_order);
  35.111 +		ASSERT(vm_buffer);
  35.112 +		printk("vm_buffer: 0x%lx\n", vm_buffer);
  35.113 +	}
  35.114 +
  35.115 +	status=ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
  35.116 +				    __pa(vm_buffer),
  35.117 +				    vm_buffer,
  35.118 +				    &tmp_base);
  35.119 +
  35.120 +	if (status != PAL_STATUS_SUCCESS) {
  35.121 +		printk("ia64_pal_vp_init_env failed.\n");
   35.122 +		return;
  35.123 +	}
  35.124 +
  35.125 +	if (!__vsa_base)
  35.126 +		__vsa_base = tmp_base;
  35.127 +	else
   35.128 +		ASSERT(tmp_base == __vsa_base);	/* later LPs must report the same VSA base */
  35.129 +
  35.130 +	/* Init stub for rr7 switch */
  35.131 +	vmx_init_double_mapping_stub();
  35.132 +}
  35.133 +
  35.134 +typedef union {
  35.135 +	u64 value;
  35.136 +	struct {
  35.137 +		u64 number : 8;
  35.138 +		u64 revision : 8;
  35.139 +		u64 model : 8;
  35.140 +		u64 family : 8;
  35.141 +		u64 archrev : 8;
  35.142 +		u64 rv : 24;
  35.143 +	};
  35.144 +} cpuid3_t;
  35.145 +
  35.146 +/* Allocate vpd from xenheap */
  35.147 +static vpd_t *alloc_vpd(void)
  35.148 +{
  35.149 +	int i;
  35.150 +	cpuid3_t cpuid3;
  35.151 +	vpd_t *vpd;
  35.152 +
  35.153 +	vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
  35.154 +	if (!vpd) {
  35.155 +		printk("VPD allocation failed.\n");
  35.156 +		return NULL;
  35.157 +	}
  35.158 +
  35.159 +	printk("vpd base: 0x%lx, vpd size:%d\n", vpd, sizeof(vpd_t));
  35.160 +	memset(vpd, 0, VPD_SIZE);
  35.161 +	/* CPUID init */
  35.162 +	for (i = 0; i < 5; i++)
  35.163 +		vpd->vcpuid[i] = ia64_get_cpuid(i);
  35.164 +
  35.165 +	/* Limit the CPUID number to 5 */
  35.166 +	cpuid3.value = vpd->vcpuid[3];
  35.167 +	cpuid3.number = 4;	/* 5 - 1 */
  35.168 +	vpd->vcpuid[3] = cpuid3.value;
  35.169 +
  35.170 +	vpd->vdc.d_vmsw = 1;
  35.171 +	return vpd;
  35.172 +}
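          +
          +/*
          + * Worked example of the cpuid3_t overlay above: setting number = 4 only
          + * rewrites bits 0..7 of vcpuid[3], so the family/model/revision fields
          + * the guest sees remain those of the physical CPU; reading it back:
          + *
          + *	cpuid3_t c;
          + *	c.value = vpd->vcpuid[3];
          + *	ASSERT(c.number == 4);	// guest enumerates cpuid regs 0..4
          + */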
  35.173 +
  35.174 +
  35.175 +
  35.176 +/*
   35.177 + * Create a VP on an initialized VMX environment.
  35.178 + */
  35.179 +static void
  35.180 +vmx_create_vp(struct exec_domain *ed)
  35.181 +{
  35.182 +	u64 ret;
  35.183 +	vpd_t *vpd = ed->arch.arch_vmx.vpd;
  35.184 +	u64 ivt_base;
   35.185 +	extern char vmx_ia64_ivt;
   35.186 +	/* vmx_ia64_ivt is a linked symbol, so this translation to an address is needed */
  35.187 +	ivt_base = (u64) &vmx_ia64_ivt;
  35.188 +	printk("ivt_base: 0x%lx\n", ivt_base);
  35.189 +	ret = ia64_pal_vp_create(vpd, ivt_base, 0);
  35.190 +	if (ret != PAL_STATUS_SUCCESS)
  35.191 +		panic("ia64_pal_vp_create failed. \n");
  35.192 +}
  35.193 +
  35.194 +void vmx_init_double_mapping_stub(void)
  35.195 +{
  35.196 +	u64 base, psr;
  35.197 +	extern void vmx_switch_rr7(void);
  35.198 +
  35.199 +	base = (u64) &vmx_switch_rr7;
  35.200 +	base = *((u64*)base);
  35.201 +
  35.202 +	psr = ia64_clear_ic();
  35.203 +	ia64_itr(0x1, IA64_TR_RR7_SWITCH_STUB, XEN_RR7_SWITCH_STUB,
  35.204 +		 pte_val(pfn_pte(__pa(base) >> PAGE_SHIFT, PAGE_KERNEL)),
  35.205 +		 RR7_SWITCH_SHIFT);
  35.206 +	ia64_set_psr(psr);
  35.207 +	ia64_srlz_i();
  35.208 +	printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base)));
  35.209 +}
  35.210 +
  35.211 +/* Other non-context related tasks can be done in context switch */
  35.212 +void
  35.213 +vmx_save_state(struct exec_domain *ed)
  35.214 +{
  35.215 +	u64 status, psr;
  35.216 +	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
  35.217 +
  35.218 +	/* FIXME: about setting of pal_proc_vector... time consuming */
  35.219 +	status = ia64_pal_vp_save(ed->arch.arch_vmx.vpd, 0);
  35.220 +	if (status != PAL_STATUS_SUCCESS)
  35.221 +		panic("Save vp status failed\n");
  35.222 +
   35.223 +	/* FIXME: Do we really need to purge the double mapping for the old ed?
   35.224 +	 * Since the rid is completely different between prev and next,
   35.225 +	 * there is no overlap and thus no MCA is possible... */
  35.226 +	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
   35.227 +	vmx_purge_double_mapping(dom_rr7, KERNEL_START,
  35.228 +				 (u64)ed->arch.vtlb->ts->vhpt->hash);
  35.229 +
  35.230 +}
  35.231 +
   35.232 +/* Even when the guest is in physical mode, we still need this double mapping */
  35.233 +void
  35.234 +vmx_load_state(struct exec_domain *ed)
  35.235 +{
  35.236 +	u64 status, psr;
  35.237 +	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
  35.238 +	u64 pte_xen, pte_vhpt;
  35.239 +
  35.240 +	status = ia64_pal_vp_restore(ed->arch.arch_vmx.vpd, 0);
  35.241 +	if (status != PAL_STATUS_SUCCESS)
  35.242 +		panic("Restore vp status failed\n");
  35.243 +
  35.244 +	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
  35.245 +	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
  35.246 +	pte_vhpt = pte_val(pfn_pte((__pa(ed->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
  35.247 +	vmx_insert_double_mapping(dom_rr7, KERNEL_START,
  35.248 +				  (u64)ed->arch.vtlb->ts->vhpt->hash,
  35.249 +				  pte_xen, pte_vhpt);
  35.250 +
   35.251 +	/* The guest vTLB need not be switched explicitly, since it is
   35.252 +	 * anchored in the exec_domain */
  35.253 +}
  35.254 +
  35.255 +/* Purge old double mapping and insert new one, due to rr7 change */
  35.256 +void
  35.257 +vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7)
  35.258 +{
  35.259 +	u64 pte_xen, pte_vhpt, vhpt_base;
  35.260 +
   35.261 +	vhpt_base = (u64)ed->arch.vtlb->ts->vhpt->hash;
   35.262 +	vmx_purge_double_mapping(oldrr7, KERNEL_START,
   35.263 +				 vhpt_base);
  35.264 +
  35.265 +	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
  35.266 +	pte_vhpt = pte_val(pfn_pte((__pa(vhpt_base) >> PAGE_SHIFT), PAGE_KERNEL));
  35.267 +	vmx_insert_double_mapping(newrr7, KERNEL_START,
  35.268 +				  vhpt_base,
  35.269 +				  pte_xen, pte_vhpt);
  35.270 +}
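          +
          +/* Note (illustrative): pte_val(pfn_pte(pfn, PAGE_KERNEL)) above just
          + * composes a present, kernel, cacheable PTE whose ppn field is pfn,
          + * so the two PTEs map the Xen image (from xen_pstart) and this
          + * domain's VHPT at their fixed region 7 virtual addresses. */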
  35.271 +
  35.272 +/*
   35.273 + * Initialize the VMX environment for the guest. Only the 1st vp/exec_domain
  35.274 + * is registered here.
  35.275 + */
  35.276 +void
  35.277 +vmx_final_setup_domain(struct domain *d)
  35.278 +{
  35.279 +	struct exec_domain *ed = d->exec_domain[0];
  35.280 +	vpd_t *vpd;
  35.281 +
  35.282 +	/* Allocate resources for exec_domain 0 */
  35.283 +	//memset(&ed->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
  35.284 +
  35.285 +	vpd = alloc_vpd();
  35.286 +	ASSERT(vpd);
  35.287 +
  35.288 +	ed->arch.arch_vmx.vpd = vpd;
  35.289 +	vpd->virt_env_vaddr = vm_buffer;
  35.290 +
  35.291 +	/* ed->arch.schedule_tail = arch_vmx_do_launch; */
  35.292 +	vmx_create_vp(ed);
  35.293 +
  35.294 +	/* Set this ed to be vmx */
  35.295 +	ed->arch.arch_vmx.flags = 1;
  35.296 +
  35.297 +	/* Other vmx specific initialization work */
  35.298 +}
  35.299 +
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/xen/arch/ia64/vmx_interrupt.c	Fri May 20 17:23:51 2005 +0000
    36.3 @@ -0,0 +1,388 @@
    36.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    36.5 +/*
    36.6 + * vmx_interrupt.c: handle inject interruption.
    36.7 + * Copyright (c) 2005, Intel Corporation.
    36.8 + *
    36.9 + * This program is free software; you can redistribute it and/or modify it
   36.10 + * under the terms and conditions of the GNU General Public License,
   36.11 + * version 2, as published by the Free Software Foundation.
   36.12 + *
   36.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   36.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   36.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   36.16 + * more details.
   36.17 + *
   36.18 + * You should have received a copy of the GNU General Public License along with
   36.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   36.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   36.21 + *
    36.22 + *  Shaofan Li (Susie Li) <susie.li@intel.com>
   36.23 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   36.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   36.25 + */
   36.26 +
   36.27 +
   36.28 +#include <xen/types.h>
   36.29 +#include <asm/vmx_vcpu.h>
   36.30 +#include <asm/vmx_mm_def.h>
   36.31 +#include <asm/vmx_pal_vsa.h>
   36.32 +/* SDM vol2 5.5 - IVA based interruption handling */
   36.33 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
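          +/* Bit decode of the mask: 0x0000001808028034 keeps exactly
          + *   bit  2 up, bit  4 mfl, bit  5 mfh    = 0x34
          + *   bit 15 pk                            = 0x8000
          + *   bit 17 dt                            = 0x20000
          + *   bit 27 rt                            = 0x8000000
          + *   bit 35 mc, bit 36 it                 = 0x1800000000
          + * i.e. the up/mfl/mfh/pk/dt/rt/mc/it set that the comment in
          + * collect_interruption() below says must survive an interruption. */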
   36.34 +void
   36.35 +collect_interruption(VCPU *vcpu)
   36.36 +{
   36.37 +    u64 ipsr;
   36.38 +    u64 vdcr;
   36.39 +    u64 vifs;
   36.40 +    IA64_PSR vpsr;
   36.41 +    REGS * regs = vcpu_regs(vcpu);
   36.42 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   36.43 +
   36.44 +    if(vpsr.ic){
   36.45 +	extern void vmx_dorfirfi(void);
   36.46 +	if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
   36.47 +		panic("COLLECT interruption for vmx_dorfirfi\n");
   36.48 +
    36.49 +        /* Sync the mpsr id/da/dd/ss/ed bits into vipsr,
    36.50 +         * since after the guest does rfi we still want these bits
    36.51 +         * on in mpsr
    36.52 +         */
   36.53 +
   36.54 +        ipsr = regs->cr_ipsr;
   36.55 +        vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
   36.56 +             | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
   36.57 +        vmx_vcpu_set_ipsr(vcpu, vpsr.val);
   36.58 +
    36.59 +        /* Currently, for a trap, we do not advance IIP to the next
    36.60 +         * instruction, because we assume the caller has already
    36.61 +         * set up IIP correctly
    36.62 +         */
   36.63 +
   36.64 +        vmx_vcpu_set_iip(vcpu , regs->cr_iip);
   36.65 +
   36.66 +        /* set vifs.v to zero */
   36.67 +        vifs = VPD_CR(vcpu,ifs);
   36.68 +        vifs &= ~IA64_IFS_V;
   36.69 +        vmx_vcpu_set_ifs(vcpu, vifs);
   36.70 +
   36.71 +        vmx_vcpu_set_iipa(vcpu, regs->cr_iipa);
   36.72 +    }
   36.73 +
   36.74 +    vdcr = VPD_CR(vcpu,dcr);
   36.75 +
   36.76 +    /* Set guest psr
    36.77 +     * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
   36.78 +     * be: set to the value of dcr.be
   36.79 +     * pp: set to the value of dcr.pp
   36.80 +     */
   36.81 +    vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
   36.82 +    vpsr.val |= ( vdcr & IA64_DCR_BE);
   36.83 +
   36.84 +    /* VDCR pp bit position is different from VPSR pp bit */
   36.85 +    if ( vdcr & IA64_DCR_PP ) {
   36.86 +        vpsr.val |= IA64_PSR_PP;
   36.87 +    } else {
    36.88 +        vpsr.val &= ~IA64_PSR_PP;
   36.89 +    }
   36.90 +
   36.91 +    vmx_vcpu_set_psr(vcpu, vpsr.val);
   36.92 +
   36.93 +}
    36.94 +void
   36.95 +inject_guest_interruption(VCPU *vcpu, u64 vec)
   36.96 +{
   36.97 +    u64 viva;
   36.98 +    REGS *regs;
   36.99 +    regs=vcpu_regs(vcpu);
  36.100 +
  36.101 +    collect_interruption(vcpu);
  36.102 +
  36.103 +    vmx_vcpu_get_iva(vcpu,&viva);
  36.104 +    regs->cr_iip = viva + vec;
  36.105 +}
  36.106 +
  36.107 +
  36.108 +/*
   36.109 + * Set vIFA & vITIR & vIHA when vPSR.ic == 1
  36.110 + * Parameter:
  36.111 + *  set_ifa: if true, set vIFA
  36.112 + *  set_itir: if true, set vITIR
  36.113 + *  set_iha: if true, set vIHA
  36.114 + */
  36.115 +void
  36.116 +set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
  36.117 +          int set_ifa, int set_itir, int set_iha)
  36.118 +{
  36.119 +    IA64_PSR vpsr;
  36.120 +    u64 value;
  36.121 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  36.122 +    /* Vol2, Table 8-1 */
  36.123 +    if ( vpsr.ic ) {
  36.124 +        if ( set_ifa){
  36.125 +            vmx_vcpu_set_ifa(vcpu, vadr);
  36.126 +        }
  36.127 +        if ( set_itir) {
  36.128 +            value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
  36.129 +            vmx_vcpu_set_itir(vcpu, value);
  36.130 +        }
  36.131 +
  36.132 +        if ( set_iha) {
  36.133 +            vmx_vcpu_thash(vcpu, vadr, &value);
  36.134 +            vmx_vcpu_set_iha(vcpu, value);
  36.135 +        }
  36.136 +    }
   36.139 +}
  36.140 +
  36.141 +/*
  36.142 + * Data TLB Fault
  36.143 + *  @ Data TLB vector
  36.144 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.145 + */
  36.146 +void
  36.147 +dtlb_fault (VCPU *vcpu, u64 vadr)
  36.148 +{
  36.149 +    /* If vPSR.ic, IFA, ITIR, IHA */
  36.150 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  36.151 +    inject_guest_interruption(vcpu,IA64_DATA_TLB_VECTOR);
  36.152 +}
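          +
          +/* Sketch (illustrative) of the pattern every handler in this file
          + * follows: stage the control registers the vector needs, then
          + * redirect the guest to its own IVT.  For a data TLB fault on va:
          + *
          + *	set_ifa_itir_iha(vcpu, va, 1, 1, 1);	// IFA, ITIR, IHA
          + *	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
          + *	// collect_interruption() snapshots IIP/IPSR/IFS into the
          + *	// virtual CRs, then sets cr.iip = guest IVA + vector offset
          + */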
  36.153 +
  36.154 +/*
  36.155 + * Instruction TLB Fault
  36.156 + *  @ Instruction TLB vector
  36.157 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.158 + */
  36.159 +void
  36.160 +itlb_fault (VCPU *vcpu, u64 vadr)
  36.161 +{
  36.162 +     /* If vPSR.ic, IFA, ITIR, IHA */
  36.163 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  36.164 +    inject_guest_interruption(vcpu,IA64_INST_TLB_VECTOR);
  36.165 +}
  36.166 +
  36.167 +
  36.168 +
  36.169 +/*
  36.170 + * Data Nested TLB Fault
  36.171 + *  @ Data Nested TLB Vector
  36.172 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.173 + */
  36.174 +void
  36.175 +nested_dtlb (VCPU *vcpu)
  36.176 +{
  36.177 +    inject_guest_interruption(vcpu,IA64_DATA_NESTED_TLB_VECTOR);
  36.178 +}
  36.179 +
  36.180 +/*
  36.181 + * Alternate Data TLB Fault
  36.182 + *  @ Alternate Data TLB vector
  36.183 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.184 + */
  36.185 +void
  36.186 +alt_dtlb (VCPU *vcpu, u64 vadr)
  36.187 +{
  36.188 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  36.189 +    inject_guest_interruption(vcpu,IA64_ALT_DATA_TLB_VECTOR);
  36.190 +}
  36.191 +
  36.192 +
  36.193 +/*
   36.194 + * Alternate Instruction TLB Fault
   36.195 + *  @ Alternate Instruction TLB vector
  36.196 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.197 + */
  36.198 +void
  36.199 +alt_itlb (VCPU *vcpu, u64 vadr)
  36.200 +{
  36.201 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  36.202 +    inject_guest_interruption(vcpu,IA64_ALT_INST_TLB_VECTOR);
  36.203 +}
  36.204 +
  36.205 +/* Deal with:
  36.206 + *  VHPT Translation Vector
  36.207 + */
  36.208 +static void
  36.209 +_vhpt_fault(VCPU *vcpu, u64 vadr)
  36.210 +{
  36.211 +    /* If vPSR.ic, IFA, ITIR, IHA*/
  36.212 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 1);
  36.213 +    inject_guest_interruption(vcpu,IA64_VHPT_TRANS_VECTOR);
   36.216 +}
  36.217 +
  36.218 +/*
  36.219 + * VHPT Instruction Fault
  36.220 + *  @ VHPT Translation vector
  36.221 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.222 + */
  36.223 +void
  36.224 +ivhpt_fault (VCPU *vcpu, u64 vadr)
  36.225 +{
  36.226 +    _vhpt_fault(vcpu, vadr);
  36.227 +}
  36.228 +
  36.229 +
  36.230 +/*
  36.231 + * VHPT Data Fault
  36.232 + *  @ VHPT Translation vector
  36.233 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.234 + */
  36.235 +void
  36.236 +dvhpt_fault (VCPU *vcpu, u64 vadr)
  36.237 +{
  36.238 +    _vhpt_fault(vcpu, vadr);
  36.239 +}
  36.240 +
  36.241 +
  36.242 +
  36.243 +/*
  36.244 + * Deal with:
  36.245 + *  General Exception vector
  36.246 + */
  36.247 +void
  36.248 +_general_exception (VCPU *vcpu)
  36.249 +{
  36.250 +    inject_guest_interruption(vcpu,IA64_GENEX_VECTOR);
  36.251 +}
  36.252 +
  36.253 +
  36.254 +/*
  36.255 + * Illegal Operation Fault
  36.256 + *  @ General Exception Vector
  36.257 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.258 + */
  36.259 +void
  36.260 +illegal_op (VCPU *vcpu)
  36.261 +{
  36.262 +    _general_exception(vcpu);
  36.263 +}
  36.264 +
  36.265 +/*
  36.266 + * Illegal Dependency Fault
  36.267 + *  @ General Exception Vector
  36.268 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.269 + */
  36.270 +void
  36.271 +illegal_dep (VCPU *vcpu)
  36.272 +{
  36.273 +    _general_exception(vcpu);
  36.274 +}
  36.275 +
  36.276 +/*
  36.277 + * Reserved Register/Field Fault
  36.278 + *  @ General Exception Vector
  36.279 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.280 + */
  36.281 +void
  36.282 +rsv_reg_field (VCPU *vcpu)
  36.283 +{
  36.284 +    _general_exception(vcpu);
  36.285 +}
  36.286 +/*
  36.287 + * Privileged Operation Fault
  36.288 + *  @ General Exception Vector
  36.289 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.290 + */
  36.291 +
  36.292 +void
  36.293 +privilege_op (VCPU *vcpu)
  36.294 +{
  36.295 +    _general_exception(vcpu);
  36.296 +}
  36.297 +
  36.298 +/*
   36.299 + * Unimplemented Data Address Fault
  36.300 + *  @ General Exception Vector
  36.301 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.302 + */
  36.303 +void
  36.304 +unimpl_daddr (VCPU *vcpu)
  36.305 +{
  36.306 +    _general_exception(vcpu);
  36.307 +}
  36.308 +
  36.309 +/*
  36.310 + * Privileged Register Fault
  36.311 + *  @ General Exception Vector
  36.312 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.313 + */
  36.314 +void
  36.315 +privilege_reg (VCPU *vcpu)
  36.316 +{
  36.317 +    _general_exception(vcpu);
  36.318 +}
  36.319 +
  36.320 +/* Deal with
  36.321 + *  Nat consumption vector
  36.322 + * Parameter:
   36.323 + *  vadr: ignored when t == REGISTER
  36.324 + */
  36.325 +static void
  36.326 +_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t)
  36.327 +{
  36.328 +    /* If vPSR.ic && t == DATA/INST, IFA */
  36.329 +    if ( t == DATA || t == INSTRUCTION ) {
  36.330 +        /* IFA */
  36.331 +        set_ifa_itir_iha (vcpu, vadr, 1, 0, 0);
  36.332 +    }
  36.333 +
  36.334 +    inject_guest_interruption(vcpu,IA64_NAT_CONSUMPTION_VECTOR);
  36.335 +}
  36.336 +
  36.337 +/*
  36.338 + * IR Data Nat Page Consumption Fault
  36.339 + *  @ Nat Consumption Vector
  36.340 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.341 + */
  36.342 +static void
  36.343 +ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
  36.344 +{
  36.345 +    _nat_consumption_fault(vcpu, vadr, DATA);
  36.346 +}
  36.347 +
  36.348 +/*
  36.349 + * Instruction Nat Page Consumption Fault
  36.350 + *  @ Nat Consumption Vector
  36.351 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.352 + */
  36.353 +void
  36.354 +inat_page_consumption (VCPU *vcpu, u64 vadr)
  36.355 +{
  36.356 +    _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
  36.357 +}
  36.358 +
  36.359 +/*
  36.360 + * Register Nat Consumption Fault
  36.361 + *  @ Nat Consumption Vector
  36.362 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.363 + */
  36.364 +void
  36.365 +rnat_consumption (VCPU *vcpu)
  36.366 +{
  36.367 +    _nat_consumption_fault(vcpu, 0, REGISTER);
  36.368 +}
  36.369 +
  36.370 +/*
  36.371 + * Data Nat Page Consumption Fault
  36.372 + *  @ Nat Consumption Vector
  36.373 + * Refer to SDM Vol2 Table 5-6 & 8-1
  36.374 + */
  36.375 +void
  36.376 +dnat_page_consumption (VCPU *vcpu, uint64_t vadr)
  36.377 +{
  36.378 +    _nat_consumption_fault(vcpu, vadr, DATA);
  36.379 +}
  36.380 +
  36.381 +/* Deal with
  36.382 + *  Page not present vector
  36.383 + */
  36.384 +void
  36.385 +page_not_present(VCPU *vcpu, u64 vadr)
  36.386 +{
  36.387 +    /* If vPSR.ic, IFA, ITIR */
  36.388 +    set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
  36.389 +    inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
  36.390 +}
  36.391 +
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/arch/ia64/vmx_ivt.S	Fri May 20 17:23:51 2005 +0000
    37.3 @@ -0,0 +1,978 @@
    37.4 +/*
    37.5 + * arch/ia64/kernel/vmx_ivt.S
    37.6 + *
    37.7 + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
    37.8 + *	Stephane Eranian <eranian@hpl.hp.com>
    37.9 + *	David Mosberger <davidm@hpl.hp.com>
   37.10 + * Copyright (C) 2000, 2002-2003 Intel Co
   37.11 + *	Asit Mallick <asit.k.mallick@intel.com>
   37.12 + *      Suresh Siddha <suresh.b.siddha@intel.com>
   37.13 + *      Kenneth Chen <kenneth.w.chen@intel.com>
   37.14 + *      Fenghua Yu <fenghua.yu@intel.com>
   37.15 + *
   37.16 + *
   37.17 + * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
   37.18 + * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
   37.19 + *
   37.20 + * 05/3/20 Xuefei Xu  (Anthony Xu) (anthony.xu@intel.com)
   37.21 + *              Supporting Intel virtualization architecture
   37.22 + *
   37.23 + */
   37.24 +
   37.25 +/*
   37.26 + * This file defines the interruption vector table used by the CPU.
   37.27 + * It does not include one entry per possible cause of interruption.
   37.28 + *
   37.29 + * The first 20 entries of the table contain 64 bundles each while the
   37.30 + * remaining 48 entries contain only 16 bundles each.
   37.31 + *
   37.32 + * The 64 bundles are used to allow inlining the whole handler for critical
   37.33 + * interruptions like TLB misses.
   37.34 + *
   37.35 + *  For each entry, the comment is as follows:
   37.36 + *
   37.37 + *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
   37.38 + *  entry offset ----/     /         /                  /          /
   37.39 + *  entry number ---------/         /                  /          /
   37.40 + *  size of the entry -------------/                  /          /
   37.41 + *  vector name -------------------------------------/          /
   37.42 + *  interruptions triggering this vector ----------------------/
   37.43 + *
   37.44 + * The table is 32KB in size and must be aligned on 32KB boundary.
   37.45 + * (The CPU ignores the 15 lower bits of the address)
   37.46 + *
   37.47 + * Table is based upon EAS2.6 (Oct 1999)
   37.48 + */
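          +
          +/*
          + * Size check: 20 entries x 64 bundles + 48 entries x 16 bundles
          + * = 1280 + 768 = 2048 bundles; at 16 bytes per bundle that is
          + * 32768 bytes, exactly the 32KB the table must span.
          + */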
   37.49 +
   37.50 +#include <linux/config.h>
   37.51 +
   37.52 +#include <asm/asmmacro.h>
   37.53 +#include <asm/break.h>
   37.54 +#include <asm/ia32.h>
   37.55 +#include <asm/kregs.h>
   37.56 +#include <asm/offsets.h>
   37.57 +#include <asm/pgtable.h>
   37.58 +#include <asm/processor.h>
   37.59 +#include <asm/ptrace.h>
   37.60 +#include <asm/system.h>
   37.61 +#include <asm/thread_info.h>
   37.62 +#include <asm/unistd.h>
   37.63 +#include <asm/vhpt.h>
   37.64 +
   37.65 +
   37.66 +#if 0
   37.67 +  /*
   37.68 +   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   37.69 +   * needed for something else before enabling this...
   37.70 +   */
   37.71 +# define VMX_DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
   37.72 +#else
   37.73 +# define VMX_DBG_FAULT(i)
   37.74 +#endif
   37.75 +
   37.76 +#include "vmx_minstate.h"
   37.77 +
   37.78 +
   37.79 +
   37.80 +#define VMX_FAULT(n)    \
   37.81 +vmx_fault_##n:;          \
   37.82 +    br.sptk vmx_fault_##n;         \
    37.83 +    ;;
   37.84 +
   37.85 +
   37.86 +#define VMX_REFLECT(n)				\
   37.87 +	mov r31=pr;									\
   37.88 +	mov r19=n;			/* prepare to save predicates */		\
   37.89 +    mov r29=cr.ipsr;        \
   37.90 +    ;;      \
   37.91 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
   37.92 +(p7) br.sptk.many vmx_dispatch_reflection;        \
    37.93 +    VMX_FAULT(n);
   37.94 +
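          +/*
          + * Expansion note: tbit.z sets p6 when cr.ipsr.vm is 0 and p7 when
          + * it is 1, so VMX_REFLECT(n) forwards faults taken with PSR.vm set
          + * to vmx_dispatch_reflection and lets everything else fall into
          + * VMX_FAULT(n), whose self-branch simply spins at vmx_fault_n as a
          + * catch-all for vectors the VMM does not handle yet.
          + */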
   37.95 +
   37.96 +GLOBAL_ENTRY(vmx_panic)
   37.97 +    br.sptk.many vmx_panic
   37.98 +    ;;
   37.99 +END(vmx_panic)
  37.100 +
  37.101 +
  37.102 +
  37.103 +
  37.104 +
  37.105 +	.section .text.ivt,"ax"
  37.106 +
  37.107 +	.align 32768	// align on 32KB boundary
  37.108 +	.global vmx_ia64_ivt
  37.109 +vmx_ia64_ivt:
  37.110 +/////////////////////////////////////////////////////////////////////////////////////////
  37.111 +// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
  37.112 +ENTRY(vmx_vhpt_miss)
  37.113 +    VMX_FAULT(0)
  37.114 +END(vmx_vhpt_miss)
  37.115 +
  37.116 +	.org vmx_ia64_ivt+0x400
  37.117 +/////////////////////////////////////////////////////////////////////////////////////////
  37.118 +// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
  37.119 +ENTRY(vmx_itlb_miss)
  37.120 +    mov r31 = pr
  37.121 +    mov r29=cr.ipsr;
  37.122 +    ;;
  37.123 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
  37.124 +(p6) br.sptk vmx_fault_1
  37.125 +    mov r16 = cr.ifa
  37.126 +    ;;
  37.127 +    thash r17 = r16
  37.128 +    ttag r20 = r16
  37.129 +    ;;
  37.130 +vmx_itlb_loop:
  37.131 +    cmp.eq p6,p0 = r0, r17
  37.132 +(p6) br vmx_itlb_out
  37.133 +    ;;
  37.134 +    adds r22 = VLE_TITAG_OFFSET, r17
  37.135 +    adds r23 = VLE_CCHAIN_OFFSET, r17
  37.136 +    ;;
  37.137 +    ld8 r24 = [r22]
  37.138 +    ld8 r25 = [r23]
  37.139 +    ;;
  37.140 +    lfetch [r25]
  37.141 +    cmp.eq  p6,p7 = r20, r24
  37.142 +    ;;
  37.143 +(p7)    mov r17 = r25;
  37.144 +(p7)    br.sptk vmx_itlb_loop
  37.145 +    ;;
  37.146 +    adds r23 = VLE_PGFLAGS_OFFSET, r17
  37.147 +    adds r24 = VLE_ITIR_OFFSET, r17
  37.148 +    ;;
  37.149 +    ld8 r26 = [r23]
  37.150 +    ld8 r25 = [r24]
  37.151 +    ;;
  37.152 +    mov cr.itir = r25
  37.153 +    ;;
  37.154 +    itc.i r26
  37.155 +    ;;
  37.156 +    srlz.i
  37.157 +    ;;
  37.158 +    mov r23=r31
  37.159 +    mov r22=b0
  37.160 +    adds r16=IA64_VPD_BASE_OFFSET,r21
  37.161 +    ;;
  37.162 +    ld8 r18=[r16]
  37.163 +    ;;
  37.164 +    adds r19=VPD(VPSR),r18
  37.165 +    movl r20=__vsa_base
  37.166 +    ;;
  37.167 +    ld8 r19=[r19]
  37.168 +    ld8 r20=[r20]
  37.169 +    ;;
  37.170 +    br.sptk ia64_vmm_entry
  37.171 +    ;;
  37.172 +vmx_itlb_out:
  37.173 +    mov r19 = 1
  37.174 +    br.sptk vmx_dispatch_tlb_miss
  37.175 +    VMX_FAULT(1);
  37.176 +END(vmx_itlb_miss)
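          +
          +// Rough C equivalent of the hash-chain walk above (a sketch; the
          +// VLE_* offsets are assumed to name the tag, pgflags, itir and
          +// collision-chain fields of a vTLB entry).  The DTLB entry below is
          +// identical except that it installs the translation with itc.d:
          +//
          +//	vle = thash(ifa);  tag = ttag(ifa);
          +//	while (vle != 0) {
          +//		if (vle->titag == tag) {
          +//			cr.itir = vle->itir;
          +//			itc_i(vle->pgflags);	/* install, re-enter guest */
          +//			goto ia64_vmm_entry;
          +//		}
          +//		vle = vle->cchain;
          +//	}
          +//	vmx_dispatch_tlb_miss();	/* r19 = vector number */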
  37.177 +
  37.178 +	.org vmx_ia64_ivt+0x0800
  37.179 +/////////////////////////////////////////////////////////////////////////////////////////
  37.180 +// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
  37.181 +ENTRY(vmx_dtlb_miss)
  37.182 +    mov r31 = pr
  37.183 +    mov r29=cr.ipsr;
  37.184 +    ;;
  37.185 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   37.186 +(p6) br.sptk vmx_fault_2
  37.187 +    mov r16 = cr.ifa
  37.188 +    ;;
  37.189 +    thash r17 = r16
  37.190 +    ttag r20 = r16
  37.191 +    ;;
  37.192 +vmx_dtlb_loop:
  37.193 +    cmp.eq p6,p0 = r0, r17
   37.194 +(p6) br vmx_dtlb_out
  37.195 +    ;;
  37.196 +    adds r22 = VLE_TITAG_OFFSET, r17
  37.197 +    adds r23 = VLE_CCHAIN_OFFSET, r17
  37.198 +    ;;
  37.199 +    ld8 r24 = [r22]
  37.200 +    ld8 r25 = [r23]
  37.201 +    ;;
  37.202 +    lfetch [r25]
  37.203 +    cmp.eq  p6,p7 = r20, r24
  37.204 +    ;;
   37.205 +(p7)    mov r17 = r25;
   37.206 +(p7)    br.sptk vmx_dtlb_loop
  37.207 +    ;;
  37.208 +    adds r23 = VLE_PGFLAGS_OFFSET, r17
  37.209 +    adds r24 = VLE_ITIR_OFFSET, r17
  37.210 +    ;;
  37.211 +    ld8 r26 = [r23]
  37.212 +    ld8 r25 = [r24]
  37.213 +    ;;
  37.214 +    mov cr.itir = r25
  37.215 +    ;;
  37.216 +    itc.d r26
  37.217 +    ;;
  37.218 +    srlz.d;
  37.219 +    ;;
  37.220 +    mov r23=r31
  37.221 +    mov r22=b0
  37.222 +    adds r16=IA64_VPD_BASE_OFFSET,r21
  37.223 +    ;;
  37.224 +    ld8 r18=[r16]
  37.225 +    ;;
  37.226 +    adds r19=VPD(VPSR),r18
  37.227 +    movl r20=__vsa_base
  37.228 +    ;;
  37.229 +    ld8 r19=[r19]
  37.230 +    ld8 r20=[r20]
  37.231 +    ;;
  37.232 +    br.sptk ia64_vmm_entry
  37.233 +    ;;
  37.234 +vmx_dtlb_out:
  37.235 +    mov r19 = 2
  37.236 +    br.sptk vmx_dispatch_tlb_miss
  37.237 +    VMX_FAULT(2);
  37.238 +END(vmx_dtlb_miss)
  37.239 +
  37.240 +	.org vmx_ia64_ivt+0x0c00
  37.241 +/////////////////////////////////////////////////////////////////////////////////////////
  37.242 +// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
  37.243 +ENTRY(vmx_alt_itlb_miss)
  37.244 +    mov r31 = pr
  37.245 +    mov r29=cr.ipsr;
  37.246 +    ;;
  37.247 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   37.248 +(p7) br.sptk vmx_fault_3
  37.249 +	mov r16=cr.ifa		// get address that caused the TLB miss
  37.250 +	movl r17=PAGE_KERNEL
  37.251 +	mov r24=cr.ipsr
  37.252 +	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
  37.253 +	;;
  37.254 +	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
  37.255 +	shr.u r18=r16,57	// move address bit 61 to bit 4
  37.256 +	;;
  37.257 +	andcm r18=0x10,r18	// bit 4=~address-bit(61)
  37.258 +	or r19=r17,r19		// insert PTE control bits into r19
  37.259 +	;;
  37.260 +	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
  37.261 +	;;
  37.262 +	itc.i r19		// insert the TLB entry
  37.263 +	mov pr=r31,-1
  37.264 +	rfi
  37.265 +    VMX_FAULT(3);
  37.266 +END(vmx_alt_itlb_miss)
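          +
          +// Worked example: for an access in region 6 (va = 0xc...), bits
          +// 63..61 are 110, so address bit 61 is 0 and andcm(0x10, va>>57)
          +// yields 0x10, turning on the uncached memory-attribute bit in the
          +// PTE.  In region 7 (va = 0xe...) bit 61 is 1 and the identity
          +// mapping stays cacheable.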
  37.267 +
  37.268 +
  37.269 +	.org vmx_ia64_ivt+0x1000
  37.270 +/////////////////////////////////////////////////////////////////////////////////////////
  37.271 +// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
  37.272 +ENTRY(vmx_alt_dtlb_miss)
  37.273 +	mov r31=pr
  37.274 +    mov r29=cr.ipsr;
  37.275 +    ;;
  37.276 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
   37.277 +(p7) br.sptk vmx_fault_4
  37.278 +	mov r16=cr.ifa		// get address that caused the TLB miss
  37.279 +	movl r17=PAGE_KERNEL
  37.280 +	mov r20=cr.isr
  37.281 +	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
  37.282 +	mov r24=cr.ipsr
  37.283 +	;;
  37.284 +	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
  37.285 +	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
  37.286 +	shr.u r18=r16,57			// move address bit 61 to bit 4
  37.287 +	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
  37.288 +	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
  37.289 +	;;
  37.290 +	andcm r18=0x10,r18	// bit 4=~address-bit(61)
  37.291 +(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
  37.292 +	dep r24=-1,r24,IA64_PSR_ED_BIT,1
  37.293 +	or r19=r19,r17		// insert PTE control bits into r19
  37.294 +	;;
  37.295 +	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
  37.296 +(p6) mov cr.ipsr=r24
  37.297 +	;;
  37.298 +(p7) itc.d r19		// insert the TLB entry
  37.299 +	mov pr=r31,-1
  37.300 +	rfi
  37.301 +    VMX_FAULT(4);
  37.302 +END(vmx_alt_dtlb_miss)
  37.303 +
  37.304 +	.org vmx_ia64_ivt+0x1400
  37.305 +/////////////////////////////////////////////////////////////////////////////////////////
  37.306 +// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
  37.307 +ENTRY(vmx_nested_dtlb_miss)
  37.308 +    VMX_FAULT(5)
  37.309 +END(vmx_nested_dtlb_miss)
  37.310 +
  37.311 +	.org vmx_ia64_ivt+0x1800
  37.312 +/////////////////////////////////////////////////////////////////////////////////////////
  37.313 +// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
  37.314 +ENTRY(vmx_ikey_miss)
  37.315 +	VMX_REFLECT(6)
  37.316 +END(vmx_ikey_miss)
  37.317 +
  37.318 +	.org vmx_ia64_ivt+0x1c00
  37.319 +/////////////////////////////////////////////////////////////////////////////////////////
  37.320 +// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
  37.321 +ENTRY(vmx_dkey_miss)
  37.322 +	VMX_REFLECT(7)
  37.323 +END(vmx_dkey_miss)
  37.324 +
  37.325 +	.org vmx_ia64_ivt+0x2000
  37.326 +/////////////////////////////////////////////////////////////////////////////////////////
  37.327 +// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
  37.328 +ENTRY(vmx_dirty_bit)
  37.329 +	VMX_REFLECT(8)
   37.330 +END(vmx_dirty_bit)
  37.331 +
  37.332 +	.org vmx_ia64_ivt+0x2400
  37.333 +/////////////////////////////////////////////////////////////////////////////////////////
  37.334 +// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
  37.335 +ENTRY(vmx_iaccess_bit)
  37.336 +	VMX_REFLECT(9)
  37.337 +END(vmx_iaccess_bit)
  37.338 +
  37.339 +	.org vmx_ia64_ivt+0x2800
  37.340 +/////////////////////////////////////////////////////////////////////////////////////////
  37.341 +// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
  37.342 +ENTRY(vmx_daccess_bit)
  37.343 +	VMX_REFLECT(10)
  37.344 +END(vmx_daccess_bit)
  37.345 +
  37.346 +	.org vmx_ia64_ivt+0x2c00
  37.347 +/////////////////////////////////////////////////////////////////////////////////////////
  37.348 +// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
  37.349 +ENTRY(vmx_break_fault)
  37.350 +	mov r31=pr
  37.351 +    mov r19=11
  37.352 +    br.sptk.many vmx_dispatch_break_fault
  37.353 +END(vmx_break_fault)
  37.354 +
  37.355 +	.org vmx_ia64_ivt+0x3000
  37.356 +/////////////////////////////////////////////////////////////////////////////////////////
  37.357 +// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
  37.358 +ENTRY(vmx_interrupt)
  37.359 +	mov r31=pr		// prepare to save predicates
  37.360 +    mov r19=12
  37.361 +    mov r29=cr.ipsr
  37.362 +    ;;
  37.363 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
  37.364 +(p7) br.sptk vmx_dispatch_interrupt
  37.365 +    ;;
  37.366 +	mov r27=ar.rsc			/* M */
  37.367 +	mov r20=r1			/* A */
  37.368 +	mov r25=ar.unat		/* M */
  37.369 +	mov r26=ar.pfs			/* I */
  37.370 +	mov r28=cr.iip			/* M */
  37.371 +	cover               /* B (or nothing) */
  37.372 +	;;
  37.373 +	mov r1=sp
  37.374 +	;;
  37.375 +	invala				/* M */
  37.376 +	mov r30=cr.ifs
  37.377 +	;;
  37.378 +    addl r1=-IA64_PT_REGS_SIZE,r1
  37.379 +    ;;
  37.380 +	adds r17=2*L1_CACHE_BYTES,r1		/* really: biggest cache-line size */
  37.381 +	adds r16=PT(CR_IPSR),r1
  37.382 +	;;
  37.383 +	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
  37.384 +	st8 [r16]=r29		/* save cr.ipsr */
  37.385 +	;;
  37.386 +	lfetch.fault.excl.nt1 [r17]
  37.387 +	mov r29=b0
  37.388 +	;;
  37.389 +	adds r16=PT(R8),r1  	/* initialize first base pointer */
  37.390 +	adds r17=PT(R9),r1  	/* initialize second base pointer */
  37.391 +	mov r18=r0      		/* make sure r18 isn't NaT */
  37.392 +	;;
  37.393 +.mem.offset 0,0; st8.spill [r16]=r8,16
  37.394 +.mem.offset 8,0; st8.spill [r17]=r9,16
  37.395 +        ;;
  37.396 +.mem.offset 0,0; st8.spill [r16]=r10,24
  37.397 +.mem.offset 8,0; st8.spill [r17]=r11,24
  37.398 +        ;;
  37.399 +	st8 [r16]=r28,16	/* save cr.iip */
  37.400 +	st8 [r17]=r30,16	/* save cr.ifs */
  37.401 +	mov r8=ar.fpsr		/* M */
  37.402 +	mov r9=ar.csd
  37.403 +	mov r10=ar.ssd
  37.404 +	movl r11=FPSR_DEFAULT   /* L-unit */
  37.405 +	;;
  37.406 +	st8 [r16]=r25,16	/* save ar.unat */
  37.407 +	st8 [r17]=r26,16	/* save ar.pfs */
  37.408 +	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
  37.409 +	;;
  37.410 +    st8 [r16]=r27,16   /* save ar.rsc */
  37.411 +    adds r17=16,r17    /* skip over ar_rnat field */
  37.412 +    ;;          /* avoid RAW on r16 & r17 */
  37.413 +    st8 [r17]=r31,16   /* save predicates */
  37.414 +    adds r16=16,r16    /* skip over ar_bspstore field */
  37.415 +    ;;
  37.416 +    st8 [r16]=r29,16   /* save b0 */
  37.417 +    st8 [r17]=r18,16   /* save ar.rsc value for "loadrs" */
  37.418 +    ;;
  37.419 +.mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
  37.420 +.mem.offset 8,0; st8.spill [r17]=r12,16
  37.421 +    adds r12=-16,r1    /* switch to kernel memory stack (with 16 bytes of scratch) */
  37.422 +    ;;
  37.423 +.mem.offset 0,0; st8.spill [r16]=r13,16
  37.424 +.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
  37.425 +    mov r13=r21    /* establish `current' */
  37.426 +    ;;
  37.427 +.mem.offset 0,0; st8.spill [r16]=r15,16
  37.428 +.mem.offset 8,0; st8.spill [r17]=r14,16
  37.429 +    dep r14=-1,r0,60,4
  37.430 +    ;;
  37.431 +.mem.offset 0,0; st8.spill [r16]=r2,16
  37.432 +.mem.offset 8,0; st8.spill [r17]=r3,16
  37.433 +    adds r2=IA64_PT_REGS_R16_OFFSET,r1
  37.434 +    ;;
  37.435 +    mov r8=ar.ccv
  37.436 +    movl r1=__gp       /* establish kernel global pointer */
   37.437 +    ;;
  37.438 +    bsw.1
  37.439 +    ;;
  37.440 +	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  37.441 +	mov out0=cr.ivr		// pass cr.ivr as first arg
  37.442 +	add out1=16,sp		// pass pointer to pt_regs as second arg
  37.443 +
  37.444 +	ssm psr.ic
  37.445 +    ;;
  37.446 +    srlz.i
  37.447 +	;;
  37.448 +    ssm psr.i
  37.449 +	adds r3=8,r2		// set up second base pointer for SAVE_REST
  37.450 +	srlz.i			// ensure everybody knows psr.ic is back on
  37.451 +	;;
  37.452 +.mem.offset 0,0; st8.spill [r2]=r16,16
  37.453 +.mem.offset 8,0; st8.spill [r3]=r17,16
  37.454 +    ;;
  37.455 +.mem.offset 0,0; st8.spill [r2]=r18,16
  37.456 +.mem.offset 8,0; st8.spill [r3]=r19,16
  37.457 +    ;;
  37.458 +.mem.offset 0,0; st8.spill [r2]=r20,16
  37.459 +.mem.offset 8,0; st8.spill [r3]=r21,16
  37.460 +    mov r18=b6
  37.461 +    ;;
  37.462 +.mem.offset 0,0; st8.spill [r2]=r22,16
  37.463 +.mem.offset 8,0; st8.spill [r3]=r23,16
  37.464 +    mov r19=b7
  37.465 +    ;;
  37.466 +.mem.offset 0,0; st8.spill [r2]=r24,16
  37.467 +.mem.offset 8,0; st8.spill [r3]=r25,16
  37.468 +    ;;
  37.469 +.mem.offset 0,0; st8.spill [r2]=r26,16
  37.470 +.mem.offset 8,0; st8.spill [r3]=r27,16
  37.471 +    ;;
  37.472 +.mem.offset 0,0; st8.spill [r2]=r28,16
  37.473 +.mem.offset 8,0; st8.spill [r3]=r29,16
  37.474 +    ;;
  37.475 +.mem.offset 0,0; st8.spill [r2]=r30,16
  37.476 +.mem.offset 8,0; st8.spill [r3]=r31,32
  37.477 +    ;;
  37.478 +    mov ar.fpsr=r11     /* M-unit */
  37.479 +    st8 [r2]=r8,8      /* ar.ccv */
  37.480 +    adds r24=PT(B6)-PT(F7),r3
  37.481 +    ;;
  37.482 +    stf.spill [r2]=f6,32
  37.483 +    stf.spill [r3]=f7,32
  37.484 +    ;;
  37.485 +    stf.spill [r2]=f8,32
  37.486 +    stf.spill [r3]=f9,32
  37.487 +    ;;
  37.488 +    stf.spill [r2]=f10
  37.489 +    stf.spill [r3]=f11
  37.490 +    adds r25=PT(B7)-PT(F11),r3
  37.491 +    ;;
  37.492 +    st8 [r24]=r18,16       /* b6 */
  37.493 +    st8 [r25]=r19,16       /* b7 */
  37.494 +    ;;
  37.495 +    st8 [r24]=r9           /* ar.csd */
  37.496 +    st8 [r25]=r10          /* ar.ssd */
  37.497 +    ;;
  37.498 +	srlz.d			// make sure we see the effect of cr.ivr
  37.499 +	movl r14=ia64_leave_nested
  37.500 +	;;
  37.501 +	mov rp=r14
  37.502 +	br.call.sptk.many b6=vmx_ia64_handle_irq
  37.503 +	;;
  37.504 +END(vmx_interrupt)
  37.505 +
  37.506 +	.org vmx_ia64_ivt+0x3400
  37.507 +/////////////////////////////////////////////////////////////////////////////////////////
  37.508 +// 0x3400 Entry 13 (size 64 bundles) Reserved
  37.509 +	VMX_DBG_FAULT(13)
  37.510 +	VMX_FAULT(13)
  37.511 +
  37.512 +
  37.513 +	.org vmx_ia64_ivt+0x3800
  37.514 +/////////////////////////////////////////////////////////////////////////////////////////
  37.515 +// 0x3800 Entry 14 (size 64 bundles) Reserved
  37.516 +	VMX_DBG_FAULT(14)
  37.517 +	VMX_FAULT(14)
  37.518 +
  37.519 +
  37.520 +	.org vmx_ia64_ivt+0x3c00
  37.521 +/////////////////////////////////////////////////////////////////////////////////////////
  37.522 +// 0x3c00 Entry 15 (size 64 bundles) Reserved
  37.523 +	VMX_DBG_FAULT(15)
  37.524 +	VMX_FAULT(15)
  37.525 +
  37.526 +
  37.527 +	.org vmx_ia64_ivt+0x4000
  37.528 +/////////////////////////////////////////////////////////////////////////////////////////
  37.529 +// 0x4000 Entry 16 (size 64 bundles) Reserved
  37.530 +	VMX_DBG_FAULT(16)
  37.531 +	VMX_FAULT(16)
  37.532 +
  37.533 +	.org vmx_ia64_ivt+0x4400
  37.534 +/////////////////////////////////////////////////////////////////////////////////////////
  37.535 +// 0x4400 Entry 17 (size 64 bundles) Reserved
  37.536 +	VMX_DBG_FAULT(17)
  37.537 +	VMX_FAULT(17)
  37.538 +
  37.539 +	.org vmx_ia64_ivt+0x4800
  37.540 +/////////////////////////////////////////////////////////////////////////////////////////
  37.541 +// 0x4800 Entry 18 (size 64 bundles) Reserved
  37.542 +	VMX_DBG_FAULT(18)
  37.543 +	VMX_FAULT(18)
  37.544 +
  37.545 +	.org vmx_ia64_ivt+0x4c00
  37.546 +/////////////////////////////////////////////////////////////////////////////////////////
  37.547 +// 0x4c00 Entry 19 (size 64 bundles) Reserved
  37.548 +	VMX_DBG_FAULT(19)
  37.549 +	VMX_FAULT(19)
  37.550 +
          +
          +	.org vmx_ia64_ivt+0x5200
   37.551 +/////////////////////////////////////////////////////////////////////////////////////////
   37.552 +// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
  37.553 +ENTRY(vmx_iaccess_rights)
  37.554 +	VMX_REFLECT(22)
  37.555 +END(vmx_iaccess_rights)
  37.556 +
  37.557 +	.org vmx_ia64_ivt+0x5300
  37.558 +/////////////////////////////////////////////////////////////////////////////////////////
  37.559 +// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
  37.560 +ENTRY(vmx_daccess_rights)
  37.561 +	VMX_REFLECT(23)
  37.562 +END(vmx_daccess_rights)
  37.563 +
  37.564 +	.org vmx_ia64_ivt+0x5400
  37.565 +/////////////////////////////////////////////////////////////////////////////////////////
  37.566 +// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
  37.567 +ENTRY(vmx_general_exception)
  37.568 +    VMX_FAULT(24)
  37.569 +//    VMX_REFLECT(24)
  37.570 +END(vmx_general_exception)
  37.571 +
  37.572 +	.org vmx_ia64_ivt+0x5500
  37.573 +/////////////////////////////////////////////////////////////////////////////////////////
  37.574 +// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
  37.575 +ENTRY(vmx_disabled_fp_reg)
  37.576 +	VMX_REFLECT(25)
  37.577 +END(vmx_disabled_fp_reg)
  37.578 +
  37.579 +	.org vmx_ia64_ivt+0x5600
  37.580 +/////////////////////////////////////////////////////////////////////////////////////////
  37.581 +// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
  37.582 +ENTRY(vmx_nat_consumption)
  37.583 +	VMX_REFLECT(26)
  37.584 +END(vmx_nat_consumption)
  37.585 +
  37.586 +	.org vmx_ia64_ivt+0x5700
  37.587 +/////////////////////////////////////////////////////////////////////////////////////////
  37.588 +// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
  37.589 +ENTRY(vmx_speculation_vector)
  37.590 +	VMX_REFLECT(27)
  37.591 +END(vmx_speculation_vector)
  37.592 +
  37.593 +	.org vmx_ia64_ivt+0x5800
  37.594 +/////////////////////////////////////////////////////////////////////////////////////////
  37.595 +// 0x5800 Entry 28 (size 16 bundles) Reserved
  37.596 +	VMX_DBG_FAULT(28)
  37.597 +	VMX_FAULT(28)
  37.598 +
  37.599 +	.org vmx_ia64_ivt+0x5900
  37.600 +/////////////////////////////////////////////////////////////////////////////////////////
  37.601 +// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
  37.602 +ENTRY(vmx_debug_vector)
  37.603 +	VMX_DBG_FAULT(29)
  37.604 +	VMX_FAULT(29)
  37.605 +END(vmx_debug_vector)
  37.606 +
  37.607 +	.org vmx_ia64_ivt+0x5a00
  37.608 +/////////////////////////////////////////////////////////////////////////////////////////
  37.609 +// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
  37.610 +ENTRY(vmx_unaligned_access)
  37.611 +	VMX_REFLECT(30)
  37.612 +END(vmx_unaligned_access)
  37.613 +
  37.614 +	.org vmx_ia64_ivt+0x5b00
  37.615 +/////////////////////////////////////////////////////////////////////////////////////////
  37.616 +// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
  37.617 +ENTRY(vmx_unsupported_data_reference)
  37.618 +	VMX_REFLECT(31)
  37.619 +END(vmx_unsupported_data_reference)
  37.620 +
  37.621 +	.org vmx_ia64_ivt+0x5c00
  37.622 +/////////////////////////////////////////////////////////////////////////////////////////
  37.623 +// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
  37.624 +ENTRY(vmx_floating_point_fault)
  37.625 +	VMX_REFLECT(32)
  37.626 +END(vmx_floating_point_fault)
  37.627 +
  37.628 +	.org vmx_ia64_ivt+0x5d00
  37.629 +/////////////////////////////////////////////////////////////////////////////////////////
  37.630 +// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
  37.631 +ENTRY(vmx_floating_point_trap)
  37.632 +	VMX_REFLECT(33)
  37.633 +END(vmx_floating_point_trap)
  37.634 +
  37.635 +	.org vmx_ia64_ivt+0x5e00
  37.636 +/////////////////////////////////////////////////////////////////////////////////////////
  37.637 +// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
  37.638 +ENTRY(vmx_lower_privilege_trap)
  37.639 +	VMX_REFLECT(34)
  37.640 +END(vmx_lower_privilege_trap)
  37.641 +
  37.642 +	.org vmx_ia64_ivt+0x5f00
  37.643 +/////////////////////////////////////////////////////////////////////////////////////////
  37.644 +// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
  37.645 +ENTRY(vmx_taken_branch_trap)
  37.646 +	VMX_REFLECT(35)
  37.647 +END(vmx_taken_branch_trap)
  37.648 +
  37.649 +	.org vmx_ia64_ivt+0x6000
  37.650 +/////////////////////////////////////////////////////////////////////////////////////////
  37.651 +// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
  37.652 +ENTRY(vmx_single_step_trap)
  37.653 +	VMX_REFLECT(36)
  37.654 +END(vmx_single_step_trap)
  37.655 +
  37.656 +	.org vmx_ia64_ivt+0x6100
  37.657 +/////////////////////////////////////////////////////////////////////////////////////////
  37.658 +// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
  37.659 +ENTRY(vmx_virtualization_fault)
  37.660 +	VMX_DBG_FAULT(37)
  37.661 +	mov r31=pr
  37.662 +    mov r19=37
  37.663 +    br.sptk vmx_dispatch_virtualization_fault
  37.664 +END(vmx_virtualization_fault)
  37.665 +
  37.666 +	.org vmx_ia64_ivt+0x6200
  37.667 +/////////////////////////////////////////////////////////////////////////////////////////
  37.668 +// 0x6200 Entry 38 (size 16 bundles) Reserved
  37.669 +	VMX_DBG_FAULT(38)
  37.670 +	VMX_FAULT(38)
  37.671 +
  37.672 +	.org vmx_ia64_ivt+0x6300
  37.673 +/////////////////////////////////////////////////////////////////////////////////////////
  37.674 +// 0x6300 Entry 39 (size 16 bundles) Reserved
  37.675 +	VMX_DBG_FAULT(39)
  37.676 +	VMX_FAULT(39)
  37.677 +
  37.678 +	.org vmx_ia64_ivt+0x6400
  37.679 +/////////////////////////////////////////////////////////////////////////////////////////
  37.680 +// 0x6400 Entry 40 (size 16 bundles) Reserved
  37.681 +	VMX_DBG_FAULT(40)
  37.682 +	VMX_FAULT(40)
  37.683 +
  37.684 +	.org vmx_ia64_ivt+0x6500
  37.685 +/////////////////////////////////////////////////////////////////////////////////////////
  37.686 +// 0x6500 Entry 41 (size 16 bundles) Reserved
  37.687 +	VMX_DBG_FAULT(41)
  37.688 +	VMX_FAULT(41)
  37.689 +
  37.690 +	.org vmx_ia64_ivt+0x6600
  37.691 +/////////////////////////////////////////////////////////////////////////////////////////
  37.692 +// 0x6600 Entry 42 (size 16 bundles) Reserved
  37.693 +	VMX_DBG_FAULT(42)
  37.694 +	VMX_FAULT(42)
  37.695 +
  37.696 +	.org vmx_ia64_ivt+0x6700
  37.697 +/////////////////////////////////////////////////////////////////////////////////////////
  37.698 +// 0x6700 Entry 43 (size 16 bundles) Reserved
  37.699 +	VMX_DBG_FAULT(43)
  37.700 +	VMX_FAULT(43)
  37.701 +
  37.702 +	.org vmx_ia64_ivt+0x6800
  37.703 +/////////////////////////////////////////////////////////////////////////////////////////
  37.704 +// 0x6800 Entry 44 (size 16 bundles) Reserved
  37.705 +	VMX_DBG_FAULT(44)
  37.706 +	VMX_FAULT(44)
  37.707 +
  37.708 +	.org vmx_ia64_ivt+0x6900
  37.709 +/////////////////////////////////////////////////////////////////////////////////////////
   37.710 +// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
  37.711 +ENTRY(vmx_ia32_exception)
  37.712 +	VMX_DBG_FAULT(45)
  37.713 +	VMX_FAULT(45)
  37.714 +END(vmx_ia32_exception)
  37.715 +
  37.716 +	.org vmx_ia64_ivt+0x6a00
  37.717 +/////////////////////////////////////////////////////////////////////////////////////////
  37.718 +// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
  37.719 +ENTRY(vmx_ia32_intercept)
  37.720 +	VMX_DBG_FAULT(46)
  37.721 +	VMX_FAULT(46)
  37.722 +END(vmx_ia32_intercept)
  37.723 +
  37.724 +	.org vmx_ia64_ivt+0x6b00
  37.725 +/////////////////////////////////////////////////////////////////////////////////////////
  37.726 +// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
  37.727 +ENTRY(vmx_ia32_interrupt)
  37.728 +	VMX_DBG_FAULT(47)
  37.729 +	VMX_FAULT(47)
  37.730 +END(vmx_ia32_interrupt)
  37.731 +
  37.732 +	.org vmx_ia64_ivt+0x6c00
  37.733 +/////////////////////////////////////////////////////////////////////////////////////////
  37.734 +// 0x6c00 Entry 48 (size 16 bundles) Reserved
  37.735 +	VMX_DBG_FAULT(48)
  37.736 +	VMX_FAULT(48)
  37.737 +
  37.738 +	.org vmx_ia64_ivt+0x6d00
  37.739 +/////////////////////////////////////////////////////////////////////////////////////////
  37.740 +// 0x6d00 Entry 49 (size 16 bundles) Reserved
  37.741 +	VMX_DBG_FAULT(49)
  37.742 +	VMX_FAULT(49)
  37.743 +
  37.744 +	.org vmx_ia64_ivt+0x6e00
  37.745 +/////////////////////////////////////////////////////////////////////////////////////////
  37.746 +// 0x6e00 Entry 50 (size 16 bundles) Reserved
  37.747 +	VMX_DBG_FAULT(50)
  37.748 +	VMX_FAULT(50)
  37.749 +
  37.750 +	.org vmx_ia64_ivt+0x6f00
  37.751 +/////////////////////////////////////////////////////////////////////////////////////////
  37.752 +// 0x6f00 Entry 51 (size 16 bundles) Reserved
  37.753 +	VMX_DBG_FAULT(51)
  37.754 +	VMX_FAULT(51)
  37.755 +
  37.756 +	.org vmx_ia64_ivt+0x7000
  37.757 +/////////////////////////////////////////////////////////////////////////////////////////
  37.758 +// 0x7000 Entry 52 (size 16 bundles) Reserved
  37.759 +	VMX_DBG_FAULT(52)
  37.760 +	VMX_FAULT(52)
  37.761 +
  37.762 +	.org vmx_ia64_ivt+0x7100
  37.763 +/////////////////////////////////////////////////////////////////////////////////////////
  37.764 +// 0x7100 Entry 53 (size 16 bundles) Reserved
  37.765 +	VMX_DBG_FAULT(53)
  37.766 +	VMX_FAULT(53)
  37.767 +
  37.768 +	.org vmx_ia64_ivt+0x7200
  37.769 +/////////////////////////////////////////////////////////////////////////////////////////
  37.770 +// 0x7200 Entry 54 (size 16 bundles) Reserved
  37.771 +	VMX_DBG_FAULT(54)
  37.772 +	VMX_FAULT(54)
  37.773 +
  37.774 +	.org vmx_ia64_ivt+0x7300
  37.775 +/////////////////////////////////////////////////////////////////////////////////////////
  37.776 +// 0x7300 Entry 55 (size 16 bundles) Reserved
  37.777 +	VMX_DBG_FAULT(55)
  37.778 +	VMX_FAULT(55)
  37.779 +
  37.780 +	.org vmx_ia64_ivt+0x7400
  37.781 +/////////////////////////////////////////////////////////////////////////////////////////
  37.782 +// 0x7400 Entry 56 (size 16 bundles) Reserved
  37.783 +	VMX_DBG_FAULT(56)
  37.784 +	VMX_FAULT(56)
  37.785 +
  37.786 +	.org vmx_ia64_ivt+0x7500
  37.787 +/////////////////////////////////////////////////////////////////////////////////////////
  37.788 +// 0x7500 Entry 57 (size 16 bundles) Reserved
  37.789 +	VMX_DBG_FAULT(57)
  37.790 +	VMX_FAULT(57)
  37.791 +
  37.792 +	.org vmx_ia64_ivt+0x7600
  37.793 +/////////////////////////////////////////////////////////////////////////////////////////
  37.794 +// 0x7600 Entry 58 (size 16 bundles) Reserved
  37.795 +	VMX_DBG_FAULT(58)
  37.796 +	VMX_FAULT(58)
  37.797 +
  37.798 +	.org vmx_ia64_ivt+0x7700
  37.799 +/////////////////////////////////////////////////////////////////////////////////////////
  37.800 +// 0x7700 Entry 59 (size 16 bundles) Reserved
  37.801 +	VMX_DBG_FAULT(59)
  37.802 +	VMX_FAULT(59)
  37.803 +
  37.804 +	.org vmx_ia64_ivt+0x7800
  37.805 +/////////////////////////////////////////////////////////////////////////////////////////
  37.806 +// 0x7800 Entry 60 (size 16 bundles) Reserved
  37.807 +	VMX_DBG_FAULT(60)
  37.808 +	VMX_FAULT(60)
  37.809 +
  37.810 +	.org vmx_ia64_ivt+0x7900
  37.811 +/////////////////////////////////////////////////////////////////////////////////////////
  37.812 +// 0x7900 Entry 61 (size 16 bundles) Reserved
  37.813 +	VMX_DBG_FAULT(61)
  37.814 +	VMX_FAULT(61)
  37.815 +
  37.816 +	.org vmx_ia64_ivt+0x7a00
  37.817 +/////////////////////////////////////////////////////////////////////////////////////////
  37.818 +// 0x7a00 Entry 62 (size 16 bundles) Reserved
  37.819 +	VMX_DBG_FAULT(62)
  37.820 +	VMX_FAULT(62)
  37.821 +
  37.822 +	.org vmx_ia64_ivt+0x7b00
  37.823 +/////////////////////////////////////////////////////////////////////////////////////////
  37.824 +// 0x7b00 Entry 63 (size 16 bundles) Reserved
  37.825 +	VMX_DBG_FAULT(63)
  37.826 +	VMX_FAULT(63)
  37.827 +
  37.828 +	.org vmx_ia64_ivt+0x7c00
  37.829 +/////////////////////////////////////////////////////////////////////////////////////////
  37.830 +// 0x7c00 Entry 64 (size 16 bundles) Reserved
  37.831 +    VMX_DBG_FAULT(64)
  37.832 +	VMX_FAULT(64)
  37.833 +
  37.834 +	.org vmx_ia64_ivt+0x7d00
  37.835 +/////////////////////////////////////////////////////////////////////////////////////////
  37.836 +// 0x7d00 Entry 65 (size 16 bundles) Reserved
  37.837 +	VMX_DBG_FAULT(65)
  37.838 +	VMX_FAULT(65)
  37.839 +
  37.840 +	.org vmx_ia64_ivt+0x7e00
  37.841 +/////////////////////////////////////////////////////////////////////////////////////////
  37.842 +// 0x7e00 Entry 66 (size 16 bundles) Reserved
  37.843 +	VMX_DBG_FAULT(66)
  37.844 +	VMX_FAULT(66)
  37.845 +
  37.846 +	.org vmx_ia64_ivt+0x7f00
  37.847 +/////////////////////////////////////////////////////////////////////////////////////////
  37.848 +// 0x7f00 Entry 67 (size 16 bundles) Reserved
  37.849 +	VMX_DBG_FAULT(67)
  37.850 +	VMX_FAULT(67)
  37.851 +
  37.852 +	.org vmx_ia64_ivt+0x8000
  37.853 +    // There is no particular reason for this code to be here, other than that
  37.854 +    // there happens to be space here that would go unused otherwise.  If this
   37.855 +    // fault ever gets "unreserved", simply move the following code to a more
  37.856 +    // suitable spot...
  37.857 +
  37.858 +
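          +// All the vmx_dispatch_* stubs below share one skeleton:
          +//	VMX_SAVE_MIN_WITH_COVER_R19	// minimal state, r19 = vector info
          +//	alloc + mov out0..outN		// marshal the C handler's arguments
          +//	ssm psr.ic ;; srlz.i		// interruption collection back on
          +//	ssm psr.i			// re-enable interrupts
          +//	VMX_SAVE_REST			// spill the remaining registers
          +//	mov rp=ia64_leave_hypervisor	// common return path
          +//	br.call b6=<handler>		// enter the C handler
          +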
  37.859 +ENTRY(vmx_dispatch_reflection)
  37.860 +    /*
  37.861 +     * Input:
  37.862 +     *  psr.ic: off
  37.863 +     *  r19:    intr type (offset into ivt, see ia64_int.h)
  37.864 +     *  r31:    contains saved predicates (pr)
  37.865 +     */
  37.866 +    VMX_SAVE_MIN_WITH_COVER_R19
  37.867 +    alloc r14=ar.pfs,0,0,4,0
  37.868 +    mov out0=cr.ifa
  37.869 +    mov out1=cr.isr
  37.870 +    mov out2=cr.iim
  37.871 +    mov out3=r15
  37.872 +
  37.873 +    ssm psr.ic
  37.874 +    ;;
  37.875 +    srlz.i                  // guarantee that interruption collection is on
  37.876 +    ;;
  37.877 +    ssm psr.i               // restore psr.i
  37.878 +    adds r3=16,r2                // set up second base pointer
  37.879 +    ;;
  37.880 +    VMX_SAVE_REST
  37.881 +    movl r14=ia64_leave_hypervisor
  37.882 +    ;;
  37.883 +    mov rp=r14
  37.884 +    br.call.sptk.many b6=vmx_reflect_interruption
  37.885 +END(vmx_dispatch_reflection)
  37.886 +
  37.887 +ENTRY(vmx_dispatch_virtualization_fault)
  37.888 +    cmp.eq pEml,pNonEml=r0,r0       /* force pEml =1, save r4 ~ r7 */
  37.889 +    ;;
  37.890 +    VMX_SAVE_MIN_WITH_COVER_R19
  37.891 +    ;;
  37.892 +    alloc r14=ar.pfs,0,0,3,0        // now it's safe (must be first in insn group!)
  37.893 +    mov out0=r13        //vcpu
  37.894 +    mov out1=r4         //cause
  37.895 +    mov out2=r5         //opcode
  37.896 +    ssm psr.ic
  37.897 +    ;;
  37.898 +    srlz.i                  // guarantee that interruption collection is on
  37.899 +    ;;
  37.900 +    ssm psr.i               // restore psr.i
  37.901 +    adds r3=16,r2                // set up second base pointer
  37.902 +    ;;
  37.903 +    VMX_SAVE_REST
  37.904 +    movl r14=ia64_leave_hypervisor
  37.905 +    ;;
  37.906 +    mov rp=r14
  37.907 +    br.call.sptk.many b6=vmx_emulate
  37.908 +END(vmx_dispatch_virtualization_fault)
  37.909 +
  37.910 +
  37.911 +
  37.912 +ENTRY(vmx_dispatch_tlb_miss)
  37.913 +    VMX_SAVE_MIN_WITH_COVER_R19
  37.914 +    alloc r14=ar.pfs,0,0,3,0
  37.915 +    mov out0=r13
  37.916 +    mov out1=r15
  37.917 +    mov out2=cr.ifa
  37.918 +
  37.919 +    ssm psr.ic
  37.920 +    ;;
  37.921 +    srlz.i                  // guarantee that interruption collection is on
  37.922 +    ;;
  37.923 +    ssm psr.i               // restore psr.i
  37.924 +    adds r3=16,r2                // set up second base pointer
  37.925 +    ;;
  37.926 +    VMX_SAVE_REST
  37.927 +    movl r14=ia64_leave_hypervisor
  37.928 +    ;;
  37.929 +    mov rp=r14
  37.930 +    br.call.sptk.many b6=vmx_hpw_miss
  37.931 +END(vmx_dispatch_tlb_miss)
  37.932 +
  37.933 +
  37.934 +ENTRY(vmx_dispatch_break_fault)
  37.935 +    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  37.936 +    ;;
  37.937 +    VMX_SAVE_MIN_WITH_COVER_R19
  37.938 +    ;;
  37.939 +    alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
  37.940 +    mov out0=cr.ifa
  37.941 +    adds out1=16,sp
  37.942 +    mov out2=cr.isr     // FIXME: pity to make this slow access twice
  37.943 +    mov out3=cr.iim     // FIXME: pity to make this slow access twice
  37.944 +
  37.945 +    ssm psr.ic
  37.946 +    ;;
  37.947 +    srlz.i                  // guarantee that interruption collection is on
  37.948 +    ;;
  37.949 +    ssm psr.i               // restore psr.i
  37.950 +    adds r3=16,r2                // set up second base pointer
  37.951 +    ;;
  37.952 +    VMX_SAVE_REST
  37.953 +    movl r14=ia64_leave_hypervisor
  37.954 +    ;;
  37.955 +    mov rp=r14
  37.956 +    br.call.sptk.many b6=vmx_ia64_handle_break
  37.957 +END(vmx_dispatch_break_fault)
  37.958 +
  37.959 +
  37.960 +ENTRY(vmx_dispatch_interrupt)
  37.961 +    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  37.962 +    ;;
  37.963 +	VMX_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
  37.964 +	;;
  37.965 +	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  37.966 +	mov out0=cr.ivr		// pass cr.ivr as first arg
  37.967 +	add out1=16,sp		// pass pointer to pt_regs as second arg
  37.968 +
  37.969 +	ssm psr.ic
  37.970 +	;;
  37.971 +    srlz.i
  37.972 +    ;;
  37.973 +    ssm psr.i
  37.974 +	adds r3=16,r2		// set up second base pointer for SAVE_REST
  37.975 +	;;
  37.976 +	VMX_SAVE_REST
  37.977 +	movl r14=ia64_leave_hypervisor
  37.978 +	;;
  37.979 +	mov rp=r14
  37.980 +	br.call.sptk.many b6=vmx_ia64_handle_irq
  37.981 +END(vmx_dispatch_interrupt)
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/xen/arch/ia64/vmx_minstate.h	Fri May 20 17:23:51 2005 +0000
    38.3 @@ -0,0 +1,329 @@
    38.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    38.5 +/*
    38.6 + * vmx_minstate.h:
    38.7 + * Copyright (c) 2005, Intel Corporation.
    38.8 + *
    38.9 + * This program is free software; you can redistribute it and/or modify it
   38.10 + * under the terms and conditions of the GNU General Public License,
   38.11 + * version 2, as published by the Free Software Foundation.
   38.12 + *
   38.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   38.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   38.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   38.16 + * more details.
   38.17 + *
   38.18 + * You should have received a copy of the GNU General Public License along with
   38.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   38.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   38.21 + *
   38.22 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   38.23 + */
   38.24 +
   38.25 +#include <linux/config.h>
   38.26 +
   38.27 +#include <asm/asmmacro.h>
   38.28 +#include <asm/fpu.h>
   38.29 +#include <asm/mmu_context.h>
   38.30 +#include <asm/offsets.h>
   38.31 +#include <asm/pal.h>
   38.32 +#include <asm/pgtable.h>
   38.33 +#include <asm/processor.h>
   38.34 +#include <asm/ptrace.h>
   38.35 +#include <asm/system.h>
   38.36 +#include <asm/vmx_pal_vsa.h>
   38.37 +#include <asm/vmx_vpd.h>
   38.38 +#include <asm/cache.h>
   38.39 +#include "entry.h"
   38.40 +
   38.41 +#define VMX_MINSTATE_START_SAVE_MIN         \
   38.42 +    mov ar.rsc=0;       /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
   38.43 +    ;;                                          \
   38.44 +    mov.m r28=ar.rnat;                                  \
   38.45 +    addl r22=IA64_RBS_OFFSET,r1;            /* compute base of RBS */       \
   38.46 +    ;;                                          \
   38.47 +    lfetch.fault.excl.nt1 [r22];                                \
   38.48 +    addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */  \
   38.49 +    mov r23=ar.bspstore;                /* save ar.bspstore */          \
   38.50 +    ;;                                          \
   38.51 +    mov ar.bspstore=r22;                /* switch to kernel RBS */      \
   38.52 +    ;;                                          \
   38.53 +    mov r18=ar.bsp;                                     \
    38.54 +    mov ar.rsc=0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */
   38.55 +
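          +/*
          + * Note: ar.rsc=0 puts the RSE in enforced-lazy mode so that ar.rnat
          + * and ar.bspstore can be read and switched safely; once the kernel
          + * RBS is installed, ar.rsc=0x3 turns eager spilling back on.
          + */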
   38.56 +
   38.57 +
   38.58 +#define VMX_MINSTATE_END_SAVE_MIN           \
   38.59 +    bsw.1;          /* switch back to bank 1 (must be last in insn group) */    \
   38.60 +    ;;
   38.61 +
   38.62 +
   38.63 +#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
    38.64 +    /* begin to call PAL vps sync_read and clean up psr.pl */     \
   38.65 +    add r25=IA64_VPD_BASE_OFFSET, r21;       \
   38.66 +    movl r20=__vsa_base;     \
   38.67 +    ;;          \
   38.68 +    ld8 r25=[r25];      /* read vpd base */     \
   38.69 +    ld8 r20=[r20];      /* read entry point */  \
   38.70 +    ;;      \
   38.71 +    mov r6=r25;     \
   38.72 +    add r20=PAL_VPS_SYNC_READ,r20;  \
   38.73 +    ;;  \
   38.74 +{ .mii;  \
   38.75 +    add r22=VPD(VPSR),r25;   \
   38.76 +    mov r24=ip;        \
   38.77 +    mov b0=r20;     \
   38.78 +    ;;      \
   38.79 +};           \
   38.80 +{ .mmb;      \
   38.81 +    add r24 = 0x20, r24;    \
   38.82 +    mov r16 = cr.ipsr;  /* Temp workaround since psr.ic is off */ \
   38.83 +    br.cond.sptk b0;        /*  call the service */ \
   38.84 +    ;;              \
   38.85 +};           \
   38.86 +    ld8 r7=[r22];   \
    38.87 +    /* deposit the ipsr cpl bits into vpd.vpsr, since epc will change them */    \
   38.88 +    extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
   38.89 +    ;;      \
   38.90 +    dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2;   \
   38.91 +    ;;      \
   38.92 +    extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
   38.93 +    ;;      \
   38.94 +    dep r7=r30, r7, IA64_PSR_BE_BIT, 5;   \
   38.95 +    ;;      \
   38.96 +    extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
   38.97 +    ;;      \
   38.98 +    dep r7=r30, r7, IA64_PSR_RI_BIT, 2;   \
   38.99 +    ;;      \
  38.100 +    st8 [r22]=r7;      \
  38.101 +    ;;
  38.102 +
  38.103 +
  38.104 +
  38.105 +#define IA64_CURRENT_REG    IA64_KR(CURRENT)  /* r21 is reserved for current pointer */
  38.106 +//#define VMX_MINSTATE_GET_CURRENT(reg)   mov reg=IA64_CURRENT_REG
  38.107 +#define VMX_MINSTATE_GET_CURRENT(reg)   mov reg=r21
  38.108 +
  38.109 +/*
  38.110 + * VMX_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  38.111 + * the minimum state necessary that allows us to turn psr.ic back
  38.112 + * on.
  38.113 + *
  38.114 + * Assumed state upon entry:
  38.115 + *  psr.ic: off
  38.116 + *  r31:    contains saved predicates (pr)
  38.117 + *
  38.118 + * Upon exit, the state is as follows:
  38.119 + *  psr.ic: off
  38.120 + *   r2 = points to &pt_regs.r16
  38.121 + *   r8 = contents of ar.ccv
  38.122 + *   r9 = contents of ar.csd
  38.123 + *  r10 = contents of ar.ssd
  38.124 + *  r11 = FPSR_DEFAULT
  38.125 + *  r12 = kernel sp (kernel virtual address)
  38.126 + *  r13 = points to current task_struct (kernel virtual address)
  38.127 + *  p15 = TRUE if psr.i is set in cr.ipsr
  38.128 + *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
  38.129 + *      preserved
  38.130 + *
  38.131 + * Note that psr.ic is NOT turned on by this macro.  This is so that
  38.132 + * we can pass interruption state as arguments to a handler.
  38.133 + */
  38.134 +#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
  38.135 +/*  switch rr7 */       \
  38.136 +    movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
  38.137 +    movl r17=(7<<61);        \
  38.138 +    movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
  38.139 +    movl r22=(6<<61);        \
  38.140 +    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);		\
  38.141 +    movl r23=(5<<61);	\
  38.142 +    ;;              \
  38.143 +    mov rr[r17]=r16;             \
  38.144 +    mov rr[r22]=r20;		 \
  38.145 +    mov rr[r23]=r18;		 \
  38.146 +    ;;      \
  38.147 +    srlz.i;      \
  38.148 +    ;;  \
  38.149 +    VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
  38.150 +    mov r27=ar.rsc;         /* M */                         \
  38.151 +    mov r20=r1;         /* A */                         \
  38.152 +    mov r26=ar.unat;        /* M */                         \
  38.153 +    mov r29=cr.ipsr;        /* M */                         \
  38.154 +    COVER;              /* B;; (or nothing) */                  \
  38.155 +    ;;                                          \
  38.156 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
  38.157 +(p6) br.sptk.few vmx_panic;        \
  38.158 +    mov r1=r16;                     \
  38.159 +/*    mov r21=r16;	*/		\
  38.160 +    /* switch from user to kernel RBS: */                           \
  38.161 +    ;;                                          \
  38.162 +    invala;             /* M */                         \
  38.163 +    SAVE_IFS;                                       \
  38.164 +    ;;                                          \
  38.165 +    VMX_MINSTATE_START_SAVE_MIN                                 \
  38.166 +    adds r17=2*L1_CACHE_BYTES,r1;       /* really: biggest cache-line size */       \
  38.167 +    adds r16=PT(CR_IPSR),r1;                                \
  38.168 +    ;;                                          \
  38.169 +    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;                     \
  38.170 +    st8 [r16]=r29;      /* save cr.ipsr */                      \
  38.171 +    ;;                                          \
  38.172 +    lfetch.fault.excl.nt1 [r17];                                \
  38.173 +    tbit.nz p15,p0=r29,IA64_PSR_I_BIT;                          \
  38.174 +    mov r29=b0                                      \
  38.175 +    ;;                                          \
  38.176 +    adds r16=PT(R8),r1; /* initialize first base pointer */             \
  38.177 +    adds r17=PT(R9),r1; /* initialize second base pointer */                \
  38.178 +    ;;                                          \
  38.179 +.mem.offset 0,0; st8.spill [r16]=r8,16;                             \
  38.180 +.mem.offset 8,0; st8.spill [r17]=r9,16;                             \
  38.181 +        ;;                                          \
  38.182 +.mem.offset 0,0; st8.spill [r16]=r10,24;                            \
  38.183 +.mem.offset 8,0; st8.spill [r17]=r11,24;                            \
  38.184 +        ;;                                          \
  38.185 +    mov r8=ar.pfs;         /* I */                         \
  38.186 +    mov r9=cr.iip;         /* M */                         \
  38.187 +    mov r10=ar.fpsr;        /* M */                         \
  38.188 +        ;;                      \
  38.189 +    st8 [r16]=r9,16;    /* save cr.iip */                       \
  38.190 +    st8 [r17]=r30,16;   /* save cr.ifs */                       \
  38.191 +    sub r18=r18,r22;    /* r18=RSE.ndirty*8 */                      \
  38.192 +    ;;          \
  38.193 +    st8 [r16]=r26,16;   /* save ar.unat */                      \
  38.194 +    st8 [r17]=r8,16;    /* save ar.pfs */                       \
  38.195 +    shl r18=r18,16;     /* compute ar.rsc to be used for "loadrs" */            \
  38.196 +    ;;                                          \
  38.197 +    st8 [r16]=r27,16;   /* save ar.rsc */                       \
  38.198 +    st8 [r17]=r28,16;   /* save ar.rnat */                      \
  38.199 +    ;;          /* avoid RAW on r16 & r17 */                    \
  38.200 +    st8 [r16]=r23,16;   /* save ar.bspstore */                      \
  38.201 +    st8 [r17]=r31,16;   /* save predicates */                       \
  38.202 +    ;;                                          \
  38.203 +    st8 [r16]=r29,16;   /* save b0 */                           \
  38.204 +    st8 [r17]=r18,16;   /* save ar.rsc value for "loadrs" */                \
  38.205 +    ;;                                          \
  38.206 +.mem.offset 0,0; st8.spill [r16]=r20,16;    /* save original r1 */              \
  38.207 +.mem.offset 8,0; st8.spill [r17]=r12,16;                            \
  38.208 +    adds r12=-16,r1;    /* switch to kernel memory stack (with 16 bytes of scratch) */  \
  38.209 +    ;;                                          \
  38.210 +.mem.offset 0,0; st8.spill [r16]=r13,16;                            \
  38.211 +.mem.offset 8,0; st8.spill [r17]=r10,16;    /* save ar.fpsr */              \
  38.212 +    mov r13=r21;   /* establish `current' */               \
  38.213 +    ;;                                          \
  38.214 +.mem.offset 0,0; st8.spill [r16]=r15,16;                            \
  38.215 +.mem.offset 8,0; st8.spill [r17]=r14,16;                            \
  38.216 +    ;;                                          \
  38.217 +.mem.offset 0,0; st8.spill [r16]=r2,16;                             \
  38.218 +.mem.offset 8,0; st8.spill [r17]=r3,16;                             \
  38.219 +    adds r2=PT(F6),r1;                         \
  38.220 +    ;;                                          \
  38.221 + .mem.offset 0,0; st8.spill [r16]=r4,16;                             \
  38.222 + .mem.offset 8,0; st8.spill [r17]=r5,16;                             \
  38.223 +    ;;          \
  38.224 + .mem.offset 0,0; st8.spill [r16]=r6,16;     \
  38.225 + .mem.offset 8,0; st8.spill [r17]=r7,16;     \
  38.226 +    mov r20=ar.ccv;      \
  38.227 +    ;;  \
  38.228 +  mov r18=cr.iipa;  \
  38.229 +  mov r4=cr.isr;   \
  38.230 +  mov r22=ar.unat;    \
  38.231 +    ;;  \
  38.232 +  st8 [r16]=r18,16;      \
  38.233 +  st8 [r17]=r4;      \
  38.234 +    ;;      \
  38.235 +    adds r16=PT(EML_UNAT),r1;   \
  38.236 +    adds r17=PT(AR_CCV),r1;                 \
  38.237 +    ;;                      \
  38.238 +    st8 [r16]=r22,8;     \
  38.239 +    st8 [r17]=r20;       \
  38.240 +    mov r4=r24;         \
  38.241 +    mov r5=r25;         \
  38.242 +     ;;  \
  38.243 +    st8 [r16]=r0;  \
  38.244 +    EXTRA;                                          \
  38.245 +    mov r9=ar.csd;                                      \
  38.246 +    mov r10=ar.ssd;                                     \
  38.247 +    movl r11=FPSR_DEFAULT;   /* L-unit */                           \
  38.248 +    movl r1=__gp;       /* establish kernel global pointer */               \
  38.249 +    ;;                                          \
  38.250 +    PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
  38.251 +    VMX_MINSTATE_END_SAVE_MIN
  38.252 +
  38.253 +/*
  38.254 + * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
  38.255 + *
  38.256 + * Assumed state upon entry:
  38.257 + *  psr.ic: on
  38.258 + *  r2: points to &pt_regs.f6
  38.259 + *  r3: points to &pt_regs.f7
   38.260 + *  r4, r5: scratch
  38.261 + *  r6: points to vpd
  38.262 + *  r7: vpsr
  38.263 + *  r9: contents of ar.csd
  38.264 + *  r10:    contents of ar.ssd
  38.265 + *  r11:    FPSR_DEFAULT
  38.266 + *
  38.267 + * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
  38.268 + */
  38.269 +#define VMX_SAVE_REST               \
  38.270 +    tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT;  /* guest bank0 or bank1 ? */      \
  38.271 +    ;;      \
  38.272 +(pBN0) add r4=VPD(VBGR),r6;     \
  38.273 +(pBN0) add r5=VPD(VBGR)+0x8,r6;     \
  38.274 +(pBN0) add r7=VPD(VBNAT),r6;     \
  38.275 +    ;;      \
  38.276 +(pBN1) add r5=VPD(VGR)+0x8,r6;      \
  38.277 +(pBN1) add r4=VPD(VGR),r6;      \
  38.278 +(pBN1) add r7=VPD(VNAT),r6;      \
  38.279 +    ;;      \
  38.280 +.mem.offset 0,0; st8.spill [r4]=r16,16;     \
  38.281 +.mem.offset 8,0; st8.spill [r5]=r17,16;     \
  38.282 +    ;;                  \
  38.283 +.mem.offset 0,0; st8.spill [r4]=r18,16;     \
  38.284 +.mem.offset 8,0; st8.spill [r5]=r19,16;     \
  38.285 +    ;;                  \
  38.286 +.mem.offset 0,0; st8.spill [r4]=r20,16;     \
  38.287 +.mem.offset 8,0; st8.spill [r5]=r21,16;     \
  38.288 +    mov r18=b6;         \
  38.289 +    ;;                  \
  38.290 +.mem.offset 0,0; st8.spill [r4]=r22,16;     \
  38.291 +.mem.offset 8,0; st8.spill [r5]=r23,16;     \
  38.292 +    mov r19=b7;     \
  38.293 +    ;;                  \
  38.294 +.mem.offset 0,0; st8.spill [r4]=r24,16;     \
  38.295 +.mem.offset 8,0; st8.spill [r5]=r25,16;     \
  38.296 +    ;;                  \
  38.297 +.mem.offset 0,0; st8.spill [r4]=r26,16;     \
  38.298 +.mem.offset 8,0; st8.spill [r5]=r27,16;     \
  38.299 +    ;;                  \
  38.300 +.mem.offset 0,0; st8.spill [r4]=r28,16;     \
  38.301 +.mem.offset 8,0; st8.spill [r5]=r29,16;     \
  38.302 +    ;;                  \
  38.303 +.mem.offset 0,0; st8.spill [r4]=r30,16;     \
  38.304 +.mem.offset 8,0; st8.spill [r5]=r31,16;     \
  38.305 +    ;;                  \
  38.306 +    mov r30=ar.unat;    \
  38.307 +    ;;      \
  38.308 +    st8 [r7]=r30;       \
  38.309 +    mov ar.fpsr=r11;    /* M-unit */    \
  38.310 +    ;;                  \
  38.311 +    stf.spill [r2]=f6,32;           \
  38.312 +    stf.spill [r3]=f7,32;           \
  38.313 +    ;;                  \
  38.314 +    stf.spill [r2]=f8,32;           \
  38.315 +    stf.spill [r3]=f9,32;           \
  38.316 +    ;;                  \
  38.317 +    stf.spill [r2]=f10;         \
  38.318 +    stf.spill [r3]=f11;         \
  38.319 +    ;;                  \
  38.320 +    adds r2=PT(B6)-PT(F10),r2;      \
  38.321 +    adds r3=PT(B7)-PT(F11),r3;      \
  38.322 +    ;;          \
  38.323 +    st8 [r2]=r18,16;       /* b6 */    \
  38.324 +    st8 [r3]=r19,16;       /* b7 */    \
  38.325 +    ;;                  \
  38.326 +    st8 [r2]=r9;           /* ar.csd */    \
  38.327 +    st8 [r3]=r10;          /* ar.ssd */    \
  38.328 +    ;;
  38.329 +
  38.330 +#define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
  38.331 +#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
  38.332 +#define VMX_SAVE_MIN      VMX_DO_SAVE_MIN(     , mov r30=r0, )
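The three variants above differ only in what they substitute for COVER, SAVE_IFS and EXTRA: the WITH_COVER forms issue a cover instruction and capture cr.ifs into r30, while the plain form zeroes r30. A minimal C-preprocessor sketch of the same parameterization pattern, with puts() payloads standing in for the real register moves (an illustration, not the actual macros):

    #include <stdio.h>

    /* Sketch: how empty macro arguments drop whole instruction slots,
     * mirroring VMX_DO_SAVE_MIN's COVER/SAVE_IFS/EXTRA parameters. */
    #define DO_SAVE_MIN(COVER, SAVE_IFS, EXTRA) \
        do { COVER; SAVE_IFS; EXTRA; } while (0)

    #define SAVE_MIN_WITH_COVER DO_SAVE_MIN(puts("cover"), puts("r30=cr.ifs"), )
    #define SAVE_MIN            DO_SAVE_MIN(             , puts("r30=0"),      )

    int main(void)
    {
        SAVE_MIN_WITH_COVER;    /* prints "cover" then "r30=cr.ifs" */
        SAVE_MIN;               /* prints only "r30=0"              */
        return 0;
    }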
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/xen/arch/ia64/vmx_phy_mode.c	Fri May 20 17:23:51 2005 +0000
    39.3 @@ -0,0 +1,393 @@
    39.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    39.5 +/*
    39.6 + * vmx_phy_mode.c: emulating domain physical mode.
    39.7 + * Copyright (c) 2005, Intel Corporation.
    39.8 + *
    39.9 + * This program is free software; you can redistribute it and/or modify it
   39.10 + * under the terms and conditions of the GNU General Public License,
   39.11 + * version 2, as published by the Free Software Foundation.
   39.12 + *
   39.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   39.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   39.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   39.16 + * more details.
   39.17 + *
   39.18 + * You should have received a copy of the GNU General Public License along with
   39.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   39.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   39.21 + *
   39.22 + * Arun Sharma (arun.sharma@intel.com)
   39.23 + * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
   39.24 + * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
   39.25 + */
   39.26 +
   39.27 +
   39.28 +#include <asm/processor.h>
   39.29 +#include <asm/gcc_intrin.h>
   39.30 +#include <asm/vmx_phy_mode.h>
   39.31 +#include <xen/sched.h>
   39.32 +#include <asm/pgtable.h>
   39.33 +
   39.34 +
   39.35 +int valid_mm_mode[8] = {
   39.36 +    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
   39.37 +    INV_MODE,
   39.38 +    INV_MODE,
   39.39 +    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
   39.40 +    INV_MODE,
   39.41 +    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
   39.42 +    INV_MODE,
   39.43 +    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/
   39.44 +};
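valid_mm_mode[] is indexed by the guest's (it, dt, rt) bits packed into a 3-bit value. A self-contained sketch of that indexing follows; the MODE_IND() definition below is modeled on the helper in vmx_phy_mode.h and is an assumption, not a verbatim copy:

    #include <stdio.h>

    struct psr_bits { unsigned it, dt, rt; };

    /* Assumed packing: it is the high bit, rt the low bit. */
    #define MODE_IND(p) (((p).it << 2) | ((p).dt << 1) | (p).rt)

    int main(void)
    {
        struct psr_bits phys = { 0, 0, 0 };  /* -> index 0, GUEST_PHYS */
        struct psr_bits virt = { 1, 1, 1 };  /* -> index 7, GUEST_VIRT */

        printf("%u %u\n", MODE_IND(phys), MODE_IND(virt));  /* 0 7 */
        return 0;
    }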
   39.45 +
   39.46 +/*
    39.47 + * Special notes:
    39.48 + * - Indexed by the it/dt/rt bit sequence
    39.49 + * - Only mode transitions that actually occur are allowed in this table
    39.50 + * - The RSE is placed in lazy mode when emulating guest partial mode
    39.51 + * - If a gva falls in rr0 or rr4, the only allowed case is identity
    39.52 + *   mapping (gva = gpa); anything else is a panic (how to handle it?)
   39.53 + */
   39.54 +int mm_switch_table[8][8] = {
    39.55 +    /*  2004/09/12 (Kevin): Allow switch to self */
    39.56 +    /*
    39.57 +     *  (it,dt,rt): (0,0,0) -> (1,1,1)
    39.58 +     *  This transition usually occurs very early in the Linux boot
    39.59 +     *  procedure. Another case is in efi and pal calls.
    39.60 +     *  (see "arch/ia64/kernel/head.S")
    39.61 +     *
    39.62 +     *  (it,dt,rt): (0,0,0) -> (0,1,1)
    39.63 +     *  This transition is found when OSYa exits efi boot services.
    39.64 +     *  Because gva = gpa in this case (same region), data accesses
    39.65 +     *  can be satisfied even though the itlb entry for physical
    39.66 +     *  emulation is hit.
    39.67 +     */
   39.68 +    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
   39.69 +    0,  0,  0,  0,  0,  0,  0,  0,
   39.70 +    0,  0,  0,  0,  0,  0,  0,  0,
   39.71 +    /*
   39.72 +     *  (it,dt,rt): (0,1,1) -> (1,1,1)
   39.73 +     *  This kind of transition is found in OSYa.
   39.74 +     *
   39.75 +     *  (it,dt,rt): (0,1,1) -> (0,0,0)
    39.76 +     *  This transition is also found in OSYa.
   39.77 +     */
   39.78 +    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
   39.79 +    /* (1,0,0)->(1,1,1) */
   39.80 +    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
   39.81 +    /*
    39.82 +     *  (it,dt,rt): (1,0,1) -> (1,1,1)
    39.83 +     *  This transition usually occurs when Linux returns
    39.84 +     *  from the low level TLB miss handlers.
    39.85 +     *  (see "arch/ia64/kernel/ivt.S")
    39.86 +     */
   39.87 +    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
   39.88 +    0,  0,  0,  0,  0,  0,  0,  0,
   39.89 +    /*
    39.90 +     *  (it,dt,rt): (1,1,1) -> (1,0,1)
    39.91 +     *  This transition usually occurs in the Linux low level
    39.92 +     *  TLB miss handlers. (see "arch/ia64/kernel/ivt.S")
    39.93 +     *
    39.94 +     *  (it,dt,rt): (1,1,1) -> (0,0,0)
    39.95 +     *  This transition usually occurs in pal and efi calls,
    39.96 +     *  which require running in physical mode.
    39.97 +     *  (see "arch/ia64/kernel/head.S")
    39.98 +     *  (1,1,1) -> (1,0,0)
   39.99 +     */
  39.100 +
  39.101 +    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
  39.102 +};
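As a worked example of the table: a guest enabling full translation moves from (it,dt,rt) = (0,0,0), index 0, to (1,1,1), index 7, and mm_switch_table[0][7] is SW_P2V, so switch_mm_mode() takes the physical-to-virtual path. A small self-contained sketch of the row-0 lookup; the SW_* values here are illustrative stand-ins for the real constants in vmx_phy_mode.h:

    /* Stand-in action codes; real values live in vmx_phy_mode.h. */
    enum sw_act { SW_NONE = 0, SW_SELF, SW_NOP, SW_P2V, SW_V2P };

    /* Row 0 of the switch table: transitions out of (it,dt,rt)=(0,0,0). */
    static const enum sw_act row0[8] = {
        SW_SELF, SW_NONE, SW_NONE, SW_NOP,
        SW_NONE, SW_NONE, SW_NONE, SW_P2V,
    };

    enum sw_act action_phys_to_virt(void)
    {
        return row0[7];    /* (0,0,0) -> (1,1,1) yields SW_P2V */
    }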
  39.103 +
  39.104 +void
  39.105 +physical_mode_init(VCPU *vcpu)
  39.106 +{
  39.107 +    UINT64 psr;
  39.108 +    struct domain * d = vcpu->domain;
  39.109 +
  39.110 +    vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID+((d->id)<<3);
  39.111 +    /* FIXME */
  39.112 +#if 0
  39.113 +    vcpu->domain->arch.emul_phy_rr0.ps = 28;  /* set page size to 256M */
  39.114 +#endif
   39.115 +    vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
  39.116 +    vcpu->domain->arch.emul_phy_rr0.ve = 1; /* enable VHPT walker on this region */
  39.117 +
  39.118 +    vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->id)<<3) + 4;
  39.119 +    vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
  39.120 +    vcpu->domain->arch.emul_phy_rr4.ve = 1; /* enable VHPT walker on this region */
  39.121 +
  39.122 +    vcpu->arch.old_rsc = 0;
  39.123 +    vcpu->arch.mode_flags = GUEST_IN_PHY;
  39.124 +
  39.125 +    psr = ia64_clear_ic();
  39.126 +
  39.127 +    ia64_set_rr((VRN0<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr0.rrval);
  39.128 +    ia64_srlz_d();
  39.129 +    ia64_set_rr((VRN4<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr4.rrval);
  39.130 +    ia64_srlz_d();
  39.131 +#if 0
  39.132 +    /* FIXME: temp workaround to support guest physical mode */
   39.133 +    ia64_itr(0x1, IA64_TEMP_PHYSICAL, dom0_start,
   39.134 +             pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
   39.135 +             28);
   39.136 +    ia64_itr(0x2, IA64_TEMP_PHYSICAL, dom0_start,
   39.137 +             pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
   39.138 +             28);
   39.139 +    ia64_srlz_i();
  39.140 +#endif
  39.141 +    ia64_set_psr(psr);
  39.142 +    ia64_srlz_i();
  39.143 +    return;
  39.144 +}
  39.145 +
  39.146 +extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
  39.147 +void
  39.148 +physical_itlb_miss(VCPU *vcpu, u64 vadr)
  39.149 +{
  39.150 +    u64 psr;
  39.151 +    IA64_PSR vpsr;
  39.152 +    u64 mppn,gppn;
  39.153 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  39.154 +    gppn=(vadr<<1)>>13;
  39.155 +    mppn = get_mfn(DOMID_SELF,gppn,1);
  39.156 +    mppn=(mppn<<12)|(vpsr.cpl<<7)|PHY_PAGE_WB;
  39.157 +
  39.158 +    psr=ia64_clear_ic();
  39.159 +    ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
  39.160 +    ia64_set_psr(psr);
  39.161 +    ia64_srlz_i();
  39.162 +    return;
  39.163 +}
  39.164 +
  39.165 +void
  39.166 +physical_dtlb_miss(VCPU *vcpu, u64 vadr)
  39.167 +{
  39.168 +    u64 psr;
  39.169 +    IA64_PSR vpsr;
  39.170 +    u64 mppn,gppn;
  39.171 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  39.172 +    gppn=(vadr<<1)>>13;
  39.173 +    mppn = get_mfn(DOMID_SELF,gppn,1);
  39.174 +    mppn=(mppn<<12)|(vpsr.cpl<<7);
  39.175 +    if(vadr>>63)
  39.176 +        mppn |= PHY_PAGE_UC;
  39.177 +    else
  39.178 +        mppn |= PHY_PAGE_WB;
  39.179 +
  39.180 +    psr=ia64_clear_ic();
  39.181 +    ia64_itc(2,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
  39.182 +    ia64_set_psr(psr);
  39.183 +    ia64_srlz_i();
  39.184 +    return;
  39.185 +}
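Both miss handlers above derive the guest physical page number with gppn = (vadr << 1) >> 13: the left shift discards bit 63 (which only selects the uncacheable I/O space) and the right shift then drops the 12-bit page offset. The pte is rebuilt from the machine frame number, the guest privilege level in the PL field, and a memory attribute. A minimal sketch of that arithmetic, assuming 4KB emulation pages (helper names are illustrative):

    #include <stdint.h>

    #define PAGE_SHIFT_4K 12

    /* (vadr << 1) >> 13  ==  (vadr & ~(1UL << 63)) >> 12 */
    static uint64_t gppn_of(uint64_t vadr)
    {
        return (vadr << 1) >> (PAGE_SHIFT_4K + 1);
    }

    /* pfn in the high bits, privilege level at bit 7, attribute bits low.
     * `attr` stands in for PHY_PAGE_WB / PHY_PAGE_UC. */
    static uint64_t pte_of(uint64_t mfn, unsigned cpl, uint64_t attr)
    {
        return (mfn << PAGE_SHIFT_4K) | ((uint64_t)cpl << 7) | attr;
    }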
  39.186 +
  39.187 +void
  39.188 +vmx_init_all_rr(VCPU *vcpu)
  39.189 +{
  39.190 +	VMX(vcpu,vrr[VRN0]) = 0x38;
  39.191 +	VMX(vcpu,vrr[VRN1]) = 0x38;
  39.192 +	VMX(vcpu,vrr[VRN2]) = 0x38;
  39.193 +	VMX(vcpu,vrr[VRN3]) = 0x38;
  39.194 +	VMX(vcpu,vrr[VRN4]) = 0x38;
  39.195 +	VMX(vcpu,vrr[VRN5]) = 0x38;
  39.196 +	VMX(vcpu,vrr[VRN6]) = 0x60;
  39.197 +	VMX(vcpu,vrr[VRN7]) = 0x60;
  39.198 +
  39.199 +	VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
  39.200 +	VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
  39.201 +	VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
  39.202 +}
  39.203 +
  39.204 +void
  39.205 +vmx_load_all_rr(VCPU *vcpu)
  39.206 +{
  39.207 +	unsigned long psr;
  39.208 +
  39.209 +	psr = ia64_clear_ic();
  39.210 +
   39.211 +	/* WARNING: virtual mode and physical mode must not coexist
   39.212 +	 * in the same region
   39.213 +	 */
  39.214 +	if (is_physical_mode(vcpu)) {
  39.215 +		ia64_set_rr((VRN0 << VRN_SHIFT),
  39.216 +			     vcpu->domain->arch.emul_phy_rr0.rrval);
  39.217 +		ia64_set_rr((VRN4 << VRN_SHIFT),
  39.218 +			     vcpu->domain->arch.emul_phy_rr4.rrval);
  39.219 +	} else {
  39.220 +		ia64_set_rr((VRN0 << VRN_SHIFT),
  39.221 +			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
  39.222 +		ia64_set_rr((VRN4 << VRN_SHIFT),
  39.223 +			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
  39.224 +	}
  39.225 +
  39.226 +#if 1
  39.227 +	/* rr567 will be postponed to last point when resuming back to guest */
  39.228 +	ia64_set_rr((VRN1 << VRN_SHIFT),
  39.229 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
  39.230 +	ia64_set_rr((VRN2 << VRN_SHIFT),
  39.231 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
  39.232 +	ia64_set_rr((VRN3 << VRN_SHIFT),
  39.233 +		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
  39.234 +#endif
  39.235 +	ia64_srlz_d();
  39.236 +	ia64_set_psr(psr);
   39.237 +	ia64_srlz_i();
  39.238 +}
  39.239 +
  39.240 +void
  39.241 +switch_to_physical_rid(VCPU *vcpu)
  39.242 +{
  39.243 +    UINT64 psr;
  39.244 +
   39.245 +    /* Switch rr[0] and rr[4] to the physical-mode emulation rids */
  39.246 +
  39.247 +    psr=ia64_clear_ic();
  39.248 +    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval);
  39.249 +    ia64_srlz_d();
  39.250 +    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval);
  39.251 +    ia64_srlz_d();
  39.252 +
  39.253 +    ia64_set_psr(psr);
  39.254 +    ia64_srlz_i();
  39.255 +    return;
  39.256 +}
  39.257 +
  39.258 +
  39.259 +void
  39.260 +switch_to_virtual_rid(VCPU *vcpu)
  39.261 +{
  39.262 +    UINT64 psr;
  39.263 +    ia64_rr mrr;
  39.264 +
  39.265 +    psr=ia64_clear_ic();
  39.266 +
  39.267 +    mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
  39.268 +    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  39.270 +    mrr.ve = 1;
  39.271 +    ia64_set_rr(VRN0<<VRN_SHIFT, mrr.rrval );
  39.272 +    ia64_srlz_d();
  39.273 +    mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
  39.274 +    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  39.275 +    mrr.ve = 1;
  39.276 +    ia64_set_rr(VRN4<<VRN_SHIFT, mrr.rrval );
  39.277 +    ia64_srlz_d();
  39.278 +    ia64_set_psr(psr);
  39.279 +    ia64_srlz_i();
  39.280 +    return;
  39.281 +}
  39.282 +
  39.283 +static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
  39.284 +{
  39.285 +    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
  39.286 +}
  39.287 +
  39.288 +void
  39.289 +switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
  39.290 +{
  39.291 +    int act;
  39.292 +    REGS * regs=vcpu_regs(vcpu);
  39.293 +    act = mm_switch_action(old_psr, new_psr);
  39.294 +    switch (act) {
  39.295 +    case SW_V2P:
  39.296 +        vcpu->arch.old_rsc = regs->ar_rsc;
  39.297 +        switch_to_physical_rid(vcpu);
   39.298 +        /*
   39.299 +         * Set the RSE to enforced lazy mode, to prevent active RSE
   39.300 +         * save/restore while in guest physical mode.
   39.301 +         */
  39.302 +        regs->ar_rsc &= ~(IA64_RSC_MODE);
  39.303 +        vcpu->arch.mode_flags |= GUEST_IN_PHY;
  39.304 +        break;
  39.305 +    case SW_P2V:
  39.306 +        switch_to_virtual_rid(vcpu);
   39.307 +        /*
   39.308 +         * Recover the old mode that was saved on entry to
   39.309 +         * guest physical mode.
   39.310 +         */
  39.311 +        regs->ar_rsc = vcpu->arch.old_rsc;
  39.312 +        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
  39.313 +        break;
  39.314 +    case SW_SELF:
  39.315 +        printf("Switch to self-0x%lx!!! MM mode doesn't change...\n",
  39.316 +            old_psr.val);
  39.317 +        break;
  39.318 +    case SW_NOP:
  39.319 +        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
  39.320 +            old_psr.val, new_psr.val);
  39.321 +        break;
  39.322 +    default:
  39.323 +        /* Sanity check */
   39.324 +        printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
  39.325 +        panic("Unexpected virtual <--> physical mode transition");
  39.326 +        break;
  39.327 +    }
  39.328 +    return;
  39.329 +}
  39.330 +
  39.331 +
  39.332 +
  39.349 +void
   39.350 +check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
   39.351 +{
  39.353 +    if ( (old_psr.dt != new_psr.dt ) ||
  39.354 +         (old_psr.it != new_psr.it ) ||
  39.355 +         (old_psr.rt != new_psr.rt )
  39.356 +         ) {
  39.357 +        switch_mm_mode (vcpu, old_psr, new_psr);
  39.358 +    }
  39.359 +
   39.360 +    return;
  39.361 +}
  39.362 +
  39.363 +
   39.364 +/*
   39.365 + * In physical mode, inserting a tc/tr entry for region 0 or 4
   39.366 + * uses RID[0] or RID[4], which are reserved for physical mode
   39.367 + * emulation. However, the inserted tc/tr entries need the rid
   39.368 + * for virtual mode, so the original virtual rid must be
   39.369 + * restored before the insert.
   39.370 + *
   39.371 + * Operations that require such a switch include:
   39.372 + *  - insertions (itc.*, itr.*)
   39.373 + *  - purges (ptc.* and ptr.*)
   39.374 + *  - tpa
   39.375 + *  - tak
   39.376 + *  - thash?, ttag?
   39.377 + * All of the above need the actual virtual rid for the destination entry.
   39.378 + */
  39.379 +
  39.380 +void
  39.381 +prepare_if_physical_mode(VCPU *vcpu)
  39.382 +{
  39.383 +    if (is_physical_mode(vcpu))
  39.384 +        switch_to_virtual_rid(vcpu);
  39.385 +    return;
  39.386 +}
  39.387 +
  39.388 +/* Recover always follows prepare */
  39.389 +void
  39.390 +recover_if_physical_mode(VCPU *vcpu)
  39.391 +{
  39.392 +    if (is_physical_mode(vcpu))
  39.393 +        switch_to_physical_rid(vcpu);
  39.394 +    return;
  39.395 +}
  39.396 +
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xen/arch/ia64/vmx_process.c	Fri May 20 17:23:51 2005 +0000
    40.3 @@ -0,0 +1,345 @@
    40.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    40.5 +/*
    40.6 + * vmx_process.c: handling VMX architecture-related VM exits
    40.7 + * Copyright (c) 2005, Intel Corporation.
    40.8 + *
    40.9 + * This program is free software; you can redistribute it and/or modify it
   40.10 + * under the terms and conditions of the GNU General Public License,
   40.11 + * version 2, as published by the Free Software Foundation.
   40.12 + *
   40.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   40.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   40.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   40.16 + * more details.
   40.17 + *
   40.18 + * You should have received a copy of the GNU General Public License along with
   40.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   40.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   40.21 + *
   40.22 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   40.23 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   40.24 + */
   40.25 +
   40.26 +#include <xen/config.h>
   40.27 +#include <xen/lib.h>
   40.28 +#include <xen/errno.h>
   40.29 +#include <xen/sched.h>
   40.30 +#include <xen/smp.h>
   40.31 +#include <asm/ptrace.h>
   40.32 +#include <xen/delay.h>
   40.33 +
   40.34 +#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
   40.35 +#include <asm/sal.h>    /* FOR struct ia64_sal_retval */
   40.36 +
   40.37 +#include <asm/system.h>
   40.38 +#include <asm/io.h>
   40.39 +#include <asm/processor.h>
   40.40 +#include <asm/desc.h>
   40.41 +//#include <asm/ldt.h>
   40.42 +#include <xen/irq.h>
   40.43 +#include <xen/event.h>
   40.44 +#include <asm/regionreg.h>
   40.45 +#include <asm/privop.h>
   40.46 +#include <asm/ia64_int.h>
   40.47 +#include <asm/hpsim_ssc.h>
   40.48 +#include <asm/dom_fw.h>
   40.49 +#include <asm/vmx_vcpu.h>
   40.50 +#include <asm/kregs.h>
   40.51 +#include <asm/vmx_mm_def.h>
   40.52 +/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
   40.53 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
   40.54 +
   40.55 +
   40.56 +extern struct ia64_sal_retval pal_emulator_static(UINT64);
   40.57 +extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
   40.58 +extern void rnat_consumption (VCPU *vcpu);
   40.59 +
   40.60 +IA64FAULT
   40.61 +vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   40.62 +{
   40.63 +	static int first_time = 1;
   40.64 +	struct domain *d = (struct domain *) current->domain;
    40.65 +	struct exec_domain *ed = (struct exec_domain *) current;
   40.66 +	extern unsigned long running_on_sim;
   40.67 +	unsigned long i, sal_param[8];
   40.68 +
   40.69 +#if 0
   40.70 +	if (first_time) {
   40.71 +		if (platform_is_hp_ski()) running_on_sim = 1;
   40.72 +		else running_on_sim = 0;
   40.73 +		first_time = 0;
   40.74 +	}
   40.75 +	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
   40.76 +		if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
   40.77 +		else do_ssc(vcpu_get_gr(current,36), regs);
   40.78 +	}
   40.79 +#endif
   40.80 +	if (iim == d->breakimm) {
   40.81 +		struct ia64_sal_retval x;
   40.82 +		switch (regs->r2) {
   40.83 +		    case FW_HYPERCALL_PAL_CALL:
   40.84 +			//printf("*** PAL hypercall: index=%d\n",regs->r28);
   40.85 +			//FIXME: This should call a C routine
   40.86 +			x = pal_emulator_static(VMX_VPD(ed, vgr[12]));
   40.87 +			regs->r8 = x.status; regs->r9 = x.v0;
   40.88 +			regs->r10 = x.v1; regs->r11 = x.v2;
   40.89 +#if 0
   40.90 +			if (regs->r8)
   40.91 +				printk("Failed vpal emulation, with index:0x%lx\n",
   40.92 +					VMX_VPD(ed, vgr[12]));
   40.93 +#endif
   40.94 +			break;
   40.95 +		    case FW_HYPERCALL_SAL_CALL:
   40.96 +			for (i = 0; i < 8; i++)
   40.97 +				vmx_vcpu_get_gr(ed, 32+i, &sal_param[i]);
   40.98 +			x = sal_emulator(sal_param[0], sal_param[1],
   40.99 +					 sal_param[2], sal_param[3],
  40.100 +					 sal_param[4], sal_param[5],
  40.101 +					 sal_param[6], sal_param[7]);
  40.102 +			regs->r8 = x.status; regs->r9 = x.v0;
  40.103 +			regs->r10 = x.v1; regs->r11 = x.v2;
  40.104 +#if 0
  40.105 +			if (regs->r8)
  40.106 +				printk("Failed vsal emulation, with index:0x%lx\n",
  40.107 +					sal_param[0]);
  40.108 +#endif
  40.109 +			break;
  40.110 +		    case FW_HYPERCALL_EFI_RESET_SYSTEM:
  40.111 +			printf("efi.reset_system called ");
  40.112 +			if (current->domain == dom0) {
  40.113 +				printf("(by dom0)\n ");
  40.114 +				(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
  40.115 +			}
  40.116 +			printf("(not supported for non-0 domain)\n");
  40.117 +			regs->r8 = EFI_UNSUPPORTED;
  40.118 +			break;
  40.119 +		    case FW_HYPERCALL_EFI_GET_TIME:
  40.120 +			{
  40.121 +			unsigned long *tv, *tc;
  40.122 +			fooefi();
  40.123 +			vmx_vcpu_get_gr(ed, 32, &tv);
  40.124 +			vmx_vcpu_get_gr(ed, 33, &tc);
  40.125 +			printf("efi_get_time(%p,%p) called...",tv,tc);
  40.126 +			tv = __va(translate_domain_mpaddr(tv));
  40.127 +			if (tc) tc = __va(translate_domain_mpaddr(tc));
  40.128 +			regs->r8 = (*efi.get_time)(tv,tc);
  40.129 +			printf("and returns %lx\n",regs->r8);
  40.130 +			}
  40.131 +			break;
  40.132 +		    case FW_HYPERCALL_EFI_SET_TIME:
  40.133 +		    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
  40.134 +		    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
  40.135 +			// FIXME: need fixes in efi.h from 2.6.9
  40.136 +		    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
  40.137 +			// FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
  40.138 +			// SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
  40.139 +			// POINTER ARGUMENTS WILL BE VIRTUAL!!
  40.140 +		    case FW_HYPERCALL_EFI_GET_VARIABLE:
  40.141 +			// FIXME: need fixes in efi.h from 2.6.9
  40.142 +		    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
  40.143 +		    case FW_HYPERCALL_EFI_SET_VARIABLE:
  40.144 +		    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
  40.145 +			// FIXME: need fixes in efi.h from 2.6.9
  40.146 +			regs->r8 = EFI_UNSUPPORTED;
  40.147 +			break;
  40.148 +		}
  40.149 +#if 0
  40.150 +		if (regs->r8)
  40.151 +			printk("Failed vgfw emulation, with index:0x%lx\n",
  40.152 +				regs->r2);
  40.153 +#endif
  40.154 +		vmx_vcpu_increment_iip(current);
  40.155 +	} else
  40.156 +		vmx_reflect_interruption(ifa,isr,iim,11);
   40.157 +	return IA64_NO_FAULT;
   40.157 +}
  40.158 +
  40.159 +static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
  40.160 +    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
  40.161 +    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
  40.162 +    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
  40.163 +    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
  40.164 +    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
  40.165 +    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
  40.166 +    0x7f00,
  40.167 +};
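vec2off[] translates the small vector number passed to vmx_reflect_interruption() into the guest IVT offset: vectors 0 through 20 sit on 0x400 boundaries and the rest on 0x100 boundaries, so the break-instruction vector 11 used above maps to offset 0x2c00. A quick self-checking sketch of that rule (an illustration of the table, not code from the tree):

    #include <assert.h>
    #include <stdint.h>

    /* Reproduce the spacing rule behind the vec2off[] table. */
    static uint64_t vec2off_rule(unsigned vec)
    {
        return (vec <= 20) ? (uint64_t)vec * 0x400
                           : 0x5000 + (uint64_t)(vec - 20) * 0x100;
    }

    int main(void)
    {
        assert(vec2off_rule(11) == 0x2c00);  /* break instruction vector */
        assert(vec2off_rule(20) == 0x5000);
        assert(vec2off_rule(25) == 0x5500);
        return 0;
    }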
  40.168 +
  40.169 +
  40.170 +
  40.171 +void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
  40.172 +     UINT64 vector)
  40.173 +{
  40.174 +    VCPU *vcpu = current;
  40.175 +    REGS *regs=vcpu_regs(vcpu);
   40.176 +    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
  40.177 +    if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
  40.178 +        panic("Guest nested fault!");
  40.179 +    }
  40.180 +    VPD_CR(vcpu,isr)=isr;
  40.181 +    VPD_CR(vcpu,iipa) = regs->cr_iip;
  40.182 +    vector=vec2off[vector];
  40.183 +    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
  40.184 +        VPD_CR(vcpu,iim) = iim;
  40.185 +    else {
  40.186 +        set_ifa_itir_iha(vcpu,ifa,1,1,1);
  40.187 +    }
  40.188 +    inject_guest_interruption(vcpu, vector);
  40.189 +}
  40.190 +
  40.191 +// ONLY gets called from ia64_leave_kernel
  40.192 +// ONLY call with interrupts disabled?? (else might miss one?)
  40.193 +// NEVER successful if already reflecting a trap/fault because psr.i==0
  40.194 +void vmx_deliver_pending_interrupt(struct pt_regs *regs)
  40.195 +{
  40.196 +	struct domain *d = current->domain;
  40.197 +	struct exec_domain *ed = current;
  40.198 +	// FIXME: Will this work properly if doing an RFI???
  40.199 +	if (!is_idle_task(d) ) {	// always comes from guest
  40.200 +		//vcpu_poke_timer(ed);
  40.201 +		//if (vcpu_deliverable_interrupts(ed)) {
  40.202 +		//	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
  40.203 +		//	foodpi();
  40.204 +		//	reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
  40.205 +		//}
  40.206 +	        extern void vmx_dorfirfi(void);
  40.207 +		struct pt_regs *user_regs = vcpu_regs(current);
  40.208 +
  40.209 +		if (user_regs != regs)
  40.210 +			printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
  40.211 +		if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
  40.212 +			return;
  40.213 +		vmx_check_pending_irq(ed);
  40.214 +	}
  40.215 +}
  40.216 +
  40.217 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
  40.218 +
  40.219 +/* We came here because the H/W VHPT walker failed to find an entry */
   40.220 +IA64FAULT vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
  40.221 +{
  40.222 +    IA64_PSR vpsr;
  40.223 +    CACHE_LINE_TYPE type;
  40.224 +    u64 vhpt_adr;
  40.225 +    ISR misr;
  40.226 +    ia64_rr vrr;
  40.227 +    REGS *regs;
  40.228 +    thash_cb_t *vtlb, *vhpt;
  40.229 +    thash_data_t *data, me;
  40.230 +    vtlb=vmx_vcpu_get_vtlb(vcpu);
  40.231 +#ifdef  VTLB_DEBUG
  40.232 +    check_vtlb_sanity(vtlb);
  40.233 +    dump_vtlb(vtlb);
  40.234 +#endif
  40.235 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  40.236 +    regs = vcpu_regs(vcpu);
  40.237 +    misr.val=regs->cr_isr;
  40.238 +/*  TODO
  40.239 +    if(vcpu->domain->id && vec == 2 &&
  40.240 +       vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
  40.241 +        emulate_ins(&v);
  40.242 +        return;
  40.243 +    }
  40.244 +*/
  40.245 +
  40.246 +    if((vec==1)&&(!vpsr.it)){
  40.247 +        physical_itlb_miss(vcpu, vadr);
   40.248 +        return IA64_NO_FAULT;
  40.249 +    }
  40.250 +    if((vec==2)&&(!vpsr.dt)){
  40.251 +        physical_dtlb_miss(vcpu, vadr);
   40.252 +        return IA64_NO_FAULT;
  40.253 +    }
  40.254 +    vrr = vmx_vcpu_rr(vcpu,vadr);
  40.255 +    if(vec == 1) type = ISIDE_TLB;
  40.256 +    else if(vec == 2) type = DSIDE_TLB;
  40.257 +    else panic("wrong vec\n");
  40.258 +
  40.259 +//    prepare_if_physical_mode(vcpu);
  40.260 +
   40.261 +    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr, type))!=NULL){
   40.262 +        if ( data->ps != vrr.ps ) {
   40.263 +            machine_tlb_insert(vcpu, data);
   40.264 +        }
   40.265 +        else {
   40.266 +            thash_insert(vtlb->ts->vhpt, data, vadr);
   40.267 +        }
  40.268 +    }else if(type == DSIDE_TLB){
  40.269 +        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  40.270 +            if(vpsr.ic){
  40.271 +                vmx_vcpu_set_isr(vcpu, misr.val);
  40.272 +                alt_dtlb(vcpu, vadr);
  40.273 +                return IA64_FAULT;
  40.274 +            } else{
  40.275 +                if(misr.sp){
  40.276 +                    //TODO  lds emulation
  40.277 +                    panic("Don't support speculation load");
  40.278 +                }else{
  40.279 +                    nested_dtlb(vcpu);
  40.280 +                    return IA64_FAULT;
  40.281 +                }
  40.282 +            }
  40.283 +        } else{
  40.284 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  40.285 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  40.286 +            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  40.287 +            if(data){
  40.288 +                if(vpsr.ic){
  40.289 +                    vmx_vcpu_set_isr(vcpu, misr.val);
  40.290 +                    dtlb_fault(vcpu, vadr);
  40.291 +                    return IA64_FAULT;
  40.292 +                }else{
  40.293 +                    if(misr.sp){
  40.294 +                        //TODO  lds emulation
  40.295 +                        panic("Don't support speculation load");
  40.296 +                    }else{
  40.297 +                        nested_dtlb(vcpu);
  40.298 +                        return IA64_FAULT;
  40.299 +                    }
  40.300 +                }
  40.301 +            }else{
  40.302 +                if(vpsr.ic){
  40.303 +                    vmx_vcpu_set_isr(vcpu, misr.val);
  40.304 +                    dvhpt_fault(vcpu, vadr);
  40.305 +                    return IA64_FAULT;
  40.306 +                }else{
  40.307 +                    if(misr.sp){
  40.308 +                        //TODO  lds emulation
  40.309 +                        panic("Don't support speculation load");
  40.310 +                    }else{
  40.311 +                        nested_dtlb(vcpu);
  40.312 +                        return IA64_FAULT;
  40.313 +                    }
  40.314 +                }
  40.315 +            }
  40.316 +        }
  40.317 +    }else if(type == ISIDE_TLB){
  40.318 +        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  40.319 +            if(!vpsr.ic){
  40.320 +                misr.ni=1;
  40.321 +            }
  40.322 +            vmx_vcpu_set_isr(vcpu, misr.val);
  40.323 +            alt_itlb(vcpu, vadr);
  40.324 +            return IA64_FAULT;
  40.325 +        } else{
  40.326 +            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  40.327 +            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  40.328 +            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  40.329 +            if(data){
  40.330 +                if(!vpsr.ic){
  40.331 +                    misr.ni=1;
  40.332 +                }
  40.333 +                vmx_vcpu_set_isr(vcpu, misr.val);
  40.334 +                itlb_fault(vcpu, vadr);
  40.335 +                return IA64_FAULT;
  40.336 +            }else{
  40.337 +                if(!vpsr.ic){
  40.338 +                    misr.ni=1;
  40.339 +                }
  40.340 +                vmx_vcpu_set_isr(vcpu, misr.val);
  40.341 +                ivhpt_fault(vcpu, vadr);
  40.342 +                return IA64_FAULT;
  40.343 +            }
  40.344 +        }
   40.345 +    }
   40.345 +    return IA64_NO_FAULT;
   40.346 +}
  40.347 +
  40.348 +
    41.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.2 +++ b/xen/arch/ia64/vmx_utility.c	Fri May 20 17:23:51 2005 +0000
    41.3 @@ -0,0 +1,659 @@
    41.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    41.5 +/*
    41.6 + * vmx_utility.c:
    41.7 + * Copyright (c) 2005, Intel Corporation.
    41.8 + *
    41.9 + * This program is free software; you can redistribute it and/or modify it
   41.10 + * under the terms and conditions of the GNU General Public License,
   41.11 + * version 2, as published by the Free Software Foundation.
   41.12 + *
   41.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   41.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   41.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   41.16 + * more details.
   41.17 + *
   41.18 + * You should have received a copy of the GNU General Public License along with
   41.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   41.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   41.21 + *
   41.22 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   41.23 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
   41.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   41.25 + */
   41.26 +
   41.27 +#include <xen/types.h>
   41.28 +#include <asm/vmx_vcpu.h>
   41.29 +#include <asm/processor.h>
   41.30 +#include <asm/vmx_mm_def.h>
   41.31 +
   41.32 +
   41.33 +/*
   41.34 + * Return:
    41.35 + *  0:  not a reserved indirect register
    41.36 + *  1:  a reserved indirect register
   41.37 + */
   41.38 +int
   41.39 +is_reserved_indirect_register (
   41.40 +    int type,
   41.41 +    int index )
   41.42 +{
   41.43 +    switch (type) {
   41.44 +        case IA64_CPUID:
   41.45 +            if ( index >= 5 ) {
   41.46 +                return 1;
    41.47 +            }
    41.48 +            break;
   41.49 +        case IA64_DBR:
   41.50 +        case IA64_IBR:
   41.51 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   41.52 +            break;
   41.53 +
   41.54 +        case IA64_PMC:
   41.55 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   41.56 +            break;
   41.57 +
   41.58 +        case IA64_PMD:
   41.59 +            //bugbugbug:check with pal about the max ibr/dbr!!!!
   41.60 +            break;
   41.61 +
   41.62 +        case IA64_PKR:
   41.63 +            //bugbugbug:check with pal about the max pkr!!!!
   41.64 +            break;
   41.65 +
   41.66 +        case IA64_RR:
   41.67 +            //bugbugbug:check with pal about the max rr!!!!
   41.68 +            break;
   41.69 +
   41.70 +        default:
   41.71 +            panic ("Unsupported instruction!");
   41.72 +    }
   41.73 +
   41.74 +    return 0;
   41.75 +
   41.76 +}
   41.77 +
   41.78 +/*
   41.79 + * Return:
    41.80 + *  the input value with all ignored fields cleared to 0
   41.81 + */
   41.82 +u64
   41.83 +indirect_reg_igfld_MASK (
   41.84 +    int type,
   41.85 +    int index,
   41.86 +    u64 value
   41.87 +    )
   41.88 +{
   41.89 +    u64 nvalue;
   41.90 +
   41.91 +    nvalue = value;
   41.92 +    switch ( type ) {
   41.93 +        case IA64_CPUID:
   41.94 +            if ( index == 2 ) {
   41.95 +                nvalue = 0;
   41.96 +            }
   41.97 +            break;
   41.98 +
   41.99 +        case IA64_DBR:
  41.100 +        case IA64_IBR:
  41.101 +            /* Refer to SDM Vol2 Table 7-1,7-2 */
  41.102 +            if ( index % 2 != 0) {
  41.103 +                /* Ignore field: {61:60} */
  41.104 +                nvalue = value & (~MASK (60, 2));
  41.105 +            }
  41.106 +            break;
  41.107 +        case IA64_PMC:
  41.108 +            if ( index == 0 ) {
  41.109 +                /* Ignore field: 3:1 */
  41.110 +                nvalue = value & (~MASK (1, 3));
  41.111 +            }
  41.112 +            break;
  41.113 +        case IA64_PMD:
  41.114 +            if ( index >= 4 ) {
  41.115 +                /* Ignore field: 7:7 */
  41.116 +                /* bugbug: this code is correct for generic
  41.117 +                 * PMD. However, for implementation specific
  41.118 +                 * PMD, it's WRONG. need more info to judge
  41.119 +                 * what's implementation specific PMD.
  41.120 +                 */
  41.121 +                nvalue = value & (~MASK (7, 1));
  41.122 +            }
  41.123 +            break;
  41.124 +        case IA64_PKR:
  41.125 +        case IA64_RR:
  41.126 +            break;
  41.127 +        default:
  41.128 +            panic ("Unsupported instruction!");
  41.129 +    }
  41.130 +
  41.131 +    return nvalue;
  41.132 +}
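All of the field clearing above goes through MASK(offset, len). The definition below is a presumed reconstruction (the real macro lives in vmx_mm_def.h): len one-bits starting at bit offset. The odd-indexed dbr/ibr case then reads as "clear bits 61:60":

    #include <assert.h>
    #include <stdint.h>

    /* Presumed semantics of MASK() from vmx_mm_def.h: an assumption,
     * not a copy of the real definition. */
    #define MASK(offset, len) ((((uint64_t)1 << (len)) - 1) << (offset))

    int main(void)
    {
        uint64_t value = 0x3800000000001234ULL;

        /* "Ignore field: {61:60}" for odd dbr/ibr registers */
        assert((value & ~MASK(60, 2)) == 0x0800000000001234ULL);
        return 0;
    }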
  41.133 +
  41.134 +/*
  41.135 + * Return:
   41.136 + *  the input value with all ignored fields cleared to 0
  41.137 + */
  41.138 +u64
  41.139 +cr_igfld_mask (int index, u64 value)
  41.140 +{
  41.141 +    u64 nvalue;
  41.142 +
  41.143 +    nvalue = value;
  41.144 +
  41.145 +    switch ( index ) {
  41.146 +    case IA64_REG_CR_IVA:
   41.147 +        /* Ignore field: 14:0 */
  41.148 +        nvalue = value & (~MASK (0, 15));
  41.149 +        break;
  41.150 +
  41.151 +    case IA64_REG_CR_IHA:
   41.152 +        /* Ignore field: 1:0 */
  41.153 +        nvalue = value & (~MASK (0, 2));
  41.154 +        break;
  41.155 +
  41.156 +    case IA64_REG_CR_LID:
   41.157 +        /* Ignore field: 63:32 */
  41.158 +        nvalue = value & (~MASK (32, 32));
  41.159 +        break;
  41.160 +
  41.161 +    case IA64_REG_CR_TPR:
   41.162 +        /* Ignore field: 63:17,3:0 */
  41.163 +        nvalue = value & (~MASK (17, 47));
  41.164 +        nvalue = nvalue & (~MASK (0, 4));
  41.165 +        break;
  41.166 +
  41.167 +    case IA64_REG_CR_EOI:
   41.168 +        /* Ignore field: 63:0 */
  41.169 +        nvalue = 0;
  41.170 +        break;
  41.171 +
  41.172 +    case IA64_REG_CR_ITV:
  41.173 +    case IA64_REG_CR_PMV:
  41.174 +    case IA64_REG_CR_CMCV:
  41.175 +    case IA64_REG_CR_LRR0:
  41.176 +    case IA64_REG_CR_LRR1:
   41.177 +        /* Ignore field: 63:17,12:12 */
  41.178 +        nvalue = value & (~MASK (17, 47));
  41.179 +        nvalue = nvalue & (~MASK (12, 1));
  41.180 +        break;
  41.181 +    }
  41.182 +
  41.183 +    return nvalue;
  41.184 +}
  41.185 +
  41.186 +
  41.187 +/*
  41.188 + * Return:
  41.189 + *  1: PSR reserved fields are not zero
  41.190 + *  0:  PSR reserved fields are all zero
  41.191 + */
  41.192 +int
  41.193 +check_psr_rsv_fields (u64 value)
  41.194 +{
   41.195 +    /* PSR reserved fields: 0, 12:6, 16, 31:28, 63:46
  41.196 +     * These reserved fields shall all be zero
  41.197 +     * Otherwise we will panic
  41.198 +     */
  41.199 +
  41.200 +    if ( value & MASK (0, 1) ||
  41.201 +         value & MASK (6, 7) ||
  41.202 +         value & MASK (16, 1) ||
  41.203 +         value & MASK (28, 4) ||
  41.204 +         value & MASK (46, 18)
  41.205 +         ) {
  41.206 +             return 1;
  41.207 +         }
  41.208 +
  41.209 +    return 0;
  41.210 +}
  41.211 +
  41.212 +
  41.213 +
  41.214 +/*
  41.215 + * Return:
  41.216 + *  1: CR reserved fields are not zero
  41.217 + *  0:  CR reserved fields are all zero
  41.218 + */
  41.219 +int
  41.220 +check_cr_rsv_fields (int index, u64 value)
  41.221 +{
  41.222 +    switch (index) {
  41.223 +        case IA64_REG_CR_DCR:
  41.224 +            if ( (value & MASK ( 3, 5 )) ||
  41.225 +                (value & MASK (15, 49))) {
  41.226 +                    return 1;
  41.227 +            }
  41.228 +            return 0;
  41.229 +
  41.230 +        case IA64_REG_CR_ITM:
  41.231 +        case IA64_REG_CR_IVA:
  41.232 +        case IA64_REG_CR_IIP:
  41.233 +        case IA64_REG_CR_IFA:
  41.234 +        case IA64_REG_CR_IIPA:
  41.235 +        case IA64_REG_CR_IIM:
  41.236 +        case IA64_REG_CR_IHA:
  41.237 +        case IA64_REG_CR_EOI:
  41.238 +            return 0;
  41.239 +
  41.240 +        case IA64_REG_CR_PTA:
  41.241 +            if ( (value & MASK ( 1, 1 )) ||
  41.242 +                (value & MASK (9, 6))) {
  41.243 +                    return 1;
  41.244 +            }
  41.245 +            return 0;
  41.246 +
  41.247 +        case IA64_REG_CR_IPSR:
  41.248 +            return check_psr_rsv_fields (value);
  41.249 +
  41.250 +
  41.251 +        case IA64_REG_CR_ISR:
  41.252 +            if ( (value & MASK ( 24, 8 )) ||
  41.253 +                (value & MASK (44, 20))) {
  41.254 +                    return 1;
  41.255 +            }
  41.256 +            return 0;
  41.257 +
  41.258 +        case IA64_REG_CR_ITIR:
  41.259 +            if ( (value & MASK ( 0, 2 )) ||
  41.260 +                (value & MASK (32, 32))) {
  41.261 +                    return 1;
  41.262 +            }
  41.263 +            return 0;
  41.264 +
  41.265 +        case IA64_REG_CR_IFS:
  41.266 +            if ( (value & MASK ( 38, 25 ))) {
  41.267 +                return 1;
  41.268 +            }
  41.269 +            return 0;
  41.270 +
  41.271 +        case IA64_REG_CR_LID:
  41.272 +            if ( (value & MASK ( 0, 16 ))) {
  41.273 +                return 1;
  41.274 +            }
  41.275 +            return 0;
  41.276 +
  41.277 +        case IA64_REG_CR_IVR:
  41.278 +            if ( (value & MASK ( 8, 56 ))) {
  41.279 +                return 1;
  41.280 +            }
  41.281 +            return 0;
  41.282 +
  41.283 +        case IA64_REG_CR_TPR:
  41.284 +            if ( (value & MASK ( 8, 8 ))) {
  41.285 +                return 1;
  41.286 +            }
  41.287 +            return 0;
  41.288 +
  41.289 +        case IA64_REG_CR_IRR0:
  41.290 +            if ( (value & MASK ( 1, 1 )) ||
  41.291 +                (value & MASK (3, 13))) {
  41.292 +                    return 1;
  41.293 +            }
  41.294 +            return 0;
  41.295 +
  41.296 +        case IA64_REG_CR_ITV:
  41.297 +        case IA64_REG_CR_PMV:
  41.298 +        case IA64_REG_CR_CMCV:
  41.299 +            if ( (value & MASK ( 8, 4 )) ||
  41.300 +                (value & MASK (13, 3))) {
  41.301 +                    return 1;
  41.302 +            }
  41.303 +            return 0;
  41.304 +
  41.305 +        case IA64_REG_CR_LRR0:
  41.306 +        case IA64_REG_CR_LRR1:
  41.307 +            if ( (value & MASK ( 11, 1 )) ||
  41.308 +                (value & MASK (14, 1))) {
  41.309 +                    return 1;
  41.310 +            }
  41.311 +            return 0;
  41.312 +    }
  41.313 +
  41.314 +
  41.315 +    panic ("Unsupported CR");
  41.316 +}
  41.317 +
  41.318 +
  41.319 +
  41.320 +/*
  41.321 + * Return:
  41.322 + *  0:  Indirect Reg reserved fields are not zero
  41.323 + *  1:  Indirect Reg reserved fields are all zero
  41.324 + */
  41.325 +int
  41.326 +check_indirect_reg_rsv_fields ( int type, int index, u64 value )
  41.327 +{
  41.328 +
  41.329 +    switch ( type ) {
  41.330 +        case IA64_CPUID:
  41.331 +            if ( index == 3 ) {
  41.332 +                if ( value & MASK (40, 24 )) {
  41.333 +                    return 0;
  41.334 +                }
  41.335 +            } else if ( index == 4 ) {
  41.336 +                if ( value & MASK (2, 62 )) {
  41.337 +                    return 0;
  41.338 +                }
  41.339 +            }
  41.340 +            break;
  41.341 +
  41.342 +        case IA64_DBR:
  41.343 +        case IA64_IBR:
  41.344 +        case IA64_PMC:
  41.345 +        case IA64_PMD:
  41.346 +            break;
  41.347 +
  41.348 +        case IA64_PKR:
  41.349 +            if ( value & MASK (4, 4) ||
  41.350 +                value & MASK (32, 32 )) {
  41.351 +                return 0;
  41.352 +                }
  41.353 +            break;
  41.354 +
  41.355 +        case IA64_RR:
  41.356 +            if ( value & MASK (1, 1) ||
  41.357 +                value & MASK (32, 32 )) {
  41.358 +                return 0;
  41.359 +                }
  41.360 +            break;
  41.361 +
  41.362 +        default:
  41.363 +            panic ("Unsupported instruction!");
  41.364 +    }
  41.365 +
  41.366 +    return 1;
  41.367 +}
  41.368 +
  41.369 +
  41.370 +
  41.371 +
  41.372 +/* Return
  41.373 + * Same format as isr_t
  41.374 + * Only ei/ni bits are valid, all other bits are zero
  41.375 + */
  41.376 +u64
  41.377 +set_isr_ei_ni (VCPU *vcpu)
  41.378 +{
  41.379 +
  41.380 +    IA64_PSR vpsr,ipsr;
  41.381 +    ISR visr;
  41.382 +    REGS *regs;
  41.383 +
  41.384 +    regs=vcpu_regs(vcpu);
  41.385 +
  41.386 +    visr.val = 0;
  41.387 +
  41.388 +    vpsr.val = vmx_vcpu_get_psr (vcpu);
  41.389 +
   41.390 +    if ( !vpsr.ic ) {
  41.391 +        /* Set ISR.ni */
  41.392 +        visr.ni = 1;
  41.393 +    }
  41.394 +    ipsr.val = regs->cr_ipsr;
  41.395 +
  41.396 +    visr.ei = ipsr.ri;
  41.397 +    return visr.val;
  41.398 +}
  41.399 +
  41.400 +
  41.401 +/* Set up ISR.na/code{3:0}/r/w for no-access instructions
  41.402 + * Refer to SDM Vol Table 5-1
  41.403 + * Parameter:
  41.404 + *  setr: if 1, indicates this function will set up ISR.r
  41.405 + *  setw: if 1, indicates this function will set up ISR.w
  41.406 + * Return:
  41.407 + *  Same format as ISR. All fields are zero, except na/code{3:0}/r/w
  41.408 + */
  41.409 +u64
  41.410 +set_isr_for_na_inst(VCPU *vcpu, int op)
  41.411 +{
  41.412 +    ISR visr;
  41.413 +    visr.val = 0;
  41.414 +    switch (op) {
  41.415 +        case IA64_INST_TPA:
  41.416 +            visr.na = 1;
  41.417 +            visr.code = 0;
  41.418 +            break;
  41.419 +        case IA64_INST_TAK:
  41.420 +            visr.na = 1;
  41.421 +            visr.code = 3;
  41.422 +            break;
  41.423 +    }
  41.424 +    return visr.val;
  41.425 +}
  41.426 +
  41.427 +
  41.428 +
  41.429 +/*
   41.430 + * Set up ISR for a register NAT consumption fault
  41.431 + * Parameters:
  41.432 + *  read: if 1, indicates this is a read access;
  41.433 + *  write: if 1, indicates this is a write access;
  41.434 + */
  41.435 +void
  41.436 +set_rnat_consumption_isr (VCPU *vcpu,int inst,int read,int write)
  41.437 +{
  41.438 +    ISR visr;
  41.439 +    u64 value;
   41.440 +    /* Need to set up ISR: code, ei, ni, na, r/w */
  41.441 +    visr.val = 0;
  41.442 +
  41.443 +    /* ISR.code{7:4} =1,
  41.444 +     * Set up ISR.code{3:0}, ISR.na
  41.445 +     */
  41.446 +    visr.code = (1 << 4);
  41.447 +    if (inst) {
  41.448 +
  41.449 +        value = set_isr_for_na_inst (vcpu,inst);
  41.450 +        visr.val = visr.val | value;
  41.451 +    }
  41.452 +
  41.453 +    /* Set up ISR.r/w */
  41.454 +    visr.r = read;
  41.455 +    visr.w = write;
  41.456 +
  41.457 +    /* Set up ei/ni */
  41.458 +    value = set_isr_ei_ni (vcpu);
  41.459 +    visr.val = visr.val | value;
  41.460 +
  41.461 +    vmx_vcpu_set_isr (vcpu,visr.val);
  41.462 +}
  41.463 +
  41.464 +
  41.465 +
  41.466 +/*
  41.467 + * Set up ISR for break fault
  41.468 + */
  41.469 +void set_break_isr (VCPU *vcpu)
  41.470 +{
  41.471 +    ISR visr;
  41.472 +    u64 value;
  41.473 +
   41.474 +    /* Need to set up ISR: ei, ni */
  41.475 +
  41.476 +    visr.val = 0;
  41.477 +
  41.478 +    /* Set up ei/ni */
  41.479 +    value = set_isr_ei_ni (vcpu);
  41.480 +    visr.val = visr.val | value;
  41.481 +
  41.482 +    vmx_vcpu_set_isr(vcpu, visr.val);
  41.483 +}
  41.484 +
  41.485 +
  41.486 +
  41.487 +
  41.488 +
  41.489 +
  41.490 +/*
   41.491 + * Set up ISR for Privileged Operation fault
  41.492 + */
  41.493 +void set_privileged_operation_isr (VCPU *vcpu,int inst)
  41.494 +{
  41.495 +    ISR visr;
  41.496 +    u64 value;
  41.497 +
   41.498 +    /* Need to set up ISR: code, ei, ni, na */
  41.499 +
  41.500 +    visr.val = 0;
  41.501 +
  41.502 +    /* Set up na, code{3:0} for no-access instruction */
  41.503 +    value = set_isr_for_na_inst (vcpu, inst);
  41.504 +    visr.val = visr.val | value;
  41.505 +
  41.506 +
  41.507 +    /* ISR.code{7:4} =1 */
  41.508 +    visr.code = (1 << 4) | visr.code;
  41.509 +
  41.510 +    /* Set up ei/ni */
  41.511 +    value = set_isr_ei_ni (vcpu);
  41.512 +    visr.val = visr.val | value;
  41.513 +
  41.514 +    vmx_vcpu_set_isr (vcpu, visr.val);
  41.515 +}
  41.516 +
  41.517 +
  41.518 +
  41.519 +
  41.520 +/*
   41.521 + * Set up ISR for Privileged Register fault
  41.522 + */
  41.523 +void set_privileged_reg_isr (VCPU *vcpu, int inst)
  41.524 +{
  41.525 +    ISR visr;
  41.526 +    u64 value;
  41.527 +
   41.528 +    /* Need to set up ISR: code, ei, ni */
  41.529 +
  41.530 +    visr.val = 0;
  41.531 +
  41.532 +    /* ISR.code{7:4} =2 */
  41.533 +    visr.code = 2 << 4;
  41.534 +
  41.535 +    /* Set up ei/ni */
  41.536 +    value = set_isr_ei_ni (vcpu);
  41.537 +    visr.val = visr.val | value;
  41.538 +
  41.539 +    vmx_vcpu_set_isr (vcpu, visr.val);
  41.540 +}
  41.541 +
  41.542 +
  41.543 +
  41.544 +
  41.545 +
  41.546 +/*
  41.547 + * Set up ISR for Reserved Register/Field fault
  41.548 + */
  41.549 +void set_rsv_reg_field_isr (VCPU *vcpu)
  41.550 +{
  41.551 +    ISR visr;
  41.552 +    u64 value;
  41.553 +
   41.554 +    /* Need to set up ISR: code, ei, ni */
  41.555 +
  41.556 +    visr.val = 0;
  41.557 +
   41.558 +    /* ISR.code{7:4} = 3 */
   41.559 +    visr.code = 3 << 4;
  41.560 +
  41.561 +    /* Set up ei/ni */
  41.562 +    value = set_isr_ei_ni (vcpu);
  41.563 +    visr.val = visr.val | value;
  41.564 +
  41.565 +    vmx_vcpu_set_isr (vcpu, visr.val);
  41.566 +}
  41.567 +
  41.568 +
  41.569 +
  41.570 +/*
  41.571 + * Set up ISR for Illegal Operation fault
  41.572 + */
  41.573 +void set_illegal_op_isr (VCPU *vcpu)
  41.574 +{
  41.575 +    ISR visr;
  41.576 +    u64 value;
  41.577 +
   41.578 +    /* Need to set up ISR: ei, ni */
  41.579 +
  41.580 +    visr.val = 0;
  41.581 +
  41.582 +    /* Set up ei/ni */
  41.583 +    value = set_isr_ei_ni (vcpu);
  41.584 +    visr.val = visr.val | value;
  41.585 +
  41.586 +    vmx_vcpu_set_isr (vcpu, visr.val);
  41.587 +}
  41.588 +
  41.589 +
  41.590 +void set_isr_reg_nat_consumption(VCPU *vcpu, u64 flag, u64 non_access)
  41.591 +{
  41.592 +    ISR isr;
  41.593 +
  41.595 +    isr.val = set_isr_ei_ni(vcpu);
  41.596 +    isr.code = IA64_REG_NAT_CONSUMPTION_FAULT | flag;
  41.597 +    isr.na = non_access;
  41.598 +    isr.r = 1;
  41.599 +    isr.w = 0;
  41.600 +    vmx_vcpu_set_isr(vcpu, isr.val);
  41.601 +    return;
  41.602 +}
  41.603 +
  41.604 +void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
  41.605 +{
  41.607 +    ISR isr;
  41.608 +
  41.609 +    isr.val = set_isr_ei_ni(vcpu);
  41.610 +    isr.code = IA64_PRIV_OP_FAULT;
  41.611 +    isr.na = non_access;
  41.612 +    vmx_vcpu_set_isr(vcpu, isr.val);
  41.613 +
  41.614 +    return;
  41.615 +}
  41.616 +
  41.617 +
  41.618 +IA64FAULT check_target_register(VCPU *vcpu, u64 reg_index)
  41.619 +{
  41.620 +    u64 sof;
  41.621 +    REGS *regs;
  41.622 +    regs=vcpu_regs(vcpu);
  41.623 +    sof = regs->cr_ifs & 0x7f;
  41.624 +    if(reg_index >= sof + 32)
  41.625 +        return IA64_FAULT;
   41.626 +    return IA64_NO_FAULT;
  41.627 +}
  41.628 +
  41.629 +
  41.630 +int is_reserved_rr_register(VCPU* vcpu, int reg_index)
  41.631 +{
  41.632 +    return (reg_index >= 8);
  41.633 +}
  41.634 +
  41.635 +#define  ITIR_RSV_MASK		(0x3UL | (((1UL<<32)-1) << 32))
  41.636 +int is_reserved_itir_field(VCPU* vcpu, u64 itir)
  41.637 +{
  41.638 +	if ( itir & ITIR_RSV_MASK ) {
  41.639 +		return 1;
  41.640 +	}
  41.641 +	return 0;
  41.642 +}
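          +
          +/* Annotation: ITIR_RSV_MASK above matches the architected ITIR
          + * layout -- bits {1:0} and {63:32} are reserved, while ps occupies
          + * {7:2} and the protection key {31:8}.
          + */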
  41.643 +
  41.644 +int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
  41.645 +{
  41.646 +    ia64_rr rr;
  41.647 +    rr.rrval = reg_value;
  41.648 +
  41.649 +    if(rr.reserved0 != 0 || rr.reserved1 != 0){
  41.650 +        return 1;
  41.651 +    }
  41.652 +    if(rr.ps < 12 || rr.ps > 28){
  41.653 +        // page too big or small.
  41.654 +        return 1;
  41.655 +    }
  41.656 +    if(rr.ps > 15 && rr.ps % 2 != 0){
  41.657 +        // unsupported page size.
  41.658 +        return 1;
  41.659 +    }
  41.660 +    return 0;
  41.661 +}
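          +
          +/* Illustration (hypothetical values, not part of this changeset):
          + * the checks above accept page sizes from 4KB (ps=12) to 256MB
          + * (ps=28), with odd sizes allowed only up to 32KB (ps=15):
          + *
          + *   ia64_rr rr;
          + *   rr.rrval = 0;
          + *   rr.ps = 14;    // 16KB  -> accepted
          + *   rr.ps = 17;    // 128KB -> rejected (odd and > 15)
          + *   rr.ps = 30;    // 1GB   -> rejected (> 28)
          + */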
  41.662 +
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Fri May 20 17:23:51 2005 +0000
    42.3 @@ -0,0 +1,436 @@
    42.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    42.5 +/*
    42.6 + * vmx_vcpu.c: handling all virtual cpu related thing.
    42.7 + * Copyright (c) 2005, Intel Corporation.
    42.8 + *
    42.9 + * This program is free software; you can redistribute it and/or modify it
   42.10 + * under the terms and conditions of the GNU General Public License,
   42.11 + * version 2, as published by the Free Software Foundation.
   42.12 + *
   42.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   42.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   42.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   42.16 + * more details.
   42.17 + *
   42.18 + * You should have received a copy of the GNU General Public License along with
   42.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   42.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   42.21 + *
   42.22 + *  Fred yang (fred.yang@intel.com)
   42.23 + *  Arun Sharma (arun.sharma@intel.com)
   42.24 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   42.25 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   42.26 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   42.27 + */
   42.28 +
   42.29 +
   42.30 +
   42.31 +#include <linux/sched.h>
   42.32 +#include <public/arch-ia64.h>
   42.33 +#include <asm/ia64_int.h>
   42.34 +#include <asm/vmx_vcpu.h>
   42.35 +#include <asm/regionreg.h>
   42.36 +#include <asm/tlb.h>
   42.37 +#include <asm/processor.h>
   42.38 +#include <asm/delay.h>
   42.39 +#include <asm/regs.h>
   42.40 +#include <asm/gcc_intrin.h>
   42.41 +#include <asm/vmx_mm_def.h>
   42.42 +#include <asm/vmx.h>
   42.43 +
   42.44 +//u64  fire_itc;
   42.45 +//u64  fire_itc2;
   42.46 +//u64  fire_itm;
   42.47 +//u64  fire_itm2;
   42.48 +/*
   42.49 + * Copyright (c) 2005 Intel Corporation.
   42.50 + *    Anthony Xu (anthony.xu@intel.com)
   42.51 + *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
   42.66 + */
   42.67 +
   42.68 +/**************************************************************************
   42.69 + VCPU general register access routines
   42.70 +**************************************************************************/
   42.71 +#include <asm/hw_irq.h>
   42.72 +#include <asm/vmx_pal_vsa.h>
   42.73 +#include <asm/kregs.h>
   42.74 +
   42.75 +//unsigned long last_guest_rsm = 0x0;
   42.76 +struct guest_psr_bundle{
   42.77 +	unsigned long ip;
   42.78 +	unsigned long psr;
   42.79 +};
   42.80 +
   42.81 +struct guest_psr_bundle guest_psr_buf[100];
   42.82 +unsigned long guest_psr_index = 0;
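          +
          +/* Annotation: guest_psr_buf is a small debug ring buffer; each
          + * vmx_vcpu_set_psr() call below records the guest IP together with
          + * the new virtual PSR, wrapping after 100 entries.
          + */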
   42.83 +
   42.84 +void
   42.85 +vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
   42.86 +{
   42.87 +
   42.88 +    UINT64 mask;
   42.89 +    REGS *regs;
   42.90 +    IA64_PSR old_psr, new_psr;
   42.91 +    old_psr.val=vmx_vcpu_get_psr(vcpu);
   42.92 +
   42.93 +    regs=vcpu_regs(vcpu);
    42.94 +    /* We only support a guest with:
    42.95 +     *  vpsr.pk = 0
    42.96 +     *  vpsr.is = 0
    42.97 +     * Otherwise panic.
    42.98 +     */
    42.99 +    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
   42.100 +        panic ("Setting unsupported guest psr!");
   42.101 +    }
  42.102 +
   42.103 +    /*
   42.104 +     * For the IA64_PSR bits id/da/dd/ss/ed/ia: since these bits
   42.105 +     * become 0 after each instruction completes successfully, we
   42.106 +     * keep them cleared in the virtual PSR and let mIA64_PSR carry them.
   42.107 +     */
  42.108 +    VMX_VPD(vcpu,vpsr) = value &
  42.109 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  42.110 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  42.111 +            ));
  42.112 +
  42.113 +    new_psr.val=vmx_vcpu_get_psr(vcpu);
  42.114 +    {
   42.115 +	/* regs already points at this vcpu's register frame */
  42.116 +	guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
  42.117 +	guest_psr_buf[guest_psr_index].psr = new_psr.val;
  42.118 +	if (++guest_psr_index >= 100)
  42.119 +	    guest_psr_index = 0;
  42.120 +    }
  42.121 +#if 0
  42.122 +    if (old_psr.i != new_psr.i) {
  42.123 +	if (old_psr.i)
  42.124 +		last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
  42.125 +	else
  42.126 +		last_guest_rsm = 0;
  42.127 +    }
  42.128 +#endif
  42.129 +
  42.130 +    /*
  42.131 +     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
  42.132 +     * , except for the following bits:
  42.133 +     *  ic/i/dt/si/rt/mc/it/bn/vm
  42.134 +     */
  42.135 +    mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
  42.136 +        IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
  42.137 +        IA64_PSR_VM;
  42.138 +
  42.139 +    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
  42.140 +
  42.141 +    check_mm_mode_switch(vcpu, old_psr, new_psr);
   42.142 +    return;
  42.143 +}
  42.144 +
   42.145 +/* Adjust the instruction slot in both xen_regs and the vpd according
   42.146 + * to vpsr.ri, which should have been synced with ipsr on entry.
   42.147 + *
   42.148 + * Also clear the bits that a successful emulation clears.
   42.149 + */
  42.150 +IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
  42.151 +{
  42.152 +    // TODO: trap_bounce?? Eddie
  42.153 +    REGS *regs = vcpu_regs(vcpu);
  42.154 +    IA64_PSR vpsr;
  42.155 +    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
  42.156 +
  42.157 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   42.158 +    if (vpsr.ri == 2) {
   42.159 +        vpsr.ri = 0;
   42.160 +        regs->cr_iip += 16;
   42.161 +    } else {
   42.162 +        vpsr.ri++;
   42.163 +    }
  42.164 +
  42.165 +    ipsr->ri = vpsr.ri;
  42.166 +    vpsr.val &=
  42.167 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  42.168 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  42.169 +            ));
  42.170 +
  42.171 +    VMX_VPD(vcpu, vpsr) = vpsr.val;
  42.172 +
  42.173 +    ipsr->val &=
  42.174 +            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
  42.175 +                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
  42.176 +            ));
  42.177 +
  42.178 +    return (IA64_NO_FAULT);
  42.179 +}
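          +
          +/* Annotation: an IA-64 bundle is 16 bytes wide and holds three
          + * instruction slots, so the routine above advances the slot pointer
          + * 0 -> 1 -> 2 and then wraps to slot 0 of the next bundle
          + * (iip += 16).
          + */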
  42.180 +
  42.181 +
  42.182 +IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
  42.183 +{
  42.184 +    REGS *regs = vcpu_regs(vcpu);
  42.185 +    IA64_PSR vpsr;
  42.186 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  42.187 +
  42.188 +    if(!vpsr.ic)
  42.189 +        VPD_CR(vcpu,ifs) = regs->cr_ifs;
  42.190 +    regs->cr_ifs = IA64_IFS_V;
  42.191 +    return (IA64_NO_FAULT);
  42.192 +}
  42.193 +
  42.194 +
  42.195 +thash_cb_t *
  42.196 +vmx_vcpu_get_vtlb(VCPU *vcpu)
  42.197 +{
  42.198 +    return vcpu->arch.vtlb;
  42.199 +}
  42.200 +
  42.201 +
  42.202 +struct virutal_platform_def *
  42.203 +vmx_vcpu_get_plat(VCPU *vcpu)
  42.204 +{
  42.205 +    return &(vcpu->arch.arch_vmx.vmx_platform);
  42.206 +}
  42.207 +
  42.208 +
  42.209 +ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr)
  42.210 +{
  42.211 +        return (ia64_rr)VMX(vcpu,vrr[vadr>>61]);
  42.212 +}
  42.213 +
  42.214 +
  42.215 +IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  42.216 +{
  42.217 +    extern void set_one_rr(UINT64, UINT64);
  42.218 +    ia64_rr oldrr,newrr;
  42.219 +    thash_cb_t *hcb;
  42.220 +    oldrr=vmx_vcpu_rr(vcpu,reg);
  42.221 +    newrr.rrval=val;
  42.222 +#if 1
  42.223 +    if(oldrr.ps!=newrr.ps){
  42.224 +        hcb = vmx_vcpu_get_vtlb(vcpu);
  42.225 +        thash_purge_all(hcb);
  42.226 +    }
  42.227 +#endif
  42.228 +    VMX(vcpu,vrr[reg>>61]) = val;
  42.229 +    switch((u64)(reg>>61)) {
  42.230 +    case VRN5:
  42.231 +        VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
  42.232 +        break;
  42.233 +    case VRN6:
  42.234 +        VMX(vcpu,mrr6)=vmx_vrrtomrr(vcpu,val);
  42.235 +        break;
  42.236 +    case VRN7:
  42.237 +        VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
  42.238 +        /* Change double mapping for this domain */
  42.239 +        vmx_change_double_mapping(vcpu,
  42.240 +                      vmx_vrrtomrr(vcpu,oldrr.rrval),
  42.241 +                      vmx_vrrtomrr(vcpu,newrr.rrval));
  42.242 +        break;
  42.243 +    default:
  42.244 +        ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
  42.245 +        break;
  42.246 +    }
  42.247 +
  42.248 +    return (IA64_NO_FAULT);
  42.249 +}
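          +
          +/* Annotation: the region number is the top three bits of the virtual
          + * address (reg >> 61 above).  Regions 5-7 additionally keep a shadow
          + * machine value (mrr5/6/7), presumably consumed on the world-switch
          + * path rather than loaded immediately as the default case does.
          + */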
  42.250 +
  42.251 +
  42.252 +
  42.253 +/**************************************************************************
  42.254 + VCPU protection key register access routines
  42.255 +**************************************************************************/
  42.256 +
  42.257 +IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  42.258 +{
  42.259 +    UINT64 val = (UINT64)ia64_get_pkr(reg);
  42.260 +    *pval = val;
  42.261 +    return (IA64_NO_FAULT);
  42.262 +}
  42.263 +
  42.264 +IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
  42.265 +{
  42.266 +    ia64_set_pkr(reg,val);
  42.267 +    return (IA64_NO_FAULT);
  42.268 +}
  42.269 +
  42.270 +#if 0
  42.271 +int tlb_debug=0;
  42.272 +check_entry(u64 va, u64 ps, char *str)
  42.273 +{
  42.274 +     va &= ~ (PSIZE(ps)-1);
  42.275 +     if ( va == 0x2000000002908000UL ||
  42.276 +	  va == 0x600000000000C000UL ) {
  42.277 +	stop();
  42.278 +     }
  42.279 +     if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
  42.280 +}
  42.281 +#endif
  42.282 +
  42.283 +
  42.284 +u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
  42.285 +{
  42.286 +    ia64_rr rr,rr1;
  42.287 +    rr=vmx_vcpu_rr(vcpu,ifa);
  42.288 +    rr1.rrval=0;
  42.289 +    rr1.ps=rr.ps;
  42.290 +    rr1.rid=rr.rid;
  42.291 +    return (rr1.rrval);
  42.292 +}
  42.293 +
  42.294 +
  42.295 +
  42.296 +
  42.297 +IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
  42.298 +{
  42.299 +    // TODO: Only allowed for current vcpu
  42.300 +    UINT64 ifs, psr;
  42.301 +    REGS *regs = vcpu_regs(vcpu);
  42.302 +    psr = VPD_CR(vcpu,ipsr);
  42.303 +    vmx_vcpu_set_psr(vcpu,psr);
  42.304 +    ifs=VPD_CR(vcpu,ifs);
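          +    /* Restore the guest's ifs only when it is valid (v bit set)
          +     * and describes a non-empty frame.
          +     */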
  42.305 +    if((ifs>>63)&&(ifs<<1)){
  42.306 +        ifs=(regs->cr_ifs)&0x7f;
  42.307 +        regs->rfi_pfs = (ifs<<7)|ifs;
  42.308 +        regs->cr_ifs = VPD_CR(vcpu,ifs);
  42.309 +    }
  42.310 +    regs->cr_iip = VPD_CR(vcpu,iip);
  42.311 +    return (IA64_NO_FAULT);
  42.312 +}
  42.313 +
  42.314 +
  42.315 +UINT64
  42.316 +vmx_vcpu_get_psr(VCPU *vcpu)
  42.317 +{
  42.318 +    return VMX_VPD(vcpu,vpsr);
  42.319 +}
  42.320 +
  42.321 +
  42.322 +IA64FAULT
  42.323 +vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
  42.324 +{
  42.325 +    IA64_PSR vpsr;
  42.326 +
  42.327 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  42.328 +    if ( vpsr.bn ) {
  42.329 +        *val=VMX_VPD(vcpu,vgr[reg-16]);
  42.330 +        // Check NAT bit
  42.331 +        if ( VMX_VPD(vcpu,vnat) & (1UL<<(reg-16)) ) {
  42.332 +            // TODO
  42.333 +            //panic ("NAT consumption fault\n");
  42.334 +            return IA64_FAULT;
  42.335 +        }
  42.336 +
  42.337 +    }
  42.338 +    else {
  42.339 +        *val=VMX_VPD(vcpu,vbgr[reg-16]);
   42.340 +        if ( VMX_VPD(vcpu,vbnat) & (1UL<<(reg-16)) ) {
  42.341 +            //panic ("NAT consumption fault\n");
  42.342 +            return IA64_FAULT;
  42.343 +        }
  42.344 +
  42.345 +    }
  42.346 +    return IA64_NO_FAULT;
  42.347 +}
  42.348 +
  42.349 +IA64FAULT
  42.350 +vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
  42.351 +{
  42.352 +    IA64_PSR vpsr;
  42.353 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
  42.354 +    if ( vpsr.bn ) {
  42.355 +        VMX_VPD(vcpu,vgr[reg-16]) = val;
  42.356 +        if(nat){
  42.357 +            VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg-16) );
  42.358 +        }else{
   42.359 +            VMX_VPD(vcpu,vnat) &= ~( 1UL<<(reg-16) );
  42.360 +        }
  42.361 +    }
  42.362 +    else {
  42.363 +        VMX_VPD(vcpu,vbgr[reg-16]) = val;
  42.364 +        if(nat){
   42.365 +            VMX_VPD(vcpu,vbnat) |= ( 1UL<<(reg-16) );
   42.366 +        }else{
   42.367 +            VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
  42.368 +        }
  42.369 +    }
  42.370 +    return IA64_NO_FAULT;
  42.371 +}
  42.372 +
  42.373 +
  42.374 +
  42.375 +IA64FAULT
  42.376 +vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
  42.377 +{
  42.378 +    REGS *regs=vcpu_regs(vcpu);
  42.379 +    u64 nat;
  42.380 +    //TODO, Eddie
   42.381 +    if (!regs) return IA64_FAULT;
  42.382 +    if (reg >= 16 && reg < 32) {
  42.383 +        return vmx_vcpu_get_bgr(vcpu,reg,val);
  42.384 +    }
  42.385 +    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
  42.386 +    if(nat){
  42.387 +        return IA64_FAULT;
  42.388 +    }
  42.389 +    return IA64_NO_FAULT;
  42.390 +}
  42.391 +
  42.392 +// returns:
  42.393 +//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
  42.394 +//   IA64_NO_FAULT otherwise
  42.395 +
  42.396 +IA64FAULT
  42.397 +vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
  42.398 +{
   42.399 +    REGS *regs = vcpu_regs(vcpu);
   42.400 +    long sof;
   42.401 +    //TODO Eddie
   42.402 +    if (!regs) return IA64_ILLOP_FAULT;
   42.403 +    sof = (regs->cr_ifs) & 0x7f;
   42.404 +    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
  42.405 +    if ( reg >= 16 && reg < 32 ) {
  42.406 +        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
  42.407 +    }
  42.408 +    setreg(reg,value,nat,regs);
  42.409 +    return IA64_NO_FAULT;
  42.410 +}
  42.411 +
  42.412 +
  42.413 +IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
  42.414 +{
  42.415 +    UINT64 vpsr;
  42.416 +    vpsr = vmx_vcpu_get_psr(vcpu);
  42.417 +    vpsr &= (~imm24);
  42.418 +    vmx_vcpu_set_psr(vcpu, vpsr);
  42.419 +    return IA64_NO_FAULT;
  42.420 +}
  42.421 +
  42.422 +
  42.423 +IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
  42.424 +{
  42.425 +    UINT64 vpsr;
  42.426 +    vpsr = vmx_vcpu_get_psr(vcpu);
  42.427 +    vpsr |= imm24;
  42.428 +    vmx_vcpu_set_psr(vcpu, vpsr);
  42.429 +    return IA64_NO_FAULT;
  42.430 +}
  42.431 +
  42.432 +
  42.433 +IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
  42.434 +{
  42.435 +    vmx_vcpu_set_psr(vcpu, val);
  42.436 +    return IA64_NO_FAULT;
  42.437 +}
  42.438 +
  42.439 +
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/xen/arch/ia64/vmx_virt.c	Fri May 20 17:23:51 2005 +0000
    43.3 @@ -0,0 +1,1501 @@
    43.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    43.5 +/*
    43.6 + * vmx_virt.c:
    43.7 + * Copyright (c) 2005, Intel Corporation.
    43.8 + *
    43.9 + * This program is free software; you can redistribute it and/or modify it
   43.10 + * under the terms and conditions of the GNU General Public License,
   43.11 + * version 2, as published by the Free Software Foundation.
   43.12 + *
   43.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   43.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   43.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   43.16 + * more details.
   43.17 + *
   43.18 + * You should have received a copy of the GNU General Public License along with
   43.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   43.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   43.21 + *
   43.22 + *  Fred yang (fred.yang@intel.com)
   43.23 + *  Shaofan Li (Susue Li) <susie.li@intel.com>
   43.24 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
   43.25 + */
   43.26 +
   43.27 +
   43.28 +
   43.29 +#include <asm/privop.h>
   43.30 +#include <asm/vmx_vcpu.h>
   43.31 +#include <asm/processor.h>
   43.32 +#include <asm/delay.h>	// Debug only
   43.33 +#include <asm/vmmu.h>
   43.34 +#include <asm/vmx_mm_def.h>
   43.35 +#include <asm/smp.h>
   43.36 +
   43.37 +#include <asm/virt_event.h>
   43.38 +extern UINT64 privop_trace;
   43.39 +
   43.40 +void
   43.41 +ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64  * cause)
   43.42 +{
   43.43 +    *cause=0;
   43.44 +    switch (slot_type) {
   43.45 +        case M:
   43.46 +        if (inst.generic.major==0){
   43.47 +            if(inst.M28.x3==0){
   43.48 +                if(inst.M44.x4==6){
   43.49 +                    *cause=EVENT_SSM;
   43.50 +                }else if(inst.M44.x4==7){
   43.51 +                    *cause=EVENT_RSM;
   43.52 +                }else if(inst.M30.x4==8&&inst.M30.x2==2){
   43.53 +                    *cause=EVENT_MOV_TO_AR_IMM;
   43.54 +                }
   43.55 +            }
   43.56 +        }
   43.57 +        else if(inst.generic.major==1){
   43.58 +            if(inst.M28.x3==0){
   43.59 +                if(inst.M32.x6==0x2c){
   43.60 +                    *cause=EVENT_MOV_TO_CR;
   43.61 +                }else if(inst.M33.x6==0x24){
   43.62 +                    *cause=EVENT_MOV_FROM_CR;
   43.63 +                }else if(inst.M35.x6==0x2d){
   43.64 +                    *cause=EVENT_MOV_TO_PSR;
   43.65 +                }else if(inst.M36.x6==0x25){
   43.66 +                    *cause=EVENT_MOV_FROM_PSR;
   43.67 +                }else if(inst.M29.x6==0x2A){
   43.68 +                    *cause=EVENT_MOV_TO_AR;
   43.69 +                }else if(inst.M31.x6==0x22){
   43.70 +                    *cause=EVENT_MOV_FROM_AR;
   43.71 +                }else if(inst.M45.x6==0x09){
   43.72 +                    *cause=EVENT_PTC_L;
   43.73 +                }else if(inst.M45.x6==0x0A){
   43.74 +                    *cause=EVENT_PTC_G;
   43.75 +                }else if(inst.M45.x6==0x0B){
   43.76 +                    *cause=EVENT_PTC_GA;
   43.77 +                }else if(inst.M45.x6==0x0C){
   43.78 +                    *cause=EVENT_PTR_D;
   43.79 +                }else if(inst.M45.x6==0x0D){
   43.80 +                    *cause=EVENT_PTR_I;
   43.81 +                }else if(inst.M46.x6==0x1A){
   43.82 +                    *cause=EVENT_THASH;
   43.83 +                }else if(inst.M46.x6==0x1B){
   43.84 +                    *cause=EVENT_TTAG;
   43.85 +                }else if(inst.M46.x6==0x1E){
   43.86 +                    *cause=EVENT_TPA;
   43.87 +                }else if(inst.M46.x6==0x1F){
   43.88 +                    *cause=EVENT_TAK;
   43.89 +                }else if(inst.M47.x6==0x34){
   43.90 +                    *cause=EVENT_PTC_E;
   43.91 +                }else if(inst.M41.x6==0x2E){
   43.92 +                    *cause=EVENT_ITC_D;
   43.93 +                }else if(inst.M41.x6==0x2F){
   43.94 +                    *cause=EVENT_ITC_I;
   43.95 +                }else if(inst.M42.x6==0x00){
   43.96 +                    *cause=EVENT_MOV_TO_RR;
   43.97 +                }else if(inst.M42.x6==0x01){
   43.98 +                    *cause=EVENT_MOV_TO_DBR;
   43.99 +                }else if(inst.M42.x6==0x02){
  43.100 +                    *cause=EVENT_MOV_TO_IBR;
  43.101 +                }else if(inst.M42.x6==0x03){
  43.102 +                    *cause=EVENT_MOV_TO_PKR;
  43.103 +                }else if(inst.M42.x6==0x04){
  43.104 +                    *cause=EVENT_MOV_TO_PMC;
  43.105 +                }else if(inst.M42.x6==0x05){
  43.106 +                    *cause=EVENT_MOV_TO_PMD;
  43.107 +                }else if(inst.M42.x6==0x0E){
  43.108 +                    *cause=EVENT_ITR_D;
  43.109 +                }else if(inst.M42.x6==0x0F){
  43.110 +                    *cause=EVENT_ITR_I;
  43.111 +                }else if(inst.M43.x6==0x10){
  43.112 +                    *cause=EVENT_MOV_FROM_RR;
  43.113 +                }else if(inst.M43.x6==0x11){
  43.114 +                    *cause=EVENT_MOV_FROM_DBR;
  43.115 +                }else if(inst.M43.x6==0x12){
  43.116 +                    *cause=EVENT_MOV_FROM_IBR;
  43.117 +                }else if(inst.M43.x6==0x13){
  43.118 +                    *cause=EVENT_MOV_FROM_PKR;
  43.119 +                }else if(inst.M43.x6==0x14){
  43.120 +                    *cause=EVENT_MOV_FROM_PMC;
  43.121 +/*
  43.122 +                }else if(inst.M43.x6==0x15){
  43.123 +                    *cause=EVENT_MOV_FROM_PMD;
  43.124 +*/
  43.125 +                }else if(inst.M43.x6==0x17){
  43.126 +                    *cause=EVENT_MOV_FROM_CPUID;
  43.127 +                }
  43.128 +            }
  43.129 +        }
  43.130 +        break;
  43.131 +        case B:
  43.132 +        if(inst.generic.major==0){
  43.133 +            if(inst.B8.x6==0x02){
  43.134 +                *cause=EVENT_COVER;
  43.135 +            }else if(inst.B8.x6==0x08){
  43.136 +                *cause=EVENT_RFI;
  43.137 +            }else if(inst.B8.x6==0x0c){
  43.138 +                *cause=EVENT_BSW_0;
  43.139 +            }else if(inst.B8.x6==0x0d){
  43.140 +                *cause=EVENT_BSW_1;
  43.141 +            }
  43.142 +        }
  43.143 +    }
  43.144 +}
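          +
          +/* Illustration (hypothetical caller, not part of this changeset):
          + * the decoder maps a bundle slot to an EVENT_* cause that a
          + * dispatcher can then switch on, roughly:
          + *
          + *   UINT64 cause;
          + *   ia64_priv_decoder(slot_type, inst, &cause);
          + *   switch (cause) {
          + *   case EVENT_RSM:   fault = vmx_emul_rsm(vcpu, inst);   break;
          + *   case EVENT_SSM:   fault = vmx_emul_ssm(vcpu, inst);   break;
          + *   case EVENT_COVER: fault = vmx_emul_cover(vcpu, inst); break;
          + *   // ... one case per EVENT_* value decoded above ...
          + *   }
          + */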
  43.145 +
  43.146 +IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
  43.147 +{
  43.148 +    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
  43.149 +    return vmx_vcpu_reset_psr_sm(vcpu,imm24);
  43.150 +}
  43.151 +
  43.152 +IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
  43.153 +{
  43.154 +    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
  43.155 +    return vmx_vcpu_set_psr_sm(vcpu,imm24);
  43.156 +}
  43.157 +
  43.158 +unsigned long last_guest_psr = 0x0;
  43.159 +IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
  43.160 +{
  43.161 +    UINT64 tgt = inst.M33.r1;
  43.162 +    UINT64 val;
  43.163 +    IA64FAULT fault;
  43.164 +
  43.165 +/*
  43.166 +    if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
  43.167 +        return vmx_vcpu_set_gr(vcpu, tgt, val);
  43.168 +    else return fault;
  43.169 +    */
  43.170 +    val = vmx_vcpu_get_psr(vcpu);
  43.171 +    val = (val & MASK(0, 32)) | (val & MASK(35, 2));
  43.172 +    last_guest_psr = val;
  43.173 +    return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
  43.174 +}
  43.175 +
  43.176 +/**
  43.177 + * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
  43.178 + */
  43.179 +IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
  43.180 +{
  43.181 +    UINT64 val;
  43.182 +    IA64FAULT fault;
   43.183 +    if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
   43.184 +        panic(" get_psr nat bit fault\n");
   43.185 +
   43.186 +    val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32));
  43.187 +#if 0
  43.188 +	if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
  43.189 +		while(1);
  43.190 +	else
  43.191 +		last_mov_from_psr = 0;
  43.192 +#endif
   43.193 +    return vmx_vcpu_set_psr_l(vcpu,val);
  43.194 +}
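          +
          +/* Annotation: mov-to-psr only updates PSR{31:0}; PSR{63:32} is
          + * carried over from the current virtual PSR above, matching the
          + * architected behaviour of the "mov psr.l = r2" form.
          + */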
  43.195 +
  43.196 +
  43.197 +/**************************************************************************
  43.198 +Privileged operation emulation routines
  43.199 +**************************************************************************/
  43.200 +
  43.201 +IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
  43.202 +{
  43.203 +    IA64_PSR  vpsr;
  43.204 +    REGS *regs;
  43.205 +#ifdef  CHECK_FAULT
  43.206 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.207 +    if ( vpsr.cpl != 0) {
  43.208 +        /* Inject Privileged Operation fault into guest */
  43.209 +        set_privileged_operation_isr (vcpu, 0);
  43.210 +        privilege_op (vcpu);
  43.211 +        return IA64_FAULT;
  43.212 +    }
  43.213 +#endif // CHECK_FAULT
  43.214 +    regs=vcpu_regs(vcpu);
  43.215 +    vpsr.val=regs->cr_ipsr;
  43.216 +    if ( vpsr.is == 1 ) {
   43.217 +        panic ("We do not support IA-32 instructions yet");
  43.218 +    }
  43.219 +
  43.220 +    return vmx_vcpu_rfi(vcpu);
  43.221 +}
  43.222 +
  43.223 +IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
  43.224 +{
  43.225 +#ifdef  CHECK_FAULT
  43.226 +    IA64_PSR  vpsr;
  43.227 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.228 +    if ( vpsr.cpl != 0) {
  43.229 +        /* Inject Privileged Operation fault into guest */
  43.230 +        set_privileged_operation_isr (vcpu, 0);
  43.231 +        privilege_op (vcpu);
  43.232 +        return IA64_FAULT;
  43.233 +    }
  43.234 +#endif // CHECK_FAULT
   43.235 +    return vmx_vcpu_bsw0(vcpu);
  43.236 +}
  43.237 +
  43.238 +IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
  43.239 +{
  43.240 +#ifdef  CHECK_FAULT
  43.241 +    IA64_PSR  vpsr;
  43.242 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.243 +    if ( vpsr.cpl != 0) {
  43.244 +        /* Inject Privileged Operation fault into guest */
  43.245 +        set_privileged_operation_isr (vcpu, 0);
  43.246 +        privilege_op (vcpu);
  43.247 +        return IA64_FAULT;
  43.248 +    }
  43.249 +#endif // CHECK_FAULT
  43.250 +    return vmx_vcpu_bsw1(vcpu);
  43.251 +}
  43.252 +
  43.253 +IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
  43.254 +{
  43.255 +    return vmx_vcpu_cover(vcpu);
  43.256 +}
  43.257 +
  43.258 +IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
  43.259 +{
  43.260 +    u64 r2,r3;
  43.261 +    ISR isr;
  43.262 +    IA64_PSR  vpsr;
  43.263 +
  43.264 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.265 +    if ( vpsr.cpl != 0) {
  43.266 +        /* Inject Privileged Operation fault into guest */
  43.267 +        set_privileged_operation_isr (vcpu, 0);
  43.268 +        privilege_op (vcpu);
  43.269 +        return IA64_FAULT;
  43.270 +    }
  43.271 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
  43.272 +#ifdef  VMAL_NO_FAULT_CHECK
  43.273 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.274 +        rnat_comsumption(vcpu);
  43.275 +        return IA64_FAULT;
  43.276 +#endif // VMAL_NO_FAULT_CHECK
  43.277 +    }
  43.278 +#ifdef  VMAL_NO_FAULT_CHECK
  43.279 +    if (unimplemented_gva(vcpu,r3) ) {
  43.280 +        isr.val = set_isr_ei_ni(vcpu);
  43.281 +        isr.code = IA64_RESERVED_REG_FAULT;
  43.282 +        vcpu_set_isr(vcpu, isr.val);
  43.283 +        unimpl_daddr(vcpu);
  43.284 +        return IA64_FAULT;
   43.285 +    }
  43.286 +#endif // VMAL_NO_FAULT_CHECK
  43.287 +    return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
  43.288 +}
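          +
          +/* Annotation: for ptc.l the purge address comes from r3 and the purge
          + * page size from r2{7:2}, which is what bits(r2, 2, 7) extracts above
          + * (bits() is assumed here to return the inclusive bit range {7:2}).
          + */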
  43.289 +
  43.290 +IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
  43.291 +{
  43.292 +    u64 r3;
  43.293 +    ISR isr;
  43.294 +    IA64_PSR  vpsr;
  43.295 +
  43.296 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.297 +#ifdef  VMAL_NO_FAULT_CHECK
  43.298 +    if ( vpsr.cpl != 0) {
  43.299 +        /* Inject Privileged Operation fault into guest */
  43.300 +        set_privileged_operation_isr (vcpu, 0);
  43.301 +        privilege_op (vcpu);
  43.302 +        return IA64_FAULT;
  43.303 +    }
  43.304 +#endif // VMAL_NO_FAULT_CHECK
  43.305 +    if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
  43.306 +#ifdef  VMAL_NO_FAULT_CHECK
  43.307 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.308 +        rnat_comsumption(vcpu);
  43.309 +        return IA64_FAULT;
  43.310 +#endif // VMAL_NO_FAULT_CHECK
  43.311 +    }
  43.312 +    return vmx_vcpu_ptc_e(vcpu,r3);
  43.313 +}
  43.314 +
  43.315 +IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
  43.316 +{
  43.317 +    return vmx_emul_ptc_l(vcpu, inst);
  43.318 +}
  43.319 +
  43.320 +IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
  43.321 +{
  43.322 +    return vmx_emul_ptc_l(vcpu, inst);
  43.323 +}
  43.324 +
  43.325 +IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
  43.326 +{
  43.327 +    ISR isr;
  43.328 +    IA64FAULT	ret1, ret2;
  43.329 +
  43.330 +#ifdef  VMAL_NO_FAULT_CHECK
  43.331 +    IA64_PSR  vpsr;
  43.332 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.333 +    if ( vpsr.cpl != 0) {
  43.334 +        /* Inject Privileged Operation fault into guest */
  43.335 +        set_privileged_operation_isr (vcpu, 0);
  43.336 +        privilege_op (vcpu);
  43.337 +        return IA64_FAULT;
  43.338 +    }
  43.339 +#endif // VMAL_NO_FAULT_CHECK
  43.340 +    ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
  43.341 +    ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
  43.342 +#ifdef  VMAL_NO_FAULT_CHECK
  43.343 +    if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
  43.344 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.345 +        rnat_comsumption(vcpu);
  43.346 +        return IA64_FAULT;
  43.347 +    }
   43.348 +    if ( unimplemented_gva(vcpu, *pr3) ) {
  43.349 +        isr.val = set_isr_ei_ni(vcpu);
  43.350 +        isr.code = IA64_RESERVED_REG_FAULT;
  43.351 +        vcpu_set_isr(vcpu, isr.val);
  43.352 +        unimpl_daddr(vcpu);
  43.353 +        return IA64_FAULT;
   43.354 +    }
   43.355 +#endif // VMAL_NO_FAULT_CHECK
   43.356 +    return IA64_NO_FAULT;
  43.357 +}
  43.358 +
  43.359 +IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
  43.360 +{
  43.361 +    u64 r2,r3;
  43.362 +    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
  43.363 +    	return IA64_FAULT;
  43.364 +    return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
  43.365 +}
  43.366 +
  43.367 +IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
  43.368 +{
  43.369 +    u64 r2,r3;
  43.370 +    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
  43.371 +    	return IA64_FAULT;
  43.372 +    return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
  43.373 +}
  43.374 +
  43.375 +
  43.376 +IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
  43.377 +{
  43.378 +    u64 r1,r3;
  43.379 +    ISR visr;
  43.380 +    IA64_PSR vpsr;
  43.381 +#ifdef  CHECK_FAULT
  43.382 +    if(check_target_register(vcpu, inst.M46.r1)){
  43.383 +        set_illegal_op_isr(vcpu);
  43.384 +        illegal_op(vcpu);
  43.385 +        return IA64_FAULT;
  43.386 +    }
  43.387 +#endif //CHECK_FAULT
  43.388 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  43.389 +#ifdef  CHECK_FAULT
  43.390 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  43.391 +        return IA64_NO_FAULT;
  43.392 +#endif  //CHECK_FAULT
  43.393 +    }
  43.394 +#ifdef  CHECK_FAULT
  43.395 +    if(unimplemented_gva(vcpu, r3)){
  43.396 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  43.397 +        return IA64_NO_FAULT;
  43.398 +    }
  43.399 +#endif  //CHECK_FAULT
  43.400 +    vmx_vcpu_thash(vcpu, r3, &r1);
  43.401 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  43.402 +    return(IA64_NO_FAULT);
  43.403 +}
  43.404 +
  43.405 +
  43.406 +IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
  43.407 +{
  43.408 +    u64 r1,r3;
  43.409 +    ISR visr;
  43.410 +    IA64_PSR vpsr;
   43.411 +#ifdef  CHECK_FAULT
  43.412 +    if(check_target_register(vcpu, inst.M46.r1)){
  43.413 +        set_illegal_op_isr(vcpu);
  43.414 +        illegal_op(vcpu);
  43.415 +        return IA64_FAULT;
  43.416 +    }
  43.417 +#endif //CHECK_FAULT
  43.418 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  43.419 +#ifdef  CHECK_FAULT
  43.420 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  43.421 +        return IA64_NO_FAULT;
  43.422 +#endif  //CHECK_FAULT
  43.423 +    }
  43.424 +#ifdef  CHECK_FAULT
  43.425 +    if(unimplemented_gva(vcpu, r3)){
  43.426 +        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  43.427 +        return IA64_NO_FAULT;
  43.428 +    }
  43.429 +#endif  //CHECK_FAULT
  43.430 +    vmx_vcpu_ttag(vcpu, r3, &r1);
  43.431 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  43.432 +    return(IA64_NO_FAULT);
  43.433 +}
  43.434 +
  43.435 +
  43.436 +IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
  43.437 +{
  43.438 +    u64 r1,r3;
  43.439 +    ISR visr;
  43.440 +#ifdef  CHECK_FAULT
  43.441 +    if(check_target_register(vcpu, inst.M46.r1)){
  43.442 +        set_illegal_op_isr(vcpu);
  43.443 +        illegal_op(vcpu);
  43.444 +        return IA64_FAULT;
  43.445 +    }
  43.446 +    IA64_PSR vpsr;
  43.447 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.448 +    if(vpsr.cpl!=0){
  43.449 +        visr.val=0;
  43.450 +        vcpu_set_isr(vcpu, visr.val);
  43.451 +        return IA64_FAULT;
  43.452 +    }
  43.453 +#endif  //CHECK_FAULT
  43.454 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  43.455 +#ifdef  CHECK_FAULT
  43.456 +        set_isr_reg_nat_consumption(vcpu,0,1);
  43.457 +        rnat_comsumption(vcpu);
  43.458 +        return IA64_FAULT;
  43.459 +#endif  //CHECK_FAULT
  43.460 +    }
  43.461 +#ifdef  CHECK_FAULT
  43.462 +    if (unimplemented_gva(vcpu,r3) ) {
  43.463 +        // inject unimplemented_data_address_fault
  43.464 +        visr.val = set_isr_ei_ni(vcpu);
  43.465 +        visr.code = IA64_RESERVED_REG_FAULT;
   43.466 +        vcpu_set_isr(vcpu, visr.val);
  43.467 +        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
  43.468 +        unimpl_daddr(vcpu);
  43.469 +        return IA64_FAULT;
   43.470 +    }
  43.471 +#endif  //CHECK_FAULT
  43.472 +
  43.473 +    if(vmx_vcpu_tpa(vcpu, r3, &r1)){
  43.474 +        return IA64_FAULT;
  43.475 +    }
  43.476 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  43.477 +    return(IA64_NO_FAULT);
  43.478 +}
  43.479 +
  43.480 +IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
  43.481 +{
  43.482 +    u64 r1,r3;
  43.483 +    ISR visr;
  43.484 +    IA64_PSR vpsr;
  43.485 +    int fault=IA64_NO_FAULT;
  43.486 +#ifdef  CHECK_FAULT
  43.487 +    visr.val=0;
  43.488 +    if(check_target_register(vcpu, inst.M46.r1)){
  43.489 +        set_illegal_op_isr(vcpu);
  43.490 +        illegal_op(vcpu);
  43.491 +        return IA64_FAULT;
  43.492 +    }
  43.493 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.494 +    if(vpsr.cpl!=0){
  43.495 +        vcpu_set_isr(vcpu, visr.val);
  43.496 +        return IA64_FAULT;
  43.497 +    }
  43.498 +#endif
  43.499 +    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  43.500 +#ifdef  CHECK_FAULT
  43.501 +        set_isr_reg_nat_consumption(vcpu,0,1);
  43.502 +        rnat_comsumption(vcpu);
  43.503 +        return IA64_FAULT;
  43.504 +#endif
  43.505 +    }
  43.506 +    if(vmx_vcpu_tak(vcpu, r3, &r1)){
  43.507 +        return IA64_FAULT;
  43.508 +    }
  43.509 +    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  43.510 +    return(IA64_NO_FAULT);
  43.511 +}
  43.512 +
  43.513 +
  43.514 +/************************************
  43.515 + * Insert translation register/cache
  43.516 +************************************/
  43.517 +
  43.518 +IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
  43.519 +{
  43.520 +    UINT64 fault, itir, ifa, pte, slot;
  43.521 +    ISR isr;
  43.522 +    IA64_PSR  vpsr;
  43.523 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.524 +    if ( vpsr.ic ) {
  43.525 +        set_illegal_op_isr(vcpu);
  43.526 +        illegal_op(vcpu);
  43.527 +        return IA64_FAULT;
  43.528 +    }
  43.529 +#ifdef  VMAL_NO_FAULT_CHECK
  43.530 +    if ( vpsr.cpl != 0) {
  43.531 +        /* Inject Privileged Operation fault into guest */
  43.532 +        set_privileged_operation_isr (vcpu, 0);
  43.533 +        privilege_op (vcpu);
  43.534 +        return IA64_FAULT;
  43.535 +    }
  43.536 +#endif // VMAL_NO_FAULT_CHECK
  43.537 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  43.538 +#ifdef  VMAL_NO_FAULT_CHECK
  43.539 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.540 +        rnat_comsumption(vcpu);
  43.541 +        return IA64_FAULT;
  43.542 +#endif // VMAL_NO_FAULT_CHECK
  43.543 +    }
  43.544 +#ifdef  VMAL_NO_FAULT_CHECK
  43.545 +    if(is_reserved_rr_register(vcpu, slot)){
  43.546 +        set_illegal_op_isr(vcpu);
  43.547 +        illegal_op(vcpu);
  43.548 +        return IA64_FAULT;
  43.549 +    }
  43.550 +#endif // VMAL_NO_FAULT_CHECK
  43.551 +
  43.552 +    if (vmx_vcpu_get_itir(vcpu,&itir)){
  43.553 +        return(IA64_FAULT);
  43.554 +    }
  43.555 +    if (vmx_vcpu_get_ifa(vcpu,&ifa)){
  43.556 +        return(IA64_FAULT);
  43.557 +    }
  43.558 +#ifdef  VMAL_NO_FAULT_CHECK
  43.559 +    if (is_reserved_itir_field(vcpu, itir)) {
  43.560 +    	// TODO
  43.561 +    	return IA64_FAULT;
  43.562 +    }
  43.563 +    if (unimplemented_gva(vcpu,ifa) ) {
  43.564 +        isr.val = set_isr_ei_ni(vcpu);
  43.565 +        isr.code = IA64_RESERVED_REG_FAULT;
  43.566 +        vcpu_set_isr(vcpu, isr.val);
  43.567 +        unimpl_daddr(vcpu);
  43.568 +        return IA64_FAULT;
   43.569 +    }
  43.570 +#endif // VMAL_NO_FAULT_CHECK
  43.571 +
  43.572 +    return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot));
  43.573 +}
  43.574 +
  43.575 +IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
  43.576 +{
  43.577 +    UINT64 fault, itir, ifa, pte, slot;
  43.578 +    ISR isr;
  43.579 +    IA64_PSR  vpsr;
  43.580 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.581 +    if ( vpsr.ic ) {
  43.582 +        set_illegal_op_isr(vcpu);
  43.583 +        illegal_op(vcpu);
  43.584 +        return IA64_FAULT;
  43.585 +    }
  43.586 +#ifdef  VMAL_NO_FAULT_CHECK
  43.587 +    if ( vpsr.cpl != 0) {
  43.588 +        /* Inject Privileged Operation fault into guest */
  43.589 +        set_privileged_operation_isr (vcpu, 0);
  43.590 +        privilege_op (vcpu);
  43.591 +        return IA64_FAULT;
  43.592 +    }
  43.593 +#endif // VMAL_NO_FAULT_CHECK
  43.594 +    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  43.595 +#ifdef  VMAL_NO_FAULT_CHECK
  43.596 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.597 +        rnat_comsumption(vcpu);
  43.598 +        return IA64_FAULT;
  43.599 +#endif // VMAL_NO_FAULT_CHECK
  43.600 +    }
  43.601 +#ifdef  VMAL_NO_FAULT_CHECK
  43.602 +    if(is_reserved_rr_register(vcpu, slot)){
  43.603 +        set_illegal_op_isr(vcpu);
  43.604 +        illegal_op(vcpu);
  43.605 +        return IA64_FAULT;
  43.606 +    }
  43.607 +#endif // VMAL_NO_FAULT_CHECK
  43.608 +
  43.609 +    if (vmx_vcpu_get_itir(vcpu,&itir)){
  43.610 +        return(IA64_FAULT);
  43.611 +    }
  43.612 +    if (vmx_vcpu_get_ifa(vcpu,&ifa)){
  43.613 +        return(IA64_FAULT);
  43.614 +    }
  43.615 +#ifdef  VMAL_NO_FAULT_CHECK
  43.616 +    if (is_reserved_itir_field(vcpu, itir)) {
  43.617 +    	// TODO
  43.618 +    	return IA64_FAULT;
  43.619 +    }
  43.620 +    if (unimplemented_gva(vcpu,ifa) ) {
  43.621 +        isr.val = set_isr_ei_ni(vcpu);
  43.622 +        isr.code = IA64_RESERVED_REG_FAULT;
  43.623 +        vcpu_set_isr(vcpu, isr.val);
  43.624 +        unimpl_daddr(vcpu);
  43.625 +        return IA64_FAULT;
   43.626 +    }
  43.627 +#endif // VMAL_NO_FAULT_CHECK
  43.628 +
   43.629 +    return (vmx_vcpu_itr_i(vcpu,pte,itir,ifa,slot));
  43.630 +}
  43.631 +
  43.632 +IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
  43.633 +{
  43.634 +    UINT64 fault;
  43.635 +    ISR isr;
  43.636 +    IA64_PSR  vpsr;
  43.637 +    IA64FAULT	ret1;
  43.638 +
  43.639 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.640 +    if ( vpsr.ic ) {
  43.641 +        set_illegal_op_isr(vcpu);
  43.642 +        illegal_op(vcpu);
  43.643 +        return IA64_FAULT;
  43.644 +    }
  43.645 +
  43.646 +#ifdef  VMAL_NO_FAULT_CHECK
  43.647 +    if ( vpsr.cpl != 0) {
  43.648 +        /* Inject Privileged Operation fault into guest */
  43.649 +        set_privileged_operation_isr (vcpu, 0);
  43.650 +        privilege_op (vcpu);
  43.651 +        return IA64_FAULT;
  43.652 +    }
  43.653 +#endif // VMAL_NO_FAULT_CHECK
  43.654 +    ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pte);
  43.655 +#ifdef  VMAL_NO_FAULT_CHECK
  43.656 +    if( ret1 != IA64_NO_FAULT ){
  43.657 +        set_isr_reg_nat_consumption(vcpu,0,0);
  43.658 +        rnat_comsumption(vcpu);
  43.659 +        return IA64_FAULT;
  43.660 +    }
  43.661 +#endif // VMAL_NO_FAULT_CHECK
  43.662 +
  43.663 +    if (vmx_vcpu_get_itir(vcpu,itir)){
  43.664 +        return(IA64_FAULT);
  43.665 +    }
  43.666 +    if (vmx_vcpu_get_ifa(vcpu,ifa)){
  43.667 +        return(IA64_FAULT);
  43.668 +    }
  43.669 +#ifdef  VMAL_NO_FAULT_CHECK
   43.670 +    if ( unimplemented_gva(vcpu, *ifa) ) {
  43.671 +        isr.val = set_isr_ei_ni(vcpu);
  43.672 +        isr.code = IA64_RESERVED_REG_FAULT;
  43.673 +        vcpu_set_isr(vcpu, isr.val);
  43.674 +        unimpl_daddr(vcpu);
  43.675 +        return IA64_FAULT;
   43.676 +    }
   43.677 +#endif // VMAL_NO_FAULT_CHECK
   43.678 +    return IA64_NO_FAULT;
  43.679 +}
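          +
          +/* Annotation: TLB inserts (itc/itr) are only legal with PSR.ic
          + * cleared, which is why this helper and the itr paths above raise an
          + * Illegal Operation fault when the guest attempts an insert while
          + * vpsr.ic is still set.
          + */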
  43.680 +
  43.681 +IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
  43.682 +{
  43.683 +    UINT64 itir, ifa, pte;
  43.684 +
  43.685 +    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
  43.686 +    	return IA64_FAULT;
  43.687 +    }
  43.688 +
   43.689 +    return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
  43.690 +}
  43.691 +
  43.692 +IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
  43.693 +{
  43.694 +    UINT64 itir, ifa, pte;
  43.695 +
  43.696 +    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
  43.697 +    	return IA64_FAULT;
  43.698 +    }
  43.699 +
   43.700 +    return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
  43.701 +
  43.702 +}
  43.703 +
  43.704 +/*************************************
  43.705 + * Moves to semi-privileged registers
  43.706 +*************************************/
  43.707 +
  43.708 +IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
  43.709 +{
  43.710 +    // I27 and M30 are identical for these fields
  43.711 +    if(inst.M30.ar3!=44){
   43.712 +        panic("Can't support AR registers other than itc");
  43.713 +    }
  43.714 +#ifdef  CHECK_FAULT
  43.715 +    IA64_PSR vpsr;
  43.716 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
  43.717 +    if ( vpsr.cpl != 0) {
  43.718 +        /* Inject Privileged Operation fault into guest */
  43.719 +        set_privileged_operation_isr (vcpu, 0);
  43.720 +        privilege_op (vcpu);
  43.721 +        return IA64_FAULT;
  43.722 +    }
  43.723 +#endif // CHECK_FAULT
  43.724 +    UINT64  imm;
  43.725 +    if(inst.M30.s){
  43.726 +        imm = -inst.M30.imm;
  43.727 +    }else{
  43.728 +        imm = inst.M30.imm;