ia64/xen-unstable

changeset 10840:e5c84586c333

Merge with xenppc-unstable-merge.hg
author kfraser@localhost.localdomain
date Fri Jul 28 10:51:38 2006 +0100 (2006-07-28)
parents 1eb42266de1b 158db2446071
children 7ee2c02c6db0
files tools/libxc/xc_ia64_stubs.c xen/include/asm-ia64/linux/asm/asmmacro.h
line diff
     1.1 --- a/Config.mk	Thu Jul 27 17:44:14 2006 -0500
     1.2 +++ b/Config.mk	Fri Jul 28 10:51:38 2006 +0100
     1.3 @@ -36,6 +36,8 @@ ifneq ($(debug),y)
     1.4  CFLAGS    ?= -O2 -fomit-frame-pointer
     1.5  CFLAGS    += -DNDEBUG
     1.6  else
     1.7 +# Less than -O1 produces bad code and large stack frames
     1.8 +CFLAGS    ?= -O1 -fno-omit-frame-pointer
     1.9  CFLAGS    += -g
    1.10  endif
    1.11  
     2.1 --- a/buildconfigs/linux-defconfig_xen0_ia64	Thu Jul 27 17:44:14 2006 -0500
     2.2 +++ b/buildconfigs/linux-defconfig_xen0_ia64	Fri Jul 28 10:51:38 2006 +0100
     2.3 @@ -1,7 +1,7 @@
     2.4  #
     2.5  # Automatically generated make config: don't edit
     2.6  # Linux kernel version: 2.6.16.13-xen0
     2.7 -# Mon May 22 14:46:31 2006
     2.8 +# Fri Jun 30 12:59:19 2006
     2.9  #
    2.10  
    2.11  #
    2.12 @@ -721,21 +721,10 @@ CONFIG_SERIAL_NONSTANDARD=y
    2.13  #
    2.14  # Serial drivers
    2.15  #
    2.16 -CONFIG_SERIAL_8250=y
    2.17 -CONFIG_SERIAL_8250_CONSOLE=y
    2.18 -CONFIG_SERIAL_8250_ACPI=y
    2.19 -CONFIG_SERIAL_8250_NR_UARTS=6
    2.20 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4
    2.21 -CONFIG_SERIAL_8250_EXTENDED=y
    2.22 -CONFIG_SERIAL_8250_SHARE_IRQ=y
    2.23 -# CONFIG_SERIAL_8250_DETECT_IRQ is not set
    2.24 -# CONFIG_SERIAL_8250_RSA is not set
    2.25  
    2.26  #
    2.27  # Non-8250 serial port support
    2.28  #
    2.29 -CONFIG_SERIAL_CORE=y
    2.30 -CONFIG_SERIAL_CORE_CONSOLE=y
    2.31  # CONFIG_SERIAL_JSM is not set
    2.32  CONFIG_UNIX98_PTYS=y
    2.33  CONFIG_LEGACY_PTYS=y
    2.34 @@ -1516,8 +1505,16 @@ CONFIG_CRYPTO_DES=y
    2.35  #
    2.36  # Hardware crypto devices
    2.37  #
    2.38 +# CONFIG_XEN_UTIL is not set
    2.39  CONFIG_HAVE_ARCH_ALLOC_SKB=y
    2.40  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
    2.41 +CONFIG_XEN_BALLOON=y
    2.42 +CONFIG_XEN_SKBUFF=y
    2.43 +CONFIG_XEN_NETDEV_BACKEND=y
    2.44 +CONFIG_XEN_NETDEV_FRONTEND=y
    2.45 +# CONFIG_XEN_DEVMEM is not set
    2.46 +# CONFIG_XEN_REBOOT is not set
    2.47 +# CONFIG_XEN_SMPBOOT is not set
    2.48  CONFIG_XEN_INTERFACE_VERSION=0x00030202
    2.49  
    2.50  #
    2.51 @@ -1525,20 +1522,21 @@ CONFIG_XEN_INTERFACE_VERSION=0x00030202
    2.52  #
    2.53  CONFIG_XEN_PRIVILEGED_GUEST=y
    2.54  # CONFIG_XEN_UNPRIVILEGED_GUEST is not set
    2.55 +CONFIG_XEN_PRIVCMD=y
    2.56  CONFIG_XEN_BACKEND=y
    2.57  # CONFIG_XEN_PCIDEV_BACKEND is not set
    2.58  CONFIG_XEN_BLKDEV_BACKEND=y
    2.59 +CONFIG_XEN_XENBUS_DEV=y
    2.60  # CONFIG_XEN_BLKDEV_TAP is not set
    2.61 -CONFIG_XEN_NETDEV_BACKEND=y
    2.62  # CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
    2.63  CONFIG_XEN_NETDEV_LOOPBACK=y
    2.64  # CONFIG_XEN_TPMDEV_BACKEND is not set
    2.65  CONFIG_XEN_BLKDEV_FRONTEND=y
    2.66 -CONFIG_XEN_NETDEV_FRONTEND=y
    2.67  # CONFIG_XEN_SCRUB_PAGES is not set
    2.68 -# CONFIG_XEN_DISABLE_SERIAL is not set
    2.69 +CONFIG_XEN_DISABLE_SERIAL=y
    2.70  CONFIG_XEN_SYSFS=y
    2.71  CONFIG_XEN_COMPAT_030002_AND_LATER=y
    2.72  # CONFIG_XEN_COMPAT_LATEST_ONLY is not set
    2.73  CONFIG_XEN_COMPAT_030002=y
    2.74 +CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
    2.75  CONFIG_NO_IDLE_HZ=y
     3.1 --- a/buildconfigs/linux-defconfig_xen_ia64	Thu Jul 27 17:44:14 2006 -0500
     3.2 +++ b/buildconfigs/linux-defconfig_xen_ia64	Fri Jul 28 10:51:38 2006 +0100
     3.3 @@ -1,7 +1,7 @@
     3.4  #
     3.5  # Automatically generated make config: don't edit
     3.6  # Linux kernel version: 2.6.16.13-xen
     3.7 -# Mon May 22 14:15:20 2006
     3.8 +# Thu Jun 29 16:23:48 2006
     3.9  #
    3.10  
    3.11  #
    3.12 @@ -727,21 +727,10 @@ CONFIG_SERIAL_NONSTANDARD=y
    3.13  #
    3.14  # Serial drivers
    3.15  #
    3.16 -CONFIG_SERIAL_8250=y
    3.17 -CONFIG_SERIAL_8250_CONSOLE=y
    3.18 -CONFIG_SERIAL_8250_ACPI=y
    3.19 -CONFIG_SERIAL_8250_NR_UARTS=6
    3.20 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4
    3.21 -CONFIG_SERIAL_8250_EXTENDED=y
    3.22 -CONFIG_SERIAL_8250_SHARE_IRQ=y
    3.23 -# CONFIG_SERIAL_8250_DETECT_IRQ is not set
    3.24 -# CONFIG_SERIAL_8250_RSA is not set
    3.25  
    3.26  #
    3.27  # Non-8250 serial port support
    3.28  #
    3.29 -CONFIG_SERIAL_CORE=y
    3.30 -CONFIG_SERIAL_CORE_CONSOLE=y
    3.31  # CONFIG_SERIAL_JSM is not set
    3.32  CONFIG_UNIX98_PTYS=y
    3.33  CONFIG_LEGACY_PTYS=y
    3.34 @@ -1522,8 +1511,16 @@ CONFIG_CRYPTO_DES=y
    3.35  #
    3.36  # Hardware crypto devices
    3.37  #
    3.38 +# CONFIG_XEN_UTIL is not set
    3.39  CONFIG_HAVE_ARCH_ALLOC_SKB=y
    3.40  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
    3.41 +CONFIG_XEN_BALLOON=y
    3.42 +CONFIG_XEN_SKBUFF=y
    3.43 +CONFIG_XEN_NETDEV_BACKEND=y
    3.44 +CONFIG_XEN_NETDEV_FRONTEND=y
    3.45 +# CONFIG_XEN_DEVMEM is not set
    3.46 +# CONFIG_XEN_REBOOT is not set
    3.47 +# CONFIG_XEN_SMPBOOT is not set
    3.48  CONFIG_XEN_INTERFACE_VERSION=0x00030202
    3.49  
    3.50  #
    3.51 @@ -1531,20 +1528,21 @@ CONFIG_XEN_INTERFACE_VERSION=0x00030202
    3.52  #
    3.53  CONFIG_XEN_PRIVILEGED_GUEST=y
    3.54  # CONFIG_XEN_UNPRIVILEGED_GUEST is not set
    3.55 +CONFIG_XEN_PRIVCMD=y
    3.56  CONFIG_XEN_BACKEND=y
    3.57  # CONFIG_XEN_PCIDEV_BACKEND is not set
    3.58  CONFIG_XEN_BLKDEV_BACKEND=y
    3.59 +CONFIG_XEN_XENBUS_DEV=y
    3.60  # CONFIG_XEN_BLKDEV_TAP is not set
    3.61 -CONFIG_XEN_NETDEV_BACKEND=y
    3.62  # CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
    3.63  CONFIG_XEN_NETDEV_LOOPBACK=y
    3.64  # CONFIG_XEN_TPMDEV_BACKEND is not set
    3.65  CONFIG_XEN_BLKDEV_FRONTEND=y
    3.66 -CONFIG_XEN_NETDEV_FRONTEND=y
    3.67  # CONFIG_XEN_SCRUB_PAGES is not set
    3.68 -# CONFIG_XEN_DISABLE_SERIAL is not set
    3.69 +CONFIG_XEN_DISABLE_SERIAL=y
    3.70  CONFIG_XEN_SYSFS=y
    3.71  CONFIG_XEN_COMPAT_030002_AND_LATER=y
    3.72  # CONFIG_XEN_COMPAT_LATEST_ONLY is not set
    3.73  CONFIG_XEN_COMPAT_030002=y
    3.74 +CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
    3.75  CONFIG_NO_IDLE_HZ=y
     4.1 --- a/linux-2.6-xen-sparse/arch/ia64/Kconfig	Thu Jul 27 17:44:14 2006 -0500
     4.2 +++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig	Fri Jul 28 10:51:38 2006 +0100
     4.3 @@ -71,6 +71,13 @@ config XEN_IA64_DOM0_NON_VP
     4.4  	help
     4.5  	  dom0 P=M model
     4.6  
     4.7 +config XEN_IA64_VDSO_PARAVIRT
     4.8 +	bool
     4.9 +	depends on XEN && !ITANIUM
    4.10 +	default y
    4.11 +	help
    4.12 +	  vDSO paravirtualization
    4.13 +
    4.14  config SCHED_NO_NO_OMIT_FRAME_POINTER
    4.15  	bool
    4.16  	default y
    4.17 @@ -518,7 +525,7 @@ config XEN_DEVMEM
    4.18  	default n
    4.19  
    4.20  config XEN_REBOOT
    4.21 -	default n
    4.22 +	default y
    4.23  
    4.24  config XEN_SMPBOOT
    4.25  	default n
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S	Fri Jul 28 10:51:38 2006 +0100
     5.3 @@ -0,0 +1,488 @@
     5.4 +/*
     5.5 + * This file contains the code that gets mapped at the upper end of each task's text
     5.6 + * region.  For now, it contains the signal trampoline code only.
     5.7 + *
     5.8 + * Copyright (C) 1999-2003 Hewlett-Packard Co
     5.9 + * 	David Mosberger-Tang <davidm@hpl.hp.com>
    5.10 + */
    5.11 +
    5.12 +#include <linux/config.h>
    5.13 +
    5.14 +#include <asm/asmmacro.h>
    5.15 +#include <asm/errno.h>
    5.16 +#include <asm/asm-offsets.h>
    5.17 +#include <asm/sigcontext.h>
    5.18 +#include <asm/system.h>
    5.19 +#include <asm/unistd.h>
    5.20 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
    5.21 +# include <asm/privop.h>
    5.22 +#endif
    5.23 +
    5.24 +/*
    5.25 + * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
    5.26 + * complications with the linker (which likes to create PLT stubs for branches
     5.27 + * to targets outside the shared object), and multi-phase kernel builds, we
    5.28 + * simply create minimalistic "patch lists" in special ELF sections.
    5.29 + */
    5.30 +	.section ".data.patch.fsyscall_table", "a"
    5.31 +	.previous
    5.32 +#define LOAD_FSYSCALL_TABLE(reg)			\
    5.33 +[1:]	movl reg=0;					\
    5.34 +	.xdata4 ".data.patch.fsyscall_table", 1b-.
    5.35 +
    5.36 +	.section ".data.patch.brl_fsys_bubble_down", "a"
    5.37 +	.previous
    5.38 +#define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
    5.39 +[1:](pr)brl.cond.sptk 0;				\
    5.40 +	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
    5.41 +
    5.42 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
    5.43 +	// The page in which hyperprivop lives must be pinned by ITR.
     5.44 +	// However, the vDSO area isn't pinned, so issuing a hyperprivop
     5.45 +	// from a vDSO page causes the trouble Kevin pointed out:
     5.46 +	// after clearing vpsr.ic, the vcpu is pre-empted and the itlb
     5.47 +	// is flushed.  When the vcpu gets the cpu again, a tlb miss fault
     5.48 +	// occurs, but because vpsr.ic is off it turns into a nested dtlb fault.
     5.49 +	// To avoid such a situation, we jump into the kernel text area,
     5.50 +	// which is pinned, issue the hyperprivop there, and then return
     5.51 +	// to the vDSO page.
    5.52 +	// This is Dan Magenheimer's idea.
    5.53 +
    5.54 +	// Currently is_running_on_xen() is defined as running_on_xen.
     5.55 +	// If is_running_on_xen() ever becomes a real function, this code
     5.56 +	// must be updated accordingly.
    5.57 +	.section ".data.patch.running_on_xen", "a"
    5.58 +	.previous
    5.59 +#define LOAD_RUNNING_ON_XEN(reg)			\
    5.60 +[1:]	movl reg=0;					\
    5.61 +	.xdata4 ".data.patch.running_on_xen", 1b-.
    5.62 +
    5.63 +	.section ".data.patch.brl_xen_rsm_be_i", "a"
    5.64 +	.previous
    5.65 +#define BRL_COND_XEN_RSM_BE_I(pr)			\
    5.66 +[1:](pr)brl.cond.sptk 0;				\
    5.67 +	.xdata4 ".data.patch.brl_xen_rsm_be_i", 1b-.
    5.68 +
    5.69 +	.section ".data.patch.brl_xen_get_psr", "a"
    5.70 +	.previous
    5.71 +#define BRL_COND_XEN_GET_PSR(pr)			\
    5.72 +[1:](pr)brl.cond.sptk 0;				\
    5.73 +	.xdata4 ".data.patch.brl_xen_get_psr", 1b-.
    5.74 +
    5.75 +	.section ".data.patch.brl_xen_ssm_i_0", "a"
    5.76 +	.previous
    5.77 +#define BRL_COND_XEN_SSM_I_0(pr)			\
    5.78 +[1:](pr)brl.cond.sptk 0;				\
    5.79 +	.xdata4 ".data.patch.brl_xen_ssm_i_0", 1b-.
    5.80 +
    5.81 +	.section ".data.patch.brl_xen_ssm_i_1", "a"
    5.82 +	.previous
    5.83 +#define BRL_COND_XEN_SSM_I_1(pr)			\
    5.84 +[1:](pr)brl.cond.sptk 0;				\
    5.85 +	.xdata4 ".data.patch.brl_xen_ssm_i_1", 1b-.
    5.86 +#endif
    5.87 +
    5.88 +GLOBAL_ENTRY(__kernel_syscall_via_break)
    5.89 +	.prologue
    5.90 +	.altrp b6
    5.91 +	.body
    5.92 +	/*
    5.93 +	 * Note: for (fast) syscall restart to work, the break instruction must be
    5.94 +	 *	 the first one in the bundle addressed by syscall_via_break.
    5.95 +	 */
    5.96 +{ .mib
    5.97 +	break 0x100000
    5.98 +	nop.i 0
    5.99 +	br.ret.sptk.many b6
   5.100 +}
   5.101 +END(__kernel_syscall_via_break)
   5.102 +
   5.103 +/*
   5.104 + * On entry:
   5.105 + *	r11 = saved ar.pfs
   5.106 + *	r15 = system call #
   5.107 + *	b0  = saved return address
   5.108 + *	b6  = return address
   5.109 + * On exit:
   5.110 + *	r11 = saved ar.pfs
   5.111 + *	r15 = system call #
   5.112 + *	b0  = saved return address
   5.113 + *	all other "scratch" registers:	undefined
   5.114 + *	all "preserved" registers:	same as on entry
   5.115 + */
   5.116 +
   5.117 +GLOBAL_ENTRY(__kernel_syscall_via_epc)
   5.118 +	.prologue
   5.119 +	.altrp b6
   5.120 +	.body
   5.121 +{
   5.122 +	/*
   5.123 +	 * Note: the kernel cannot assume that the first two instructions in this
   5.124 +	 * bundle get executed.  The remaining code must be safe even if
   5.125 +	 * they do not get executed.
   5.126 +	 */
   5.127 +	adds r17=-1024,r15			// A
   5.128 +	mov r10=0				// A    default to successful syscall execution
   5.129 +	epc					// B	causes split-issue
   5.130 +}
   5.131 +	;;
   5.132 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
   5.133 +	// r20 = 1
   5.134 +	// r22 = &vcpu->evtchn_mask
   5.135 +	// r23 = &vpsr.ic
   5.136 +	// r24 = vcpu->pending_interruption
   5.137 +	// r25 = tmp
   5.138 +	// r28 = &running_on_xen
   5.139 +	// r30 = running_on_xen
   5.140 +	// r31 = tmp
   5.141 +	// p11 = tmp
   5.142 +	// p12 = running_on_xen
   5.143 +	// p13 = !running_on_xen
   5.144 +	// p14 = tmp
   5.145 +	// p15 = tmp
   5.146 +#define isXen	p12
   5.147 +#define isRaw	p13
   5.148 +	LOAD_RUNNING_ON_XEN(r28)
   5.149 +	movl r22=XSI_PSR_I_ADDR
   5.150 +	movl r23=XSI_PSR_IC
   5.151 +	movl r24=XSI_PSR_I_ADDR+(XSI_PEND_OFS-XSI_PSR_I_ADDR_OFS)
   5.152 +	mov r20=1
   5.153 +	;;
   5.154 +	ld4 r30=[r28]
   5.155 +	;;
   5.156 +	cmp.ne isXen,isRaw=r0,r30
   5.157 +	;;
   5.158 +(isRaw)	rsm psr.be | psr.i
   5.159 +	BRL_COND_XEN_RSM_BE_I(isXen)
   5.160 +	.global .vdso_rsm_be_i_ret
   5.161 +.vdso_rsm_be_i_ret:
   5.162 +#else
   5.163 +	rsm psr.be | psr.i			// M2 (5 cyc to srlz.d)
   5.164 +#endif
   5.165 +	LOAD_FSYSCALL_TABLE(r14)		// X
   5.166 +	;;
   5.167 +	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
   5.168 +	shladd r18=r17,3,r14			// A
   5.169 +	mov r19=NR_syscalls-1			// A
   5.170 +	;;
   5.171 +	lfetch [r18]				// M0|1
   5.172 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
   5.173 +(isRaw)	mov r29=psr
   5.174 +	BRL_COND_XEN_GET_PSR(isXen)
   5.175 +	.global .vdso_get_psr_ret
   5.176 +.vdso_get_psr_ret:
   5.177 +#else
   5.178 +	mov r29=psr				// M2 (12 cyc)
   5.179 +#endif
   5.180 +	// If r17 is a NaT, p6 will be zero
   5.181 +	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
   5.182 +	;;
   5.183 +	mov r21=ar.fpsr				// M2 (12 cyc)
   5.184 +	tnat.nz p10,p9=r15			// I0
   5.185 +	mov.i r26=ar.pfs			// I0 (would stall anyhow due to srlz.d...)
   5.186 +	;;
   5.187 +	srlz.d					// M0 (forces split-issue) ensure PSR.BE==0
   5.188 +(p6)	ld8 r18=[r18]				// M0|1
   5.189 +	nop.i 0
   5.190 +	;;
   5.191 +	nop.m 0
   5.192 +(p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
   5.193 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
   5.194 +	;;
   5.195 +	// p14 = running_on_xen && p8
   5.196 +	// p15 = !running_on_xen && p8
   5.197 +(p8)	cmp.ne.unc p14,p15=r0,r30
   5.198 +	;;
   5.199 +(p15)	ssm psr.i
   5.200 +	BRL_COND_XEN_SSM_I_0(p14)
   5.201 +	.global .vdso_ssm_i_0_ret
   5.202 +.vdso_ssm_i_0_ret:
   5.203 +#else
   5.204 +	nop.i 0
   5.205 +	;;
   5.206 +(p8)	ssm psr.i
   5.207 +#endif
   5.208 +(p6)	mov b7=r18				// I0
   5.209 +(p8)	br.dptk.many b7				// B
   5.210 +
   5.211 +	mov r27=ar.rsc				// M2 (12 cyc)
   5.212 +/*
   5.213 + * brl.cond doesn't work as intended because the linker would convert this branch
   5.214 + * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
   5.215 + * future version of the linker.  In the meantime, we just use an indirect branch
   5.216 + * instead.
   5.217 + */
   5.218 +#ifdef CONFIG_ITANIUM
   5.219 +(p6)	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
   5.220 +	;;
   5.221 +(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
   5.222 +	;;
   5.223 +(p6)	mov b7=r14
   5.224 +(p6)	br.sptk.many b7
   5.225 +#else
   5.226 +	BRL_COND_FSYS_BUBBLE_DOWN(p6)
   5.227 +#endif
   5.228 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
   5.229 +(isRaw)	ssm psr.i
   5.230 +	BRL_COND_XEN_SSM_I_1(isXen)
   5.231 +	.global .vdso_ssm_i_1_ret
   5.232 +.vdso_ssm_i_1_ret:
   5.233 +#else
   5.234 +	ssm psr.i
   5.235 +#endif
   5.236 +	mov r10=-1
   5.237 +(p10)	mov r8=EINVAL
   5.238 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
    5.239 +	dv_serialize_data // silence the gas warning:
    5.240 +		          // neither xen_hyper_ssm_i_0 nor xen_hyper_ssm_i_1
    5.241 +		          // changes p9 or p10
   5.242 +#endif
   5.243 +(p9)	mov r8=ENOSYS
   5.244 +	FSYS_RETURN
   5.245 +END(__kernel_syscall_via_epc)
   5.246 +
   5.247 +#	define ARG0_OFF		(16 + IA64_SIGFRAME_ARG0_OFFSET)
   5.248 +#	define ARG1_OFF		(16 + IA64_SIGFRAME_ARG1_OFFSET)
   5.249 +#	define ARG2_OFF		(16 + IA64_SIGFRAME_ARG2_OFFSET)
   5.250 +#	define SIGHANDLER_OFF	(16 + IA64_SIGFRAME_HANDLER_OFFSET)
   5.251 +#	define SIGCONTEXT_OFF	(16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET)
   5.252 +
   5.253 +#	define FLAGS_OFF	IA64_SIGCONTEXT_FLAGS_OFFSET
   5.254 +#	define CFM_OFF		IA64_SIGCONTEXT_CFM_OFFSET
   5.255 +#	define FR6_OFF		IA64_SIGCONTEXT_FR6_OFFSET
   5.256 +#	define BSP_OFF		IA64_SIGCONTEXT_AR_BSP_OFFSET
   5.257 +#	define RNAT_OFF		IA64_SIGCONTEXT_AR_RNAT_OFFSET
   5.258 +#	define UNAT_OFF		IA64_SIGCONTEXT_AR_UNAT_OFFSET
   5.259 +#	define FPSR_OFF		IA64_SIGCONTEXT_AR_FPSR_OFFSET
   5.260 +#	define PR_OFF		IA64_SIGCONTEXT_PR_OFFSET
   5.261 +#	define RP_OFF		IA64_SIGCONTEXT_IP_OFFSET
   5.262 +#	define SP_OFF		IA64_SIGCONTEXT_R12_OFFSET
   5.263 +#	define RBS_BASE_OFF	IA64_SIGCONTEXT_RBS_BASE_OFFSET
   5.264 +#	define LOADRS_OFF	IA64_SIGCONTEXT_LOADRS_OFFSET
   5.265 +#	define base0		r2
   5.266 +#	define base1		r3
   5.267 +	/*
   5.268 +	 * When we get here, the memory stack looks like this:
   5.269 +	 *
   5.270 +	 *   +===============================+
    5.271 +	 *   |				     |
    5.272 +	 *   //	    struct sigframe          //
    5.273 +	 *   |				     |
   5.274 +	 *   +-------------------------------+ <-- sp+16
   5.275 +	 *   |      16 byte of scratch       |
   5.276 +	 *   |            space              |
   5.277 +	 *   +-------------------------------+ <-- sp
   5.278 +	 *
   5.279 +	 * The register stack looks _exactly_ the way it looked at the time the signal
   5.280 +	 * occurred.  In other words, we're treading on a potential mine-field: each
   5.281 +	 * incoming general register may be a NaT value (including sp, in which case the
   5.282 +	 * process ends up dying with a SIGSEGV).
   5.283 +	 *
    5.284 +	 * The first thing we need to do is a cover to get the registers onto the backing
   5.285 +	 * store.  Once that is done, we invoke the signal handler which may modify some
   5.286 +	 * of the machine state.  After returning from the signal handler, we return
   5.287 +	 * control to the previous context by executing a sigreturn system call.  A signal
   5.288 +	 * handler may call the rt_sigreturn() function to directly return to a given
   5.289 +	 * sigcontext.  However, the user-level sigreturn() needs to do much more than
   5.290 +	 * calling the rt_sigreturn() system call as it needs to unwind the stack to
   5.291 +	 * restore preserved registers that may have been saved on the signal handler's
   5.292 +	 * call stack.
   5.293 +	 */
   5.294 +
   5.295 +#define SIGTRAMP_SAVES										\
   5.296 +	.unwabi 3, 's';		/* mark this as a sigtramp handler (saves scratch regs) */	\
   5.297 +	.unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */	\
   5.298 +	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF;						\
   5.299 +	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF;						\
   5.300 +	.savesp pr, PR_OFF+SIGCONTEXT_OFF;     							\
   5.301 +	.savesp rp, RP_OFF+SIGCONTEXT_OFF;							\
   5.302 +	.savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF;							\
   5.303 +	.vframesp SP_OFF+SIGCONTEXT_OFF
   5.304 +
   5.305 +GLOBAL_ENTRY(__kernel_sigtramp)
   5.306 +	// describe the state that is active when we get here:
   5.307 +	.prologue
   5.308 +	SIGTRAMP_SAVES
   5.309 +	.body
   5.310 +
   5.311 +	.label_state 1
   5.312 +
   5.313 +	adds base0=SIGHANDLER_OFF,sp
   5.314 +	adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp
   5.315 +	br.call.sptk.many rp=1f
   5.316 +1:
   5.317 +	ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF)	// get pointer to signal handler's plabel
   5.318 +	ld8 r15=[base1]					// get address of new RBS base (or NULL)
   5.319 +	cover				// push args in interrupted frame onto backing store
   5.320 +	;;
   5.321 +	cmp.ne p1,p0=r15,r0		// do we need to switch rbs? (note: pr is saved by kernel)
   5.322 +	mov.m r9=ar.bsp			// fetch ar.bsp
   5.323 +	.spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
   5.324 +(p1)	br.cond.spnt setup_rbs		// yup -> (clobbers p8, r14-r16, and r18-r20)
   5.325 +back_from_setup_rbs:
   5.326 +	alloc r8=ar.pfs,0,0,3,0
   5.327 +	ld8 out0=[base0],16		// load arg0 (signum)
   5.328 +	adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1
   5.329 +	;;
   5.330 +	ld8 out1=[base1]		// load arg1 (siginfop)
   5.331 +	ld8 r10=[r17],8			// get signal handler entry point
   5.332 +	;;
   5.333 +	ld8 out2=[base0]		// load arg2 (sigcontextp)
   5.334 +	ld8 gp=[r17]			// get signal handler's global pointer
   5.335 +	adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
   5.336 +	;;
   5.337 +	.spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
   5.338 +	st8 [base0]=r9			// save sc_ar_bsp
   5.339 +	adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
   5.340 +	adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
   5.341 +	;;
   5.342 +	stf.spill [base0]=f6,32
   5.343 +	stf.spill [base1]=f7,32
   5.344 +	;;
   5.345 +	stf.spill [base0]=f8,32
   5.346 +	stf.spill [base1]=f9,32
   5.347 +	mov b6=r10
   5.348 +	;;
   5.349 +	stf.spill [base0]=f10,32
   5.350 +	stf.spill [base1]=f11,32
   5.351 +	;;
   5.352 +	stf.spill [base0]=f12,32
   5.353 +	stf.spill [base1]=f13,32
   5.354 +	;;
   5.355 +	stf.spill [base0]=f14,32
   5.356 +	stf.spill [base1]=f15,32
   5.357 +	br.call.sptk.many rp=b6			// call the signal handler
   5.358 +.ret0:	adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
   5.359 +	;;
   5.360 +	ld8 r15=[base0]				// fetch sc_ar_bsp
   5.361 +	mov r14=ar.bsp
   5.362 +	;;
   5.363 +	cmp.ne p1,p0=r14,r15			// do we need to restore the rbs?
   5.364 +(p1)	br.cond.spnt restore_rbs		// yup -> (clobbers r14-r18, f6 & f7)
   5.365 +	;;
   5.366 +back_from_restore_rbs:
   5.367 +	adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
   5.368 +	adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
   5.369 +	;;
   5.370 +	ldf.fill f6=[base0],32
   5.371 +	ldf.fill f7=[base1],32
   5.372 +	;;
   5.373 +	ldf.fill f8=[base0],32
   5.374 +	ldf.fill f9=[base1],32
   5.375 +	;;
   5.376 +	ldf.fill f10=[base0],32
   5.377 +	ldf.fill f11=[base1],32
   5.378 +	;;
   5.379 +	ldf.fill f12=[base0],32
   5.380 +	ldf.fill f13=[base1],32
   5.381 +	;;
   5.382 +	ldf.fill f14=[base0],32
   5.383 +	ldf.fill f15=[base1],32
   5.384 +	mov r15=__NR_rt_sigreturn
   5.385 +	.restore sp				// pop .prologue
   5.386 +	break __BREAK_SYSCALL
   5.387 +
   5.388 +	.prologue
   5.389 +	SIGTRAMP_SAVES
   5.390 +setup_rbs:
   5.391 +	mov ar.rsc=0				// put RSE into enforced lazy mode
   5.392 +	;;
   5.393 +	.save ar.rnat, r19
   5.394 +	mov r19=ar.rnat				// save RNaT before switching backing store area
   5.395 +	adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp
   5.396 +
   5.397 +	mov r18=ar.bspstore
   5.398 +	mov ar.bspstore=r15			// switch over to new register backing store area
   5.399 +	;;
   5.400 +
   5.401 +	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
   5.402 +	st8 [r14]=r19				// save sc_ar_rnat
   5.403 +	.body
   5.404 +	mov.m r16=ar.bsp			// sc_loadrs <- (new bsp - new bspstore) << 16
   5.405 +	adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp
   5.406 +	;;
   5.407 +	invala
   5.408 +	sub r15=r16,r15
   5.409 +	extr.u r20=r18,3,6
   5.410 +	;;
   5.411 +	mov ar.rsc=0xf				// set RSE into eager mode, pl 3
   5.412 +	cmp.eq p8,p0=63,r20
   5.413 +	shl r15=r15,16
   5.414 +	;;
   5.415 +	st8 [r14]=r15				// save sc_loadrs
   5.416 +(p8)	st8 [r18]=r19		// if bspstore points at RNaT slot, store RNaT there now
   5.417 +	.restore sp				// pop .prologue
   5.418 +	br.cond.sptk back_from_setup_rbs
   5.419 +
   5.420 +	.prologue
   5.421 +	SIGTRAMP_SAVES
   5.422 +	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
   5.423 +	.body
   5.424 +restore_rbs:
   5.425 +	// On input:
   5.426 +	//	r14 = bsp1 (bsp at the time of return from signal handler)
   5.427 +	//	r15 = bsp0 (bsp at the time the signal occurred)
   5.428 +	//
   5.429 +	// Here, we need to calculate bspstore0, the value that ar.bspstore needs
   5.430 +	// to be set to, based on bsp0 and the size of the dirty partition on
   5.431 +	// the alternate stack (sc_loadrs >> 16).  This can be done with the
   5.432 +	// following algorithm:
   5.433 +	//
   5.434 +	//  bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1));
   5.435 +	//
   5.436 +	// This is what the code below does.
   5.437 +	//
   5.438 +	alloc r2=ar.pfs,0,0,0,0			// alloc null frame
   5.439 +	adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp
   5.440 +	adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp
   5.441 +	;;
   5.442 +	ld8 r17=[r16]
   5.443 +	ld8 r16=[r18]			// get new rnat
   5.444 +	extr.u r18=r15,3,6	// r18 <- rse_slot_num(bsp0)
   5.445 +	;;
   5.446 +	mov ar.rsc=r17			// put RSE into enforced lazy mode
   5.447 +	shr.u r17=r17,16
   5.448 +	;;
   5.449 +	sub r14=r14,r17		// r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16)
   5.450 +	shr.u r17=r17,3		// r17 <- (sc_loadrs >> 19)
   5.451 +	;;
   5.452 +	loadrs			// restore dirty partition
   5.453 +	extr.u r14=r14,3,6	// r14 <- rse_slot_num(bspstore1)
   5.454 +	;;
   5.455 +	add r14=r14,r17		// r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19)
   5.456 +	;;
   5.457 +	shr.u r14=r14,6		// r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40
   5.458 +	;;
   5.459 +	sub r14=r14,r17		// r14 <- -rse_num_regs(bspstore1, bsp1)
   5.460 +	movl r17=0x8208208208208209
   5.461 +	;;
   5.462 +	add r18=r18,r14		// r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1)
   5.463 +	setf.sig f7=r17
   5.464 +	cmp.lt p7,p0=r14,r0	// p7 <- (r14 < 0)?
   5.465 +	;;
   5.466 +(p7)	adds r18=-62,r18	// delta -= 62
   5.467 +	;;
   5.468 +	setf.sig f6=r18
   5.469 +	;;
   5.470 +	xmpy.h f6=f6,f7
   5.471 +	;;
   5.472 +	getf.sig r17=f6
   5.473 +	;;
   5.474 +	add r17=r17,r18
   5.475 +	shr r18=r18,63
   5.476 +	;;
   5.477 +	shr r17=r17,5
   5.478 +	;;
   5.479 +	sub r17=r17,r18		// r17 = delta/63
   5.480 +	;;
   5.481 +	add r17=r14,r17		// r17 <- delta/63 - rse_num_regs(bspstore1, bsp1)
   5.482 +	;;
   5.483 +	shladd r15=r17,3,r15	// r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1))
   5.484 +	;;
   5.485 +	mov ar.bspstore=r15			// switch back to old register backing store area
   5.486 +	;;
   5.487 +	mov ar.rnat=r16				// restore RNaT
   5.488 +	mov ar.rsc=0xf				// (will be restored later on from sc_ar_rsc)
   5.489 +	// invala not necessary as that will happen when returning to user-mode
   5.490 +	br.cond.sptk back_from_restore_rbs
   5.491 +END(__kernel_sigtramp)
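
The restore_rbs arithmetic above is easier to follow in C. Below is a rough model of the same computation, with helpers patterned after the kernel's ia64_rse_* routines (include/asm-ia64/rse.h); the helper bodies are an illustrative sketch, not code from this changeset. The assembly obtains delta/63 with a fixed-point multiply by 0x8208208208208209 because ia64 has no integer divide instruction.

/* Sketch: RSE address arithmetic behind restore_rbs.  Every 64th slot of
 * the register backing store holds an RNaT collection word, so a group of
 * 64 slots carries only 63 stacked registers. */
static unsigned long rse_slot_num(unsigned long addr)
{
	return (addr >> 3) & 0x3f;		/* slot index within its 64-slot group */
}

static long rse_num_regs(unsigned long bspstore, unsigned long bsp)
{
	long slots = (bsp - bspstore) / 8;

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;	/* discount RNaT slots */
}

static unsigned long rse_skip_regs(unsigned long addr, long num_regs)
{
	long delta = rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;			/* bias so the division rounds toward -infinity */
	return addr + 8 * (num_regs + delta / 0x3f);
}

/* bspstore0, as in the comment: step back from bsp0 over the registers that
 * were dirty (sc_loadrs >> 16 bytes below bsp1) when the signal was taken. */
static unsigned long compute_bspstore0(unsigned long bsp0, unsigned long bsp1,
					unsigned long sc_loadrs)
{
	unsigned long bspstore1 = bsp1 - (sc_loadrs >> 16);
	long ndirty = rse_num_regs(bspstore1, bsp1);

	return rse_skip_regs(bsp0, -ndirty);
}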
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S	Fri Jul 28 10:51:38 2006 +0100
     6.3 @@ -0,0 +1,117 @@
     6.4 +/*
      6.5 + * Linker script for the gate DSO.  The gate pages form an ELF shared object prelinked to its
     6.6 + * virtual address, with only one read-only segment and one execute-only segment (both fit
     6.7 + * in one page).  This script controls its layout.
     6.8 + */
     6.9 +
    6.10 +#include <linux/config.h>
    6.11 +
    6.12 +#include <asm/system.h>
    6.13 +
    6.14 +SECTIONS
    6.15 +{
    6.16 +  . = GATE_ADDR + SIZEOF_HEADERS;
    6.17 +
    6.18 +  .hash				: { *(.hash) }				:readable
    6.19 +  .dynsym			: { *(.dynsym) }
    6.20 +  .dynstr			: { *(.dynstr) }
    6.21 +  .gnu.version			: { *(.gnu.version) }
    6.22 +  .gnu.version_d		: { *(.gnu.version_d) }
    6.23 +  .gnu.version_r		: { *(.gnu.version_r) }
    6.24 +  .dynamic			: { *(.dynamic) }			:readable :dynamic
    6.25 +
    6.26 +  /*
    6.27 +   * This linker script is used both with -r and with -shared.  For the layouts to match,
    6.28 +   * we need to skip more than enough space for the dynamic symbol table et al.  If this
    6.29 +   * amount is insufficient, ld -shared will barf.  Just increase it here.
    6.30 +   */
    6.31 +  . = GATE_ADDR + 0x500;
    6.32 +
    6.33 +  .data.patch			: {
    6.34 +				    __start_gate_mckinley_e9_patchlist = .;
    6.35 +				    *(.data.patch.mckinley_e9)
    6.36 +				    __end_gate_mckinley_e9_patchlist = .;
    6.37 +
    6.38 +				    __start_gate_vtop_patchlist = .;
    6.39 +				    *(.data.patch.vtop)
    6.40 +				    __end_gate_vtop_patchlist = .;
    6.41 +
    6.42 +				    __start_gate_fsyscall_patchlist = .;
    6.43 +				    *(.data.patch.fsyscall_table)
    6.44 +				    __end_gate_fsyscall_patchlist = .;
    6.45 +
    6.46 +				    __start_gate_brl_fsys_bubble_down_patchlist = .;
    6.47 +				    *(.data.patch.brl_fsys_bubble_down)
    6.48 +				    __end_gate_brl_fsys_bubble_down_patchlist = .;
    6.49 +
    6.50 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
    6.51 +				    __start_gate_running_on_xen_patchlist = .;
    6.52 +				    *(.data.patch.running_on_xen)
    6.53 +				    __end_gate_running_on_xen_patchlist = .;
    6.54 +
    6.55 +				    __start_gate_brl_xen_rsm_be_i_patchlist = .;
    6.56 +				    *(.data.patch.brl_xen_rsm_be_i)
    6.57 +				    __end_gate_brl_xen_rsm_be_i_patchlist = .;
    6.58 +
    6.59 +				    __start_gate_brl_xen_get_psr_patchlist = .;
    6.60 +				    *(.data.patch.brl_xen_get_psr)
    6.61 +				    __end_gate_brl_xen_get_psr_patchlist = .;
    6.62 +
    6.63 +				    __start_gate_brl_xen_ssm_i_0_patchlist = .;
    6.64 +				    *(.data.patch.brl_xen_ssm_i_0)
    6.65 +				    __end_gate_brl_xen_ssm_i_0_patchlist = .;
    6.66 +
    6.67 +				    __start_gate_brl_xen_ssm_i_1_patchlist = .;
    6.68 +				    *(.data.patch.brl_xen_ssm_i_1)
    6.69 +				    __end_gate_brl_xen_ssm_i_1_patchlist = .;
    6.70 +#endif
    6.71 +  }									:readable
    6.72 +  .IA_64.unwind_info		: { *(.IA_64.unwind_info*) }
    6.73 +  .IA_64.unwind			: { *(.IA_64.unwind*) }			:readable :unwind
    6.74 +#ifdef HAVE_BUGGY_SEGREL
    6.75 +  .text (GATE_ADDR + PAGE_SIZE)	: { *(.text) *(.text.*) }		:readable
    6.76 +#else
    6.77 +  . = ALIGN (PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1));
    6.78 +  .text				: { *(.text) *(.text.*) }		:epc
    6.79 +#endif
    6.80 +
    6.81 +  /DISCARD/			: {
    6.82 +  	*(.got.plt) *(.got)
    6.83 +	*(.data .data.* .gnu.linkonce.d.*)
    6.84 +	*(.dynbss)
    6.85 +	*(.bss .bss.* .gnu.linkonce.b.*)
    6.86 +	*(__ex_table)
    6.87 +  }
    6.88 +}
    6.89 +
    6.90 +/*
    6.91 + * We must supply the ELF program headers explicitly to get just one
    6.92 + * PT_LOAD segment, and set the flags explicitly to make segments read-only.
    6.93 + */
    6.94 +PHDRS
    6.95 +{
    6.96 +  readable  PT_LOAD	FILEHDR	PHDRS	FLAGS(4);	/* PF_R */
    6.97 +#ifndef HAVE_BUGGY_SEGREL
    6.98 +  epc	    PT_LOAD	FILEHDR PHDRS	FLAGS(1);	/* PF_X */
    6.99 +#endif
   6.100 +  dynamic   PT_DYNAMIC			FLAGS(4);	/* PF_R */
   6.101 +  unwind    0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
   6.102 +}
   6.103 +
   6.104 +/*
   6.105 + * This controls what symbols we export from the DSO.
   6.106 + */
   6.107 +VERSION
   6.108 +{
   6.109 +  LINUX_2.5 {
   6.110 +    global:
   6.111 +	__kernel_syscall_via_break;
   6.112 +	__kernel_syscall_via_epc;
   6.113 +	__kernel_sigtramp;
   6.114 +
   6.115 +    local: *;
   6.116 +  };
   6.117 +}
   6.118 +
   6.119 +/* The ELF entry point can be used to set the AT_SYSINFO value.  */
   6.120 +ENTRY(__kernel_syscall_via_epc)
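
Regarding that final comment: the kernel advertises the gate page to user space through the ELF aux vector, and libc picks the entry points up from there. A small stand-alone user-space sketch (not part of the changeset) dumps the relevant auxv entries; it relies on the common three-argument main() extension to locate the aux vector, and on ia64 AT_SYSINFO is expected to hold the __kernel_syscall_via_epc address while AT_SYSINFO_EHDR points at the gate page's ELF header.

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv, char **envp)
{
	Elf64_auxv_t *auxv;

	while (*envp++)		/* the aux vector starts right after envp's NULL terminator */
		;
	for (auxv = (Elf64_auxv_t *) envp; auxv->a_type != AT_NULL; auxv++) {
		if (auxv->a_type == AT_SYSINFO)
			printf("AT_SYSINFO      = 0x%lx\n", (unsigned long) auxv->a_un.a_val);
		else if (auxv->a_type == AT_SYSINFO_EHDR)
			printf("AT_SYSINFO_EHDR = 0x%lx\n", (unsigned long) auxv->a_un.a_val);
	}
	return 0;
}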
     7.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c	Thu Jul 27 17:44:14 2006 -0500
     7.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c	Fri Jul 28 10:51:38 2006 +0100
     7.3 @@ -31,6 +31,9 @@
     7.4  #include <linux/smp_lock.h>
     7.5  #include <linux/threads.h>
     7.6  #include <linux/bitops.h>
     7.7 +#ifdef CONFIG_XEN
     7.8 +#include <linux/cpu.h>
     7.9 +#endif
    7.10  
    7.11  #include <asm/delay.h>
    7.12  #include <asm/intrinsics.h>
    7.13 @@ -235,6 +238,9 @@ static struct irqaction ipi_irqaction = 
    7.14  #include <xen/evtchn.h>
    7.15  #include <xen/interface/callback.h>
    7.16  
    7.17 +static DEFINE_PER_CPU(int, timer_irq) = -1;
    7.18 +static DEFINE_PER_CPU(int, ipi_irq) = -1;
    7.19 +static DEFINE_PER_CPU(int, resched_irq) = -1;
    7.20  static char timer_name[NR_CPUS][15];
    7.21  static char ipi_name[NR_CPUS][15];
    7.22  static char resched_name[NR_CPUS][15];
    7.23 @@ -252,6 +258,7 @@ static unsigned short late_irq_cnt = 0;
    7.24  static unsigned short saved_irq_cnt = 0;
    7.25  static int xen_slab_ready = 0;
    7.26  
    7.27 +#ifdef CONFIG_SMP
    7.28  /* Dummy stub. Though we may check RESCHEDULE_VECTOR before __do_IRQ,
     7.29   * it ends up issuing several memory accesses on percpu data and
    7.30   * thus adds unnecessary traffic to other paths.
    7.31 @@ -268,6 +275,7 @@ static struct irqaction resched_irqactio
    7.32  	.flags =	SA_INTERRUPT,
    7.33  	.name =		"RESCHED"
    7.34  };
    7.35 +#endif
    7.36  
    7.37  /*
    7.38   * This is xen version percpu irq registration, which needs bind
    7.39 @@ -294,6 +302,7 @@ xen_register_percpu_irq (unsigned int ir
    7.40  			ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
    7.41  				action->handler, action->flags,
    7.42  				timer_name[cpu], action->dev_id);
    7.43 +			per_cpu(timer_irq,cpu) = ret;
    7.44  			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
    7.45  			break;
    7.46  		case IA64_IPI_RESCHEDULE:
    7.47 @@ -301,6 +310,7 @@ xen_register_percpu_irq (unsigned int ir
    7.48  			ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
    7.49  				action->handler, action->flags,
    7.50  				resched_name[cpu], action->dev_id);
    7.51 +			per_cpu(resched_irq,cpu) = ret;
    7.52  			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
    7.53  			break;
    7.54  		case IA64_IPI_VECTOR:
    7.55 @@ -308,6 +318,7 @@ xen_register_percpu_irq (unsigned int ir
    7.56  			ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
    7.57  				action->handler, action->flags,
    7.58  				ipi_name[cpu], action->dev_id);
    7.59 +			per_cpu(ipi_irq,cpu) = ret;
    7.60  			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
    7.61  			break;
    7.62  		case IA64_SPURIOUS_INT_VECTOR:
    7.63 @@ -343,7 +354,7 @@ xen_bind_early_percpu_irq (void)
    7.64  	 */
    7.65  	for (i = 0; i < late_irq_cnt; i++)
    7.66  		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
    7.67 -			saved_percpu_irqs[i].action, 0);
    7.68 +		                        saved_percpu_irqs[i].action, 0);
    7.69  }
    7.70  
    7.71  /* FIXME: There's no obvious point to check whether slab is ready. So
    7.72 @@ -353,6 +364,38 @@ extern void (*late_time_init)(void);
    7.73  extern char xen_event_callback;
    7.74  extern void xen_init_IRQ(void);
    7.75  
    7.76 +#ifdef CONFIG_HOTPLUG_CPU
    7.77 +static int __devinit
    7.78 +unbind_evtchn_callback(struct notifier_block *nfb,
    7.79 +                       unsigned long action, void *hcpu)
    7.80 +{
    7.81 +	unsigned int cpu = (unsigned long)hcpu;
    7.82 +
    7.83 +	if (action == CPU_DEAD) {
    7.84 +		/* Unregister evtchn.  */
    7.85 +		if (per_cpu(ipi_irq,cpu) >= 0) {
    7.86 +			unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
    7.87 +			per_cpu(ipi_irq, cpu) = -1;
    7.88 +		}
    7.89 +		if (per_cpu(resched_irq,cpu) >= 0) {
    7.90 +			unbind_from_irqhandler (per_cpu(resched_irq, cpu),
    7.91 +						NULL);
    7.92 +			per_cpu(resched_irq, cpu) = -1;
    7.93 +		}
    7.94 +		if (per_cpu(timer_irq,cpu) >= 0) {
    7.95 +			unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL);
    7.96 +			per_cpu(timer_irq, cpu) = -1;
    7.97 +		}
    7.98 +	}
    7.99 +	return NOTIFY_OK;
   7.100 +}
   7.101 +
   7.102 +static struct notifier_block unbind_evtchn_notifier = {
   7.103 +	.notifier_call = unbind_evtchn_callback,
   7.104 +	.priority = 0
   7.105 +};
   7.106 +#endif
   7.107 +
   7.108  DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
   7.109  void xen_smp_intr_init(void)
   7.110  {
   7.111 @@ -363,21 +406,22 @@ void xen_smp_intr_init(void)
   7.112  		.type = CALLBACKTYPE_event,
   7.113  		.address = (unsigned long)&xen_event_callback,
   7.114  	};
   7.115 -	static cpumask_t registered_cpumask;
   7.116  
   7.117 -	if (!cpu)
   7.118 +	if (cpu == 0) {
   7.119 +		/* Initialization was already done for boot cpu.  */
   7.120 +#ifdef CONFIG_HOTPLUG_CPU
   7.121 +		/* Register the notifier only once.  */
   7.122 +		register_cpu_notifier(&unbind_evtchn_notifier);
   7.123 +#endif
   7.124  		return;
   7.125 +	}
   7.126  
    7.127  	/* This should be piggybacked when setting up the vcpu guest context */
   7.128  	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
   7.129  
   7.130 -	if (!cpu_isset(cpu, registered_cpumask)) {
   7.131 -		cpu_set(cpu, registered_cpumask);
   7.132 -		for (i = 0; i < saved_irq_cnt; i++)
   7.133 -			xen_register_percpu_irq(saved_percpu_irqs[i].irq,
   7.134 -						saved_percpu_irqs[i].action,
   7.135 -						0);
   7.136 -	}
   7.137 +	for (i = 0; i < saved_irq_cnt; i++)
   7.138 +		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
   7.139 +		                        saved_percpu_irqs[i].action, 0);
   7.140  #endif /* CONFIG_SMP */
   7.141  }
   7.142  #endif /* CONFIG_XEN */
   7.143 @@ -388,12 +432,13 @@ register_percpu_irq (ia64_vector vec, st
   7.144  	irq_desc_t *desc;
   7.145  	unsigned int irq;
   7.146  
   7.147 +#ifdef CONFIG_XEN
   7.148 +	if (is_running_on_xen())
   7.149 +		return xen_register_percpu_irq(vec, action, 1);
   7.150 +#endif
   7.151 +
   7.152  	for (irq = 0; irq < NR_IRQS; ++irq)
   7.153  		if (irq_to_vector(irq) == vec) {
   7.154 -#ifdef CONFIG_XEN
   7.155 -			if (is_running_on_xen())
   7.156 -				return xen_register_percpu_irq(vec, action, 1);
   7.157 -#endif
   7.158  			desc = irq_descp(irq);
   7.159  			desc->status |= IRQ_PER_CPU;
   7.160  			desc->handler = &irq_type_ia64_lsapic;
   7.161 @@ -441,6 +486,7 @@ ia64_send_ipi (int cpu, int vector, int 
   7.162          if (is_running_on_xen()) {
   7.163  		int irq = -1;
   7.164  
   7.165 +#ifdef CONFIG_SMP
   7.166  		/* TODO: we need to call vcpu_up here */
   7.167  		if (unlikely(vector == ap_wakeup_vector)) {
   7.168  			extern void xen_send_ipi (int cpu, int vec);
   7.169 @@ -448,6 +494,7 @@ ia64_send_ipi (int cpu, int vector, int 
   7.170  			//vcpu_prepare_and_up(cpu);
   7.171  			return;
   7.172  		}
   7.173 +#endif
   7.174  
   7.175  		switch(vector) {
   7.176  		case IA64_IPI_VECTOR:
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c	Fri Jul 28 10:51:38 2006 +0100
     8.3 @@ -0,0 +1,268 @@
     8.4 +/*
     8.5 + * Instruction-patching support.
     8.6 + *
     8.7 + * Copyright (C) 2003 Hewlett-Packard Co
     8.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     8.9 + */
    8.10 +#include <linux/init.h>
    8.11 +#include <linux/string.h>
    8.12 +
    8.13 +#include <asm/patch.h>
    8.14 +#include <asm/processor.h>
    8.15 +#include <asm/sections.h>
    8.16 +#include <asm/system.h>
    8.17 +#include <asm/unistd.h>
    8.18 +
    8.19 +/*
    8.20 + * This was adapted from code written by Tony Luck:
    8.21 + *
    8.22 + * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
    8.23 + * like this:
    8.24 + *
    8.25 + * 6  6         5         4         3         2         1
    8.26 + * 3210987654321098765432109876543210987654321098765432109876543210
    8.27 + * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
    8.28 + *
    8.29 + * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    8.30 + * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
    8.31 + */
    8.32 +static u64
    8.33 +get_imm64 (u64 insn_addr)
    8.34 +{
    8.35 +	u64 *p = (u64 *) (insn_addr & -16);	/* mask out slot number */
    8.36 +
    8.37 +	return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
    8.38 +		((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
    8.39 +		((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
    8.40 +		((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
    8.41 +		((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
    8.42 +		((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
    8.43 +		((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
    8.44 +}
    8.45 +
    8.46 +/* Patch instruction with "val" where "mask" has 1 bits. */
    8.47 +void
    8.48 +ia64_patch (u64 insn_addr, u64 mask, u64 val)
    8.49 +{
    8.50 +	u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
    8.51 +#	define insn_mask ((1UL << 41) - 1)
    8.52 +	unsigned long shift;
    8.53 +
    8.54 +	b0 = b[0]; b1 = b[1];
    8.55 +	shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
    8.56 +	if (shift >= 64) {
    8.57 +		m1 = mask << (shift - 64);
    8.58 +		v1 = val << (shift - 64);
    8.59 +	} else {
    8.60 +		m0 = mask << shift; m1 = mask >> (64 - shift);
    8.61 +		v0 = val  << shift; v1 = val >> (64 - shift);
    8.62 +		b[0] = (b0 & ~m0) | (v0 & m0);
    8.63 +	}
    8.64 +	b[1] = (b1 & ~m1) | (v1 & m1);
    8.65 +}
    8.66 +
    8.67 +void
    8.68 +ia64_patch_imm64 (u64 insn_addr, u64 val)
    8.69 +{
     8.70 +	/* The assembler may generate an offset pointing to either slot 1
     8.71 +	   or slot 2 of a long (2-slot) instruction, which occupies slots 1
    8.72 +	   and 2.  */
    8.73 +  	insn_addr &= -16UL;
    8.74 +	ia64_patch(insn_addr + 2,
    8.75 +		   0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
    8.76 +				     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
    8.77 +				     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
    8.78 +				     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
    8.79 +				     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
    8.80 +	ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
    8.81 +}
    8.82 +
    8.83 +void
    8.84 +ia64_patch_imm60 (u64 insn_addr, u64 val)
    8.85 +{
     8.86 +	/* The assembler may generate an offset pointing to either slot 1
     8.87 +	   or slot 2 of a long (2-slot) instruction, which occupies slots 1
    8.88 +	   and 2.  */
    8.89 +  	insn_addr &= -16UL;
    8.90 +	ia64_patch(insn_addr + 2,
    8.91 +		   0x011ffffe000UL, (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
    8.92 +				     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
    8.93 +	ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
    8.94 +}
    8.95 +
    8.96 +/*
     8.97 + * We sometimes need to load the physical address of a kernel
     8.98 + * object.  Often we can convert the virtual address to physical
     8.99 + * at execution time, but sometimes (either for performance reasons
    8.100 + * or during error recovery) we cannot do this.  Patch the marked
   8.101 + * bundles to load the physical address.
   8.102 + */
   8.103 +void __init
   8.104 +ia64_patch_vtop (unsigned long start, unsigned long end)
   8.105 +{
   8.106 +	s32 *offp = (s32 *) start;
   8.107 +	u64 ip;
   8.108 +
   8.109 +	while (offp < (s32 *) end) {
   8.110 +		ip = (u64) offp + *offp;
   8.111 +
   8.112 +		/* replace virtual address with corresponding physical address: */
   8.113 +		ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
   8.114 +		ia64_fc((void *) ip);
   8.115 +		++offp;
   8.116 +	}
   8.117 +	ia64_sync_i();
   8.118 +	ia64_srlz_i();
   8.119 +}
   8.120 +
   8.121 +void
   8.122 +ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
   8.123 +{
   8.124 +	static int first_time = 1;
   8.125 +	int need_workaround;
   8.126 +	s32 *offp = (s32 *) start;
   8.127 +	u64 *wp;
   8.128 +
   8.129 +	need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);
   8.130 +
   8.131 +	if (first_time) {
   8.132 +		first_time = 0;
   8.133 +		if (need_workaround)
   8.134 +			printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
   8.135 +		else
   8.136 +			printk(KERN_INFO "McKinley Errata 9 workaround not needed; "
   8.137 +			       "disabling it\n");
   8.138 +	}
   8.139 +	if (need_workaround)
   8.140 +		return;
   8.141 +
   8.142 +	while (offp < (s32 *) end) {
   8.143 +		wp = (u64 *) ia64_imva((char *) offp + *offp);
   8.144 +		wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
   8.145 +		wp[1] = 0x0004000000000200UL;
   8.146 +		wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
   8.147 +		wp[3] = 0x0084006880000200UL;
   8.148 +		ia64_fc(wp); ia64_fc(wp + 2);
   8.149 +		++offp;
   8.150 +	}
   8.151 +	ia64_sync_i();
   8.152 +	ia64_srlz_i();
   8.153 +}
   8.154 +
   8.155 +static void
   8.156 +patch_fsyscall_table (unsigned long start, unsigned long end)
   8.157 +{
   8.158 +	extern unsigned long fsyscall_table[NR_syscalls];
   8.159 +	s32 *offp = (s32 *) start;
   8.160 +	u64 ip;
   8.161 +
   8.162 +	while (offp < (s32 *) end) {
   8.163 +		ip = (u64) ia64_imva((char *) offp + *offp);
   8.164 +		ia64_patch_imm64(ip, (u64) fsyscall_table);
   8.165 +		ia64_fc((void *) ip);
   8.166 +		++offp;
   8.167 +	}
   8.168 +	ia64_sync_i();
   8.169 +	ia64_srlz_i();
   8.170 +}
   8.171 +
   8.172 +static void
   8.173 +patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
   8.174 +{
   8.175 +	extern char fsys_bubble_down[];
   8.176 +	s32 *offp = (s32 *) start;
   8.177 +	u64 ip;
   8.178 +
   8.179 +	while (offp < (s32 *) end) {
   8.180 +		ip = (u64) offp + *offp;
   8.181 +		ia64_patch_imm60((u64) ia64_imva((void *) ip),
   8.182 +				 (u64) (fsys_bubble_down - (ip & -16)) / 16);
   8.183 +		ia64_fc((void *) ip);
   8.184 +		++offp;
   8.185 +	}
   8.186 +	ia64_sync_i();
   8.187 +	ia64_srlz_i();
   8.188 +}
   8.189 +
   8.190 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
   8.191 +extern char __start_gate_running_on_xen_patchlist[];
   8.192 +extern char __end_gate_running_on_xen_patchlist[];
   8.193 +
   8.194 +void
   8.195 +patch_running_on_xen(unsigned long start, unsigned long end)
   8.196 +{
   8.197 +	extern int running_on_xen;
   8.198 +	s32 *offp = (s32 *)start;
   8.199 +	u64 ip;
   8.200 +
   8.201 +	while (offp < (s32 *)end) {
   8.202 +		ip = (u64)ia64_imva((char *)offp + *offp);
   8.203 +		ia64_patch_imm64(ip, (u64)&running_on_xen);
   8.204 +		ia64_fc((void *)ip);
   8.205 +		++offp;
   8.206 +	}
   8.207 +	ia64_sync_i();
   8.208 +	ia64_srlz_i();
   8.209 +}
   8.210 +
   8.211 +static void
   8.212 +patch_brl_symaddr(unsigned long start, unsigned long end,
   8.213 +                  unsigned long symaddr)
   8.214 +{
   8.215 +	s32 *offp = (s32 *)start;
   8.216 +	u64 ip;
   8.217 +
   8.218 +	while (offp < (s32 *)end) {
   8.219 +		ip = (u64)offp + *offp;
   8.220 +		ia64_patch_imm60((u64)ia64_imva((void *)ip),
   8.221 +				 (u64)(symaddr - (ip & -16)) / 16);
   8.222 +		ia64_fc((void *)ip);
   8.223 +		++offp;
   8.224 +	}
   8.225 +	ia64_sync_i();
   8.226 +	ia64_srlz_i();
   8.227 +}
   8.228 +
   8.229 +#define EXTERN_PATCHLIST(name)					\
   8.230 +	extern char __start_gate_brl_##name##_patchlist[];	\
   8.231 +	extern char __end_gate_brl_##name##_patchlist[];	\
   8.232 +	extern char name[]
   8.233 +
   8.234 +#define PATCH_BRL_SYMADDR(name)						\
   8.235 +	patch_brl_symaddr((unsigned long)__start_gate_brl_##name##_patchlist, \
   8.236 +	                  (unsigned long)__end_gate_brl_##name##_patchlist,   \
   8.237 +	                  (unsigned long)name)
   8.238 +
   8.239 +static void
   8.240 +patch_brl_in_vdso(void)
   8.241 +{
   8.242 +	EXTERN_PATCHLIST(xen_rsm_be_i);
   8.243 +	EXTERN_PATCHLIST(xen_get_psr);
   8.244 +	EXTERN_PATCHLIST(xen_ssm_i_0);
   8.245 +	EXTERN_PATCHLIST(xen_ssm_i_1);
   8.246 +
   8.247 +	PATCH_BRL_SYMADDR(xen_rsm_be_i);
   8.248 +	PATCH_BRL_SYMADDR(xen_get_psr);
   8.249 +	PATCH_BRL_SYMADDR(xen_ssm_i_0);
   8.250 +	PATCH_BRL_SYMADDR(xen_ssm_i_1);
   8.251 +}
   8.252 +#else
   8.253 +#define patch_running_on_xen(start, end)	do { } while (0)
   8.254 +#define patch_brl_in_vdso()			do { } while (0)
   8.255 +#endif
   8.256 +
   8.257 +void
   8.258 +ia64_patch_gate (void)
   8.259 +{
   8.260 +#	define START(name)	((unsigned long) __start_gate_##name##_patchlist)
   8.261 +#	define END(name)	((unsigned long)__end_gate_##name##_patchlist)
   8.262 +
   8.263 +	patch_fsyscall_table(START(fsyscall), END(fsyscall));
   8.264 +	patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
   8.265 +#ifdef CONFIG_XEN
   8.266 +	patch_running_on_xen(START(running_on_xen), END(running_on_xen));
   8.267 +	patch_brl_in_vdso();
   8.268 +#endif
   8.269 +	ia64_patch_vtop(START(vtop), END(vtop));
   8.270 +	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
   8.271 +}
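
All of the patch-list walkers in this file share one shape: each .xdata4 entry emitted from gate.S is a 32-bit offset relative to the entry's own address, so adding the two recovers the address of the bundle to patch. A condensed sketch of that common pattern (illustrative only; the changeset keeps the walkers separate so each can apply a different kind of patch):

#include <linux/types.h>
#include <asm/intrinsics.h>	/* ia64_fc, ia64_sync_i, ia64_srlz_i */

/* Walk one ".data.patch.*" list and hand every patch site to a callback. */
static void walk_gate_patchlist(unsigned long start, unsigned long end,
				void (*apply)(u64 ip))
{
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;	/* self-relative offset -> bundle address */
		apply(ip);			/* e.g. ia64_patch_imm64(ip, ...) */
		ia64_fc((void *) ip);		/* flush the modified cache line */
		++offp;
	}
	ia64_sync_i();				/* resync the instruction stream */
	ia64_srlz_i();
}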
     9.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Thu Jul 27 17:44:14 2006 -0500
     9.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Fri Jul 28 10:51:38 2006 +0100
     9.3 @@ -75,6 +75,20 @@ unsigned long __per_cpu_offset[NR_CPUS];
     9.4  EXPORT_SYMBOL(__per_cpu_offset);
     9.5  #endif
     9.6  
     9.7 +#ifdef CONFIG_XEN
     9.8 +static int
     9.9 +xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
    9.10 +{
    9.11 +	HYPERVISOR_shutdown(SHUTDOWN_crash);
    9.12 +	/* we're never actually going to get here... */
    9.13 +	return NOTIFY_DONE;
    9.14 +}
    9.15 +
    9.16 +static struct notifier_block xen_panic_block = {
    9.17 +	xen_panic_event, NULL, 0 /* try to go last */
    9.18 +};
    9.19 +#endif
    9.20 +
    9.21  extern void ia64_setup_printk_clock(void);
    9.22  
    9.23  DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
    9.24 @@ -418,8 +432,11 @@ setup_arch (char **cmdline_p)
    9.25  	unw_init();
    9.26  
    9.27  #ifdef CONFIG_XEN
    9.28 -	if (is_running_on_xen())
    9.29 +	if (is_running_on_xen()) {
    9.30  		setup_xen_features();
    9.31 +		/* Register a call for panic conditions. */
    9.32 +		notifier_chain_register(&panic_notifier_list, &xen_panic_block);
    9.33 +	}
    9.34  #endif
    9.35  
    9.36  	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
    9.37 @@ -523,15 +540,14 @@ setup_arch (char **cmdline_p)
    9.38  		shared_info_t *s = HYPERVISOR_shared_info;
    9.39  
    9.40  		xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
    9.41 -		xen_start_info->flags = s->arch.flags;
    9.42  
    9.43  		printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%ld "
    9.44  		       "flags=0x%x\n", s->arch.start_info_pfn,
    9.45  		       xen_start_info->nr_pages, xen_start_info->flags);
    9.46  
    9.47  		/* xen_start_info isn't setup yet, get the flags manually */
    9.48 -		if (s->arch.flags & SIF_INITDOMAIN) {
    9.49 -			if (!(s->arch.flags & SIF_PRIVILEGED))
    9.50 +		if (xen_start_info->flags & SIF_INITDOMAIN) {
    9.51 +			if (!(xen_start_info->flags & SIF_PRIVILEGED))
    9.52  				panic("Xen granted us console access "
    9.53  				      "but not privileged status");
    9.54  		} else {
    10.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S	Thu Jul 27 17:44:14 2006 -0500
    10.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S	Fri Jul 28 10:51:38 2006 +0100
    10.3 @@ -351,3 +351,59 @@ GLOBAL_ENTRY(xen_send_ipi)
    10.4          br.ret.sptk.many rp
    10.5          ;;
    10.6  END(xen_send_ipi)
    10.7 +
    10.8 +#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
     10.9 +// These stubs are vDSO-specialized.
    10.10 +// In fsys mode, call and ret can't be used.
   10.11 +GLOBAL_ENTRY(xen_rsm_be_i)
   10.12 +	ld8 r22=[r22]
   10.13 +	;; 
   10.14 +	st1 [r22]=r20
   10.15 +	st4 [r23]=r0
   10.16 +	XEN_HYPER_RSM_BE
   10.17 +	st4 [r23]=r20
   10.18 +	brl.cond.sptk	.vdso_rsm_be_i_ret
   10.19 +	;; 
   10.20 +END(xen_rsm_be_i)
   10.21 +
   10.22 +GLOBAL_ENTRY(xen_get_psr)
   10.23 +	mov r31=r8
   10.24 +	mov r25=IA64_PSR_IC
   10.25 +	st4 [r23]=r0
   10.26 +	XEN_HYPER_GET_PSR
   10.27 +	;; 
   10.28 +	st4 [r23]=r20
   10.29 +	or r29=r8,r25 // vpsr.ic was cleared for hyperprivop
   10.30 +	mov r8=r31
   10.31 +	brl.cond.sptk	.vdso_get_psr_ret
   10.32 +	;; 
   10.33 +END(xen_get_psr)
   10.34 +
   10.35 +GLOBAL_ENTRY(xen_ssm_i_0)
   10.36 +	st4 [r22]=r20
   10.37 +	ld4 r25=[r24]
   10.38 +	;;
   10.39 +	cmp.ne.unc p11,p0=r0, r25
   10.40 +	;; 
   10.41 +(p11)	st4 [r22]=r0
   10.42 +(p11)	st4 [r23]=r0
   10.43 +(p11)	XEN_HYPER_SSM_I
   10.44 +	
   10.45 +	brl.cond.sptk	.vdso_ssm_i_0_ret
   10.46 +	;; 
   10.47 +END(xen_ssm_i_0)
   10.48 +
   10.49 +GLOBAL_ENTRY(xen_ssm_i_1)
   10.50 +	st4 [r22]=r20
   10.51 +	ld4 r25=[r24]
   10.52 +	;; 
   10.53 +	cmp.ne.unc p11,p0=r0, r25
   10.54 +	;; 
   10.55 +(p11)	st4 [r22]=r0
   10.56 +(p11)	st4 [r23]=r0
   10.57 +(p11)	XEN_HYPER_SSM_I
   10.58 +	;;
   10.59 +	brl.cond.sptk	.vdso_ssm_i_1_ret
   10.60 +	;; 
   10.61 +END(xen_ssm_i_1)
   10.62 +#endif
    11.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Thu Jul 27 17:44:14 2006 -0500
    11.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Fri Jul 28 10:51:38 2006 +0100
    11.3 @@ -198,7 +198,7 @@ int
    11.4  		.nr_exchanged = 0
    11.5  	};
    11.6  
    11.7 -	if (order > MAX_CONTIG_ORDER)
    11.8 +	if (unlikely(order > MAX_CONTIG_ORDER))
    11.9  		return -ENOMEM;
   11.10  	
   11.11  	set_xen_guest_handle(exchange.in.extent_start, in_frames);
   11.12 @@ -299,7 +299,7 @@ void
   11.13  	if (!test_bit(start_gpfn, contiguous_bitmap))
   11.14  		return;
   11.15  
   11.16 -	if (order > MAX_CONTIG_ORDER)
   11.17 +	if (unlikely(order > MAX_CONTIG_ORDER))
   11.18  		return;
   11.19  
   11.20  	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
   11.21 @@ -547,8 +547,10 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
   11.22  	unsigned long gpfn;
   11.23  	unsigned long flags;
   11.24  
   11.25 -	BUG_ON((addr & ~PAGE_MASK) != 0);
   11.26 -	BUG_ON(mfn == INVALID_MFN);
   11.27 +	if ((addr & ~PAGE_MASK) != 0 || mfn == INVALID_MFN) {
   11.28 +		error = -EINVAL;
   11.29 +		goto out;
   11.30 +	}
   11.31  
   11.32  	if (entry->gpfn != INVALID_GPFN) {
   11.33  		error = -EBUSY;
   11.34 @@ -793,3 +795,13 @@ direct_remap_pfn_range(struct vm_area_st
   11.35  	return error;
   11.36  }
   11.37  
   11.38 +
   11.39 +/* Called after suspend, to resume time.  */
   11.40 +void
   11.41 +time_resume(void)
   11.42 +{
   11.43 +	extern void ia64_cpu_local_tick(void);
   11.44 +
   11.45 +	/* Just trigger a tick.  */
   11.46 +	ia64_cpu_local_tick();
   11.47 +}
    12.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/util.c	Thu Jul 27 17:44:14 2006 -0500
    12.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/util.c	Fri Jul 28 10:51:38 2006 +0100
    12.3 @@ -71,6 +71,9 @@ void free_vm_area(struct vm_struct *area
    12.4  	unsigned int order = get_order(area->size);
    12.5  	unsigned long i;
    12.6  
    12.7 +	/* xenbus_map_ring_valloc overrides this field!  */
    12.8 +	area->phys_addr = __pa(area->addr);
    12.9 +
    12.10  	// This area is used for foreign page mapping.
   12.11  	// So underlying machine page may not be assigned.
   12.12  	for (i = 0; i < (1 << order); i++) {
    13.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S	Thu Jul 27 17:44:14 2006 -0500
    13.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S	Fri Jul 28 10:51:38 2006 +0100
    13.3 @@ -33,3 +33,23 @@ GLOBAL_ENTRY(early_xen_setup)
    13.4  	br.ret.sptk.many rp
    13.5  	;;
    13.6  END(early_xen_setup)
    13.7 +
    13.8 +#include <xen/interface/xen.h>
    13.9 +
   13.10 +/* Stub for suspend.
    13.11 +   Just force the stacked registers to be written to memory.  */
   13.12 +GLOBAL_ENTRY(HYPERVISOR_suspend)
   13.13 +	alloc r20=ar.pfs,0,0,0,0
   13.14 +	mov r14=2
   13.15 +	mov r15=r12
   13.16 +	;;
   13.17 +	/* We don't want to deal with RSE.  */
   13.18 +	flushrs
   13.19 +	mov r2=__HYPERVISOR_sched_op
   13.20 +	st4 [r12]=r14
   13.21 +	;;
   13.22 +	break 0x1000
   13.23 +	;; 
   13.24 +	mov ar.pfs=r20
   13.25 +	br.ret.sptk.many b0
   13.26 +END(HYPERVISOR_suspend)
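
At the hypercall level, the stub above is an ordinary sched_op shutdown with reason "suspend"; SCHEDOP_shutdown and SHUTDOWN_suspend are both 2, which is why a single constant serves as the command in r14 and as the reason stored on the stack. A rough C equivalent is sketched below; the real code stays an assembly stub because flushrs must push the stacked registers to memory before the domain image is saved.

#include <xen/interface/sched.h>
#include <asm/hypercall.h>

/* Sketch: the C-level shape of the suspend request issued by the stub. */
static int suspend_via_sched_op(void)
{
	struct sched_shutdown arg = { .reason = SHUTDOWN_suspend };	/* == 2 */

	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);		/* cmd is also 2 */
}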
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Thu Jul 27 17:44:14 2006 -0500
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Jul 28 10:51:38 2006 +0100
    14.3 @@ -39,6 +39,7 @@ extern void ctrl_alt_del(void);
    14.4   */
    14.5  #define SHUTDOWN_HALT      4
    14.6  
    14.7 +#if defined(__i386__) || defined(__x86_64__)
    14.8  void machine_emergency_restart(void)
    14.9  {
   14.10  	/* We really want to get pending console data out before we die. */
   14.11 @@ -60,10 +61,8 @@ void machine_power_off(void)
   14.12  {
   14.13  	/* We really want to get pending console data out before we die. */
   14.14  	xencons_force_flush();
   14.15 -#if defined(__i386__) || defined(__x86_64__)
   14.16  	if (pm_power_off)
   14.17  		pm_power_off();
   14.18 -#endif
   14.19  	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
   14.20  }
   14.21  
   14.22 @@ -71,7 +70,7 @@ int reboot_thru_bios = 0;	/* for dmi_sca
   14.23  EXPORT_SYMBOL(machine_restart);
   14.24  EXPORT_SYMBOL(machine_halt);
   14.25  EXPORT_SYMBOL(machine_power_off);
   14.26 -
   14.27 +#endif
   14.28  
   14.29  /******************************************************************************
   14.30   * Stop/pickle callback handling.
   14.31 @@ -82,6 +81,7 @@ static int shutting_down = SHUTDOWN_INVA
   14.32  static void __shutdown_handler(void *unused);
   14.33  static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
   14.34  
   14.35 +#if defined(__i386__) || defined(__x86_64__)
   14.36  /* Ensure we run on the idle task page tables so that we will
   14.37     switch page tables before running user space. This is needed
   14.38     on architectures with separate kernel and user page tables
   14.39 @@ -98,25 +98,30 @@ static void switch_idle_mm(void)
   14.40  	current->active_mm = &init_mm;
   14.41  	mmdrop(mm);
   14.42  }
   14.43 +#endif
   14.44  
   14.45  static int __do_suspend(void *ignore)
   14.46  {
   14.47 -	int i, j, k, fpp, err;
   14.48 -
   14.49 +	int err;
   14.50 +#if defined(__i386__) || defined(__x86_64__)
   14.51 +	int i, j, k, fpp;
   14.52  	extern unsigned long max_pfn;
   14.53  	extern unsigned long *pfn_to_mfn_frame_list_list;
   14.54  	extern unsigned long *pfn_to_mfn_frame_list[];
   14.55 +#endif
   14.56  
   14.57  	extern void time_resume(void);
   14.58  
   14.59  	BUG_ON(smp_processor_id() != 0);
   14.60  	BUG_ON(in_interrupt());
   14.61  
   14.62 +#if defined(__i386__) || defined(__x86_64__)
   14.63  	if (xen_feature(XENFEAT_auto_translated_physmap)) {
   14.64  		printk(KERN_WARNING "Cannot suspend in "
   14.65  		       "auto_translated_physmap mode.\n");
   14.66  		return -EOPNOTSUPP;
   14.67  	}
   14.68 +#endif
   14.69  
   14.70  	err = smp_suspend();
   14.71  	if (err)
   14.72 @@ -129,18 +134,24 @@ static int __do_suspend(void *ignore)
   14.73  #ifdef __i386__
   14.74  	kmem_cache_shrink(pgd_cache);
   14.75  #endif
   14.76 +#if defined(__i386__) || defined(__x86_64__)
   14.77  	mm_pin_all();
   14.78  
   14.79  	__cli();
   14.80 +#elif defined(__ia64__)
   14.81 +	local_irq_disable();
   14.82 +#endif
   14.83  	preempt_enable();
   14.84  
   14.85  	gnttab_suspend();
   14.86  
   14.87 +#if defined(__i386__) || defined(__x86_64__)
   14.88  	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
   14.89  	clear_fixmap(FIX_SHARED_INFO);
   14.90  
   14.91  	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
   14.92  	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
   14.93 +#endif
   14.94  
   14.95  	/*
   14.96  	 * We'll stop somewhere inside this hypercall. When it returns,
   14.97 @@ -150,6 +161,7 @@ static int __do_suspend(void *ignore)
   14.98  
   14.99  	shutting_down = SHUTDOWN_INVALID;
  14.100  
  14.101 +#if defined(__i386__) || defined(__x86_64__)
  14.102  	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
  14.103  
  14.104  	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
  14.105 @@ -171,6 +183,7 @@ static int __do_suspend(void *ignore)
  14.106  			virt_to_mfn(&phys_to_machine_mapping[i]);
  14.107  	}
  14.108  	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
  14.109 +#endif
  14.110  
  14.111  	gnttab_resume();
  14.112  
  14.113 @@ -178,9 +191,13 @@ static int __do_suspend(void *ignore)
  14.114  
  14.115  	time_resume();
  14.116  
  14.117 +#if defined(__i386__) || defined(__x86_64__)
  14.118  	switch_idle_mm();
  14.119  
  14.120  	__sti();
  14.121 +#elif defined(__ia64__)
  14.122 +	local_irq_enable();
  14.123 +#endif
  14.124  
  14.125  	xencons_resume();
  14.126  
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Jul 27 17:44:14 2006 -0500
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Jul 28 10:51:38 2006 +0100
    15.3 @@ -99,24 +99,21 @@ static spinlock_t net_schedule_list_lock
    15.4  #define MAX_MFN_ALLOC 64
    15.5  static unsigned long mfn_list[MAX_MFN_ALLOC];
    15.6  static unsigned int alloc_index = 0;
    15.7 -static DEFINE_SPINLOCK(mfn_lock);
    15.8  
    15.9  static unsigned long alloc_mfn(void)
   15.10  {
   15.11 -	unsigned long mfn = 0, flags;
   15.12 +	unsigned long mfn = 0;
   15.13  	struct xen_memory_reservation reservation = {
   15.14  		.nr_extents   = MAX_MFN_ALLOC,
   15.15  		.extent_order = 0,
   15.16  		.domid        = DOMID_SELF
   15.17  	};
   15.18  	set_xen_guest_handle(reservation.extent_start, mfn_list);
   15.19 -	spin_lock_irqsave(&mfn_lock, flags);
   15.20  	if ( unlikely(alloc_index == 0) )
   15.21  		alloc_index = HYPERVISOR_memory_op(
   15.22  			XENMEM_increase_reservation, &reservation);
   15.23  	if ( alloc_index != 0 )
   15.24  		mfn = mfn_list[--alloc_index];
   15.25 -	spin_unlock_irqrestore(&mfn_lock, flags);
   15.26  	return mfn;
   15.27  }
   15.28  
   15.29 @@ -222,9 +219,13 @@ static void net_rx_action(unsigned long 
   15.30  	unsigned long vdata, old_mfn, new_mfn;
   15.31  	struct sk_buff_head rxq;
   15.32  	struct sk_buff *skb;
   15.33 -	u16 notify_list[NET_RX_RING_SIZE];
   15.34  	int notify_nr = 0;
   15.35  	int ret;
   15.36 +	/*
   15.37 +	 * Putting hundreds of bytes on the stack is considered rude.
   15.38 +	 * Static works because a tasklet can only be on one CPU at any time.
   15.39 +	 */
   15.40 +	static u16 notify_list[NET_RX_RING_SIZE];
   15.41  
   15.42  	skb_queue_head_init(&rxq);
   15.43  
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Jul 27 17:44:14 2006 -0500
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Jul 28 10:51:38 2006 +0100
    16.3 @@ -788,6 +788,8 @@ static int network_start_xmit(struct sk_
    16.4  
    16.5  		gso->u.gso.size = skb_shinfo(skb)->gso_size;
    16.6  		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    16.7 +		gso->u.gso.pad = 0;
    16.8 +		gso->u.gso.features = 0;
    16.9  
   16.10  		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
   16.11  		gso->flags = 0;
    17.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h	Thu Jul 27 17:44:14 2006 -0500
    17.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h	Fri Jul 28 10:51:38 2006 +0100
    17.3 @@ -302,23 +302,7 @@ HYPERVISOR_vcpu_op(
    17.4      return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
    17.5  }
    17.6  
    17.7 -static inline int
    17.8 -HYPERVISOR_suspend(
    17.9 -	unsigned long srec)
   17.10 -{
   17.11 -	struct sched_shutdown sched_shutdown = {
   17.12 -		.reason = SHUTDOWN_suspend
   17.13 -	};
   17.14 -
   17.15 -	int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
   17.16 -			     &sched_shutdown, srec);
   17.17 -
   17.18 -	if (rc == -ENOSYS)
   17.19 -		rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
   17.20 -				 SHUTDOWN_suspend, srec);
   17.21 -
   17.22 -	return rc;
   17.23 -}
   17.24 +extern int HYPERVISOR_suspend(unsigned long srec);
   17.25  
   17.26  static inline int
   17.27  HYPERVISOR_callback_op(
    18.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h	Thu Jul 27 17:44:14 2006 -0500
    18.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h	Fri Jul 28 10:51:38 2006 +0100
    18.3 @@ -48,6 +48,8 @@
    18.4  #define	XEN_HYPER_GET_PMD		break HYPERPRIVOP_GET_PMD
    18.5  #define	XEN_HYPER_GET_EFLAG		break HYPERPRIVOP_GET_EFLAG
    18.6  #define	XEN_HYPER_SET_EFLAG		break HYPERPRIVOP_SET_EFLAG
    18.7 +#define	XEN_HYPER_RSM_BE		break HYPERPRIVOP_RSM_BE
    18.8 +#define	XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
    18.9  
   18.10  #define XSI_IFS			(XSI_BASE + XSI_IFS_OFS)
   18.11  #define XSI_PRECOVER_IFS	(XSI_BASE + XSI_PRECOVER_IFS_OFS)
    19.1 --- a/tools/ioemu/patches/domain-reset	Thu Jul 27 17:44:14 2006 -0500
    19.2 +++ b/tools/ioemu/patches/domain-reset	Fri Jul 28 10:51:38 2006 +0100
    19.3 @@ -1,7 +1,7 @@
    19.4  Index: ioemu/target-i386-dm/helper2.c
    19.5  ===================================================================
    19.6 ---- ioemu.orig/target-i386-dm/helper2.c	2006-07-12 11:35:00.710827712 +0100
    19.7 -+++ ioemu/target-i386-dm/helper2.c	2006-07-12 11:35:02.419613627 +0100
    19.8 +--- ioemu.orig/target-i386-dm/helper2.c	2006-07-27 11:16:57.527492229 +0100
    19.9 ++++ ioemu/target-i386-dm/helper2.c	2006-07-27 11:16:59.381287013 +0100
   19.10  @@ -123,6 +123,25 @@
   19.11   /* called from main_cpu_reset */
   19.12   void cpu_reset(CPUX86State *env)
   19.13 @@ -41,9 +41,9 @@ Index: ioemu/target-i386-dm/helper2.c
   19.14           /* Wait up to 10 msec. */
   19.15  Index: ioemu/vl.c
   19.16  ===================================================================
   19.17 ---- ioemu.orig/vl.c	2006-07-12 11:35:02.273631916 +0100
   19.18 -+++ ioemu/vl.c	2006-07-12 11:35:02.421613376 +0100
   19.19 -@@ -4411,7 +4411,7 @@
   19.20 +--- ioemu.orig/vl.c	2006-07-27 11:16:59.317294097 +0100
   19.21 ++++ ioemu/vl.c	2006-07-27 11:16:59.384286681 +0100
   19.22 +@@ -4412,7 +4412,7 @@
   19.23   } QEMUResetEntry;
   19.24   
   19.25   static QEMUResetEntry *first_reset_entry;
   19.26 @@ -54,8 +54,8 @@ Index: ioemu/vl.c
   19.27   
   19.28  Index: ioemu/vl.h
   19.29  ===================================================================
   19.30 ---- ioemu.orig/vl.h	2006-07-12 11:35:01.454734511 +0100
   19.31 -+++ ioemu/vl.h	2006-07-12 11:35:02.422613251 +0100
   19.32 +--- ioemu.orig/vl.h	2006-07-27 11:16:58.127425816 +0100
   19.33 ++++ ioemu/vl.h	2006-07-27 11:16:59.384286681 +0100
   19.34  @@ -122,6 +122,7 @@
   19.35   
   19.36   void qemu_register_reset(QEMUResetHandler *func, void *opaque);
    20.1 --- a/tools/ioemu/patches/domain-timeoffset	Thu Jul 27 17:44:14 2006 -0500
    20.2 +++ b/tools/ioemu/patches/domain-timeoffset	Fri Jul 28 10:51:38 2006 +0100
    20.3 @@ -1,7 +1,7 @@
    20.4  Index: ioemu/hw/mc146818rtc.c
    20.5  ===================================================================
    20.6 ---- ioemu.orig/hw/mc146818rtc.c	2006-07-26 15:17:35.110819901 +0100
    20.7 -+++ ioemu/hw/mc146818rtc.c	2006-07-26 15:17:40.292255496 +0100
    20.8 +--- ioemu.orig/hw/mc146818rtc.c	2006-07-27 11:17:18.007225084 +0100
    20.9 ++++ ioemu/hw/mc146818rtc.c	2006-07-27 11:17:48.250876949 +0100
   20.10  @@ -178,10 +178,27 @@
   20.11       }
   20.12   }
   20.13 @@ -46,8 +46,8 @@ Index: ioemu/hw/mc146818rtc.c
   20.14   static void rtc_copy_date(RTCState *s)
   20.15  Index: ioemu/hw/pc.c
   20.16  ===================================================================
   20.17 ---- ioemu.orig/hw/pc.c	2006-07-26 15:17:39.820306906 +0100
   20.18 -+++ ioemu/hw/pc.c	2006-07-26 15:17:40.293255388 +0100
   20.19 +--- ioemu.orig/hw/pc.c	2006-07-27 11:17:47.993905398 +0100
   20.20 ++++ ioemu/hw/pc.c	2006-07-27 11:17:48.251876839 +0100
   20.21  @@ -151,7 +151,7 @@
   20.22   }
   20.23   
   20.24 @@ -117,8 +117,8 @@ Index: ioemu/hw/pc.c
   20.25   QEMUMachine pc_machine = {
   20.26  Index: ioemu/vl.c
   20.27  ===================================================================
   20.28 ---- ioemu.orig/vl.c	2006-07-26 15:17:40.169268893 +0100
   20.29 -+++ ioemu/vl.c	2006-07-26 15:17:40.296255061 +0100
   20.30 +--- ioemu.orig/vl.c	2006-07-27 11:17:48.126890676 +0100
   20.31 ++++ ioemu/vl.c	2006-07-27 11:17:48.254876507 +0100
   20.32  @@ -164,6 +164,8 @@
   20.33   
   20.34   int xc_handle;
   20.35 @@ -128,7 +128,7 @@ Index: ioemu/vl.c
   20.36   char domain_name[1024] = { 'H','V', 'M', 'X', 'E', 'N', '-'};
   20.37   extern int domid;
   20.38   
   20.39 -@@ -4799,6 +4801,7 @@
   20.40 +@@ -4800,6 +4802,7 @@
   20.41   #endif
   20.42              "-loadvm file    start right away with a saved state (loadvm in monitor)\n"
   20.43   	   "-vnc display    start a VNC server on display\n"
   20.44 @@ -136,7 +136,7 @@ Index: ioemu/vl.c
   20.45              "\n"
   20.46              "During emulation, the following keys are useful:\n"
   20.47              "ctrl-alt-f      toggle full screen\n"
   20.48 -@@ -4889,6 +4892,7 @@
   20.49 +@@ -4890,6 +4893,7 @@
   20.50   
   20.51       QEMU_OPTION_d,
   20.52       QEMU_OPTION_vcpus,
   20.53 @@ -144,7 +144,7 @@ Index: ioemu/vl.c
   20.54   };
   20.55   
   20.56   typedef struct QEMUOption {
   20.57 -@@ -4967,6 +4971,7 @@
   20.58 +@@ -4968,6 +4972,7 @@
   20.59       
   20.60       { "d", HAS_ARG, QEMU_OPTION_d },
   20.61       { "vcpus", 1, QEMU_OPTION_vcpus },
   20.62 @@ -152,7 +152,7 @@ Index: ioemu/vl.c
   20.63       { NULL },
   20.64   };
   20.65   
   20.66 -@@ -5669,6 +5674,9 @@
   20.67 +@@ -5670,6 +5675,9 @@
   20.68                   vcpus = atoi(optarg);
   20.69                   fprintf(logfile, "qemu: the number of cpus is %d\n", vcpus);
   20.70                   break;
   20.71 @@ -162,7 +162,7 @@ Index: ioemu/vl.c
   20.72               }
   20.73           }
   20.74       }
   20.75 -@@ -5992,7 +6000,8 @@
   20.76 +@@ -5993,7 +6001,8 @@
   20.77   
   20.78       machine->init(ram_size, vga_ram_size, boot_device,
   20.79                     ds, fd_filename, snapshot,
   20.80 @@ -174,8 +174,8 @@ Index: ioemu/vl.c
   20.81       qemu_mod_timer(gui_timer, qemu_get_clock(rt_clock));
   20.82  Index: ioemu/vl.h
   20.83  ===================================================================
   20.84 ---- ioemu.orig/vl.h	2006-07-26 15:17:39.825306361 +0100
   20.85 -+++ ioemu/vl.h	2006-07-26 15:17:40.297254952 +0100
   20.86 +--- ioemu.orig/vl.h	2006-07-27 11:17:47.998904845 +0100
   20.87 ++++ ioemu/vl.h	2006-07-27 11:17:48.254876507 +0100
   20.88  @@ -556,7 +556,7 @@
   20.89                                    int boot_device,
   20.90                DisplayState *ds, const char **fd_filename, int snapshot,
    21.1 --- a/tools/ioemu/patches/hypervisor-pit	Thu Jul 27 17:44:14 2006 -0500
    21.2 +++ b/tools/ioemu/patches/hypervisor-pit	Fri Jul 28 10:51:38 2006 +0100
    21.3 @@ -1,7 +1,7 @@
    21.4  Index: ioemu/Makefile.target
    21.5  ===================================================================
    21.6 ---- ioemu.orig/Makefile.target	2006-07-12 11:35:01.899678766 +0100
    21.7 -+++ ioemu/Makefile.target	2006-07-12 11:35:02.711577049 +0100
    21.8 +--- ioemu.orig/Makefile.target	2006-07-27 11:16:58.970332506 +0100
    21.9 ++++ ioemu/Makefile.target	2006-07-27 11:16:59.758245283 +0100
   21.10  @@ -333,7 +333,7 @@
   21.11   ifeq ($(TARGET_BASE_ARCH), i386)
   21.12   # Hardware support
   21.13 @@ -13,8 +13,8 @@ Index: ioemu/Makefile.target
   21.14   endif
   21.15  Index: ioemu/hw/pc.c
   21.16  ===================================================================
   21.17 ---- ioemu.orig/hw/pc.c	2006-07-12 11:35:02.059658723 +0100
   21.18 -+++ ioemu/hw/pc.c	2006-07-12 11:35:02.712576924 +0100
   21.19 +--- ioemu.orig/hw/pc.c	2006-07-27 11:16:59.036325200 +0100
   21.20 ++++ ioemu/hw/pc.c	2006-07-27 11:16:59.759245173 +0100
   21.21  @@ -38,7 +38,9 @@
   21.22   
   21.23   static fdctrl_t *floppy_controller;
   21.24 @@ -38,9 +38,9 @@ Index: ioemu/hw/pc.c
   21.25           pic_set_alt_irq_func(isa_pic, ioapic_set_irq, ioapic);
   21.26  Index: ioemu/vl.c
   21.27  ===================================================================
   21.28 ---- ioemu.orig/vl.c	2006-07-12 11:35:02.649584815 +0100
   21.29 -+++ ioemu/vl.c	2006-07-12 11:35:02.715576548 +0100
   21.30 -@@ -5033,6 +5033,7 @@
   21.31 +--- ioemu.orig/vl.c	2006-07-27 11:16:59.614261222 +0100
   21.32 ++++ ioemu/vl.c	2006-07-27 11:16:59.762244841 +0100
   21.33 +@@ -5034,6 +5034,7 @@
   21.34   
   21.35   #ifdef HAS_AUDIO
   21.36   struct soundhw soundhw[] = {
   21.37 @@ -48,7 +48,7 @@ Index: ioemu/vl.c
   21.38   #ifdef TARGET_I386
   21.39       {
   21.40           "pcspk",
   21.41 -@@ -5042,6 +5043,7 @@
   21.42 +@@ -5043,6 +5044,7 @@
   21.43           { .init_isa = pcspk_audio_init }
   21.44       },
   21.45   #endif
    22.1 --- a/tools/ioemu/patches/ioemu-ia64	Thu Jul 27 17:44:14 2006 -0500
    22.2 +++ b/tools/ioemu/patches/ioemu-ia64	Fri Jul 28 10:51:38 2006 +0100
    22.3 @@ -1,7 +1,7 @@
    22.4  Index: ioemu/hw/iommu.c
    22.5  ===================================================================
    22.6 ---- ioemu.orig/hw/iommu.c	2006-07-26 15:17:35.639762285 +0100
    22.7 -+++ ioemu/hw/iommu.c	2006-07-26 15:17:39.078387722 +0100
    22.8 +--- ioemu.orig/hw/iommu.c	2006-07-28 09:56:58.571272016 +0100
    22.9 ++++ ioemu/hw/iommu.c	2006-07-28 10:02:10.171049510 +0100
   22.10  @@ -82,7 +82,11 @@
   22.11   #define IOPTE_VALID         0x00000002 /* IOPTE is valid */
   22.12   #define IOPTE_WAZ           0x00000001 /* Write as zeros */
   22.13 @@ -16,8 +16,8 @@ Index: ioemu/hw/iommu.c
   22.14   
   22.15  Index: ioemu/cpu-all.h
   22.16  ===================================================================
   22.17 ---- ioemu.orig/cpu-all.h	2006-07-26 15:17:38.728425843 +0100
   22.18 -+++ ioemu/cpu-all.h	2006-07-26 15:17:39.079387613 +0100
   22.19 +--- ioemu.orig/cpu-all.h	2006-07-28 09:58:38.815935452 +0100
   22.20 ++++ ioemu/cpu-all.h	2006-07-28 10:02:10.171049510 +0100
   22.21  @@ -835,6 +835,31 @@
   22.22                   :"=m" (*(volatile long *)addr)
   22.23                   :"dIr" (nr));
   22.24 @@ -52,9 +52,9 @@ Index: ioemu/cpu-all.h
   22.25   /* memory API */
   22.26  Index: ioemu/vl.c
   22.27  ===================================================================
   22.28 ---- ioemu.orig/vl.c	2006-07-26 15:17:39.011395020 +0100
   22.29 -+++ ioemu/vl.c	2006-07-26 21:11:35.957492161 +0100
   22.30 -@@ -5577,6 +5577,7 @@
   22.31 +--- ioemu.orig/vl.c	2006-07-28 09:58:59.672577418 +0100
   22.32 ++++ ioemu/vl.c	2006-07-28 10:02:10.174049171 +0100
   22.33 +@@ -5578,6 +5578,7 @@
   22.34           exit(-1);
   22.35       }
   22.36   
   22.37 @@ -62,7 +62,7 @@ Index: ioemu/vl.c
   22.38       if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
   22.39           fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
   22.40           exit(-1);
   22.41 -@@ -5597,6 +5598,34 @@
   22.42 +@@ -5598,6 +5599,34 @@
   22.43       fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n", nr_pages - 1,
   22.44               (uint64_t)(page_array[nr_pages - 1]));
   22.45   
   22.46 @@ -99,9 +99,9 @@ Index: ioemu/vl.c
   22.47   #ifdef CONFIG_SOFTMMU
   22.48  Index: ioemu/target-i386-dm/exec-dm.c
   22.49  ===================================================================
   22.50 ---- ioemu.orig/target-i386-dm/exec-dm.c	2006-07-26 15:17:38.283474311 +0100
   22.51 -+++ ioemu/target-i386-dm/exec-dm.c	2006-07-26 15:17:39.081387395 +0100
   22.52 -@@ -340,6 +340,23 @@
   22.53 +--- ioemu.orig/target-i386-dm/exec-dm.c	2006-07-28 09:58:22.882736989 +0100
   22.54 ++++ ioemu/target-i386-dm/exec-dm.c	2006-07-28 10:03:19.972165675 +0100
   22.55 +@@ -341,6 +341,23 @@
   22.56       return io_mem_read[io_index >> IO_MEM_SHIFT];
   22.57   }
   22.58   
   22.59 @@ -125,20 +125,20 @@ Index: ioemu/target-i386-dm/exec-dm.c
   22.60   /* physical memory access (slow version, mainly for debug) */
   22.61   #if defined(CONFIG_USER_ONLY)
   22.62   void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
   22.63 -@@ -455,6 +472,9 @@
   22.64 +@@ -456,6 +473,9 @@
   22.65                   ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
   22.66                       (addr & ~TARGET_PAGE_MASK);
   22.67                   memcpy(buf, ptr, l);
   22.68  +#ifdef __ia64__
   22.69  +                sync_icache((unsigned long)ptr, l);
   22.70  +#endif 
   22.71 -             }
   22.72 -         }
   22.73 -         len -= l;
   22.74 +             } else {
   22.75 +                 /* unreported MMIO space */
   22.76 +                 memset(buf, 0xff, len);
   22.77  Index: ioemu/exec-all.h
   22.78  ===================================================================
   22.79 ---- ioemu.orig/exec-all.h	2006-07-26 15:17:38.200483351 +0100
   22.80 -+++ ioemu/exec-all.h	2006-07-26 21:11:41.262898983 +0100
   22.81 +--- ioemu.orig/exec-all.h	2006-07-28 09:56:58.572271903 +0100
   22.82 ++++ ioemu/exec-all.h	2006-07-28 10:02:10.175049059 +0100
   22.83  @@ -462,12 +462,13 @@
   22.84   }
   22.85   #endif
   22.86 @@ -158,8 +158,8 @@ Index: ioemu/exec-all.h
   22.87   
   22.88  Index: ioemu/target-i386-dm/cpu.h
   22.89  ===================================================================
   22.90 ---- ioemu.orig/target-i386-dm/cpu.h	2006-07-26 15:17:38.282474420 +0100
   22.91 -+++ ioemu/target-i386-dm/cpu.h	2006-07-26 15:17:39.082387287 +0100
   22.92 +--- ioemu.orig/target-i386-dm/cpu.h	2006-07-28 09:56:58.572271903 +0100
   22.93 ++++ ioemu/target-i386-dm/cpu.h	2006-07-28 10:02:10.175049059 +0100
   22.94  @@ -80,7 +80,11 @@
   22.95   /* helper2.c */
   22.96   int main_loop(void);
   22.97 @@ -175,7 +175,7 @@ Index: ioemu/target-i386-dm/cpu.h
   22.98  Index: ioemu/ia64_intrinsic.h
   22.99  ===================================================================
  22.100  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
  22.101 -+++ ioemu/ia64_intrinsic.h	2006-07-26 15:17:39.083387178 +0100
  22.102 ++++ ioemu/ia64_intrinsic.h	2006-07-28 10:02:10.176048946 +0100
  22.103  @@ -0,0 +1,276 @@
  22.104  +#ifndef IA64_INTRINSIC_H
  22.105  +#define IA64_INTRINSIC_H
    23.1 --- a/tools/ioemu/patches/qemu-bugfixes	Thu Jul 27 17:44:14 2006 -0500
    23.2 +++ b/tools/ioemu/patches/qemu-bugfixes	Fri Jul 28 10:51:38 2006 +0100
    23.3 @@ -1,7 +1,7 @@
    23.4  Index: ioemu/console.c
    23.5  ===================================================================
    23.6 ---- ioemu.orig/console.c	2006-07-26 13:39:11.999009495 +0100
    23.7 -+++ ioemu/console.c	2006-07-26 14:15:19.413719225 +0100
    23.8 +--- ioemu.orig/console.c	2006-07-27 11:16:53.732912290 +0100
    23.9 ++++ ioemu/console.c	2006-07-27 11:16:57.753467214 +0100
   23.10  @@ -449,7 +449,7 @@
   23.11               c++;
   23.12           }
   23.13 @@ -50,8 +50,8 @@ Index: ioemu/console.c
   23.14       s->y_base = 0;
   23.15  Index: ioemu/usb-linux.c
   23.16  ===================================================================
   23.17 ---- ioemu.orig/usb-linux.c	2006-07-26 13:39:11.999009495 +0100
   23.18 -+++ ioemu/usb-linux.c	2006-07-26 13:39:16.622514851 +0100
   23.19 +--- ioemu.orig/usb-linux.c	2006-07-27 11:16:53.732912290 +0100
   23.20 ++++ ioemu/usb-linux.c	2006-07-27 11:16:57.754467103 +0100
   23.21  @@ -26,6 +26,7 @@
   23.22   #if defined(__linux__)
   23.23   #include <dirent.h>
   23.24 @@ -60,3 +60,15 @@ Index: ioemu/usb-linux.c
   23.25   #include <linux/usbdevice_fs.h>
   23.26   #include <linux/version.h>
   23.27   
   23.28 +Index: ioemu/vl.c
   23.29 +===================================================================
   23.30 +--- ioemu.orig/vl.c	2006-07-27 11:16:57.681475183 +0100
   23.31 ++++ ioemu/vl.c	2006-07-27 11:17:33.279534373 +0100
   23.32 +@@ -3201,6 +3201,7 @@
   23.33 +             if (net_tap_fd_init(vlan, fd))
   23.34 +                 ret = 0;
   23.35 +         } else {
   23.36 ++            ifname[0] = '\0';
   23.37 +             get_param_value(ifname, sizeof(ifname), "ifname", p);
   23.38 +             if (get_param_value(setup_script, sizeof(setup_script), "script", p) == 0) {
   23.39 +                 pstrcpy(setup_script, sizeof(setup_script), DEFAULT_NETWORK_SCRIPT);
    24.1 --- a/tools/ioemu/patches/qemu-logging	Thu Jul 27 17:44:14 2006 -0500
    24.2 +++ b/tools/ioemu/patches/qemu-logging	Fri Jul 28 10:51:38 2006 +0100
    24.3 @@ -1,8 +1,8 @@
    24.4  Index: ioemu/vl.c
    24.5  ===================================================================
    24.6 ---- ioemu.orig/vl.c	2006-07-14 15:55:59.491503372 +0100
    24.7 -+++ ioemu/vl.c	2006-07-14 15:55:59.693480386 +0100
    24.8 -@@ -4697,7 +4697,7 @@
    24.9 +--- ioemu.orig/vl.c	2006-07-27 11:16:57.756466882 +0100
   24.10 ++++ ioemu/vl.c	2006-07-27 11:16:57.828458912 +0100
   24.11 +@@ -4698,7 +4698,7 @@
   24.12              "-S              freeze CPU at startup (use 'c' to start execution)\n"
   24.13              "-s              wait gdb connection to port %d\n"
   24.14              "-p port         change gdb connection port\n"
   24.15 @@ -11,7 +11,7 @@ Index: ioemu/vl.c
   24.16              "-hdachs c,h,s[,t]  force hard disk 0 physical geometry and the optional BIOS\n"
   24.17              "                translation (t=none or lba) (usually qemu can guess them)\n"
   24.18              "-L path         set the directory for the BIOS and VGA BIOS\n"
   24.19 -@@ -4775,7 +4775,7 @@
   24.20 +@@ -4776,7 +4776,7 @@
   24.21       QEMU_OPTION_S,
   24.22       QEMU_OPTION_s,
   24.23       QEMU_OPTION_p,
   24.24 @@ -20,7 +20,7 @@ Index: ioemu/vl.c
   24.25       QEMU_OPTION_hdachs,
   24.26       QEMU_OPTION_L,
   24.27   #ifdef USE_CODE_COPY
   24.28 -@@ -4844,7 +4844,7 @@
   24.29 +@@ -4845,7 +4845,7 @@
   24.30       { "S", 0, QEMU_OPTION_S },
   24.31       { "s", 0, QEMU_OPTION_s },
   24.32       { "p", HAS_ARG, QEMU_OPTION_p },
   24.33 @@ -29,7 +29,7 @@ Index: ioemu/vl.c
   24.34       { "hdachs", HAS_ARG, QEMU_OPTION_hdachs },
   24.35       { "L", HAS_ARG, QEMU_OPTION_L },
   24.36   #ifdef USE_CODE_COPY
   24.37 -@@ -5095,6 +5095,8 @@
   24.38 +@@ -5096,6 +5096,8 @@
   24.39       char usb_devices[MAX_VM_USB_PORTS][128];
   24.40       int usb_devices_index;
   24.41   
   24.42 @@ -38,7 +38,7 @@ Index: ioemu/vl.c
   24.43       LIST_INIT (&vm_change_state_head);
   24.44   #if !defined(CONFIG_SOFTMMU)
   24.45       /* we never want that malloc() uses mmap() */
   24.46 -@@ -5144,6 +5146,11 @@
   24.47 +@@ -5145,6 +5147,11 @@
   24.48       nb_nics = 0;
   24.49       /* default mac address of the first network interface */
   24.50       
   24.51 @@ -50,7 +50,7 @@ Index: ioemu/vl.c
   24.52       optind = 1;
   24.53       for(;;) {
   24.54           if (optind >= argc)
   24.55 -@@ -5329,7 +5336,7 @@
   24.56 +@@ -5330,7 +5337,7 @@
   24.57                       exit(1);
   24.58                   }
   24.59                   break;
   24.60 @@ -59,7 +59,7 @@ Index: ioemu/vl.c
   24.61                   {
   24.62                       int mask;
   24.63                       CPULogItem *item;
   24.64 -@@ -5700,7 +5707,7 @@
   24.65 +@@ -5701,7 +5708,7 @@
   24.66           stk.ss_flags = 0;
   24.67   
   24.68           if (sigaltstack(&stk, NULL) < 0) {
    25.1 --- a/tools/ioemu/patches/qemu-smp	Thu Jul 27 17:44:14 2006 -0500
    25.2 +++ b/tools/ioemu/patches/qemu-smp	Fri Jul 28 10:51:38 2006 +0100
    25.3 @@ -1,7 +1,7 @@
    25.4  Index: ioemu/vl.c
    25.5  ===================================================================
    25.6 ---- ioemu.orig/vl.c	2006-07-12 11:35:01.687705323 +0100
    25.7 -+++ ioemu/vl.c	2006-07-12 11:35:01.753697055 +0100
    25.8 +--- ioemu.orig/vl.c	2006-07-27 11:16:58.619371357 +0100
    25.9 ++++ ioemu/vl.c	2006-07-27 11:16:58.823348777 +0100
   25.10  @@ -159,6 +159,8 @@
   25.11   #define MAX_CPUS 1
   25.12   #endif
   25.13 @@ -11,7 +11,7 @@ Index: ioemu/vl.c
   25.14   int xc_handle;
   25.15   
   25.16   char domain_name[1024] = { 'H','V', 'M', 'X', 'E', 'N', '-'};
   25.17 -@@ -4635,6 +4637,7 @@
   25.18 +@@ -4636,6 +4638,7 @@
   25.19              "-m megs         set virtual RAM size to megs MB [default=%d]\n"
   25.20              "-smp n          set the number of CPUs to 'n' [default=1]\n"
   25.21              "-nographic      disable graphical output and redirect serial I/Os to console\n"
   25.22 @@ -19,7 +19,7 @@ Index: ioemu/vl.c
   25.23   #ifndef _WIN32
   25.24   	   "-k language     use keyboard layout (for example \"fr\" for French)\n"
   25.25   #endif
   25.26 -@@ -4809,6 +4812,7 @@
   25.27 +@@ -4810,6 +4813,7 @@
   25.28       QEMU_OPTION_vnc,
   25.29   
   25.30       QEMU_OPTION_d,
   25.31 @@ -27,7 +27,7 @@ Index: ioemu/vl.c
   25.32   };
   25.33   
   25.34   typedef struct QEMUOption {
   25.35 -@@ -4886,6 +4890,7 @@
   25.36 +@@ -4887,6 +4891,7 @@
   25.37       { "cirrusvga", 0, QEMU_OPTION_cirrusvga },
   25.38       
   25.39       { "d", HAS_ARG, QEMU_OPTION_d },
   25.40 @@ -35,7 +35,7 @@ Index: ioemu/vl.c
   25.41       { NULL },
   25.42   };
   25.43   
   25.44 -@@ -5508,6 +5513,10 @@
   25.45 +@@ -5509,6 +5514,10 @@
   25.46                   domid = atoi(optarg);
   25.47                   fprintf(logfile, "domid: %d\n", domid);
   25.48                   break;
    26.1 --- a/tools/ioemu/patches/qemu-target-i386-dm	Thu Jul 27 17:44:14 2006 -0500
    26.2 +++ b/tools/ioemu/patches/qemu-target-i386-dm	Fri Jul 28 10:51:38 2006 +0100
    26.3 @@ -1,7 +1,7 @@
    26.4  Index: ioemu/Makefile.target
    26.5  ===================================================================
    26.6 ---- ioemu.orig/Makefile.target	2006-07-26 11:45:57.572129351 +0100
    26.7 -+++ ioemu/Makefile.target	2006-07-26 11:45:57.589127569 +0100
    26.8 +--- ioemu.orig/Makefile.target	2006-07-28 09:56:49.468301708 +0100
    26.9 ++++ ioemu/Makefile.target	2006-07-28 09:56:58.486281629 +0100
   26.10  @@ -57,6 +57,8 @@
   26.11   QEMU_SYSTEM=qemu-fast
   26.12   endif
   26.13 @@ -32,8 +32,8 @@ Index: ioemu/Makefile.target
   26.14   endif
   26.15  Index: ioemu/configure
   26.16  ===================================================================
   26.17 ---- ioemu.orig/configure	2006-07-26 11:45:57.573129246 +0100
   26.18 -+++ ioemu/configure	2006-07-26 11:45:57.590127464 +0100
   26.19 +--- ioemu.orig/configure	2006-07-28 09:56:49.469301595 +0100
   26.20 ++++ ioemu/configure	2006-07-28 09:56:49.486299672 +0100
   26.21  @@ -359,6 +359,8 @@
   26.22       if [ "$user" = "yes" ] ; then
   26.23           target_list="i386-user arm-user armeb-user sparc-user ppc-user mips-user mipsel-user $target_list"
   26.24 @@ -45,8 +45,8 @@ Index: ioemu/configure
   26.25   fi
   26.26  Index: ioemu/monitor.c
   26.27  ===================================================================
   26.28 ---- ioemu.orig/monitor.c	2006-07-26 11:45:57.576128931 +0100
   26.29 -+++ ioemu/monitor.c	2006-07-26 11:45:57.591127359 +0100
   26.30 +--- ioemu.orig/monitor.c	2006-07-28 09:56:49.472301255 +0100
   26.31 ++++ ioemu/monitor.c	2006-07-28 09:56:58.720255164 +0100
   26.32  @@ -1142,6 +1142,10 @@
   26.33         "", "show host USB devices", },
   26.34       { "profile", "", do_info_profile,
   26.35 @@ -60,8 +60,8 @@ Index: ioemu/monitor.c
   26.36   
   26.37  Index: ioemu/vl.c
   26.38  ===================================================================
   26.39 ---- ioemu.orig/vl.c	2006-07-26 11:45:57.579128617 +0100
   26.40 -+++ ioemu/vl.c	2006-07-26 11:45:57.593127149 +0100
   26.41 +--- ioemu.orig/vl.c	2006-07-28 09:56:49.475300916 +0100
   26.42 ++++ ioemu/vl.c	2006-07-28 09:56:58.917232883 +0100
   26.43  @@ -87,7 +87,7 @@
   26.44   
   26.45   #include "exec-all.h"
   26.46 @@ -98,8 +98,8 @@ Index: ioemu/vl.c
   26.47   {
   26.48  Index: ioemu/vl.h
   26.49  ===================================================================
   26.50 ---- ioemu.orig/vl.h	2006-07-26 11:45:39.289045710 +0100
   26.51 -+++ ioemu/vl.h	2006-07-26 11:45:57.594127044 +0100
   26.52 +--- ioemu.orig/vl.h	2006-07-28 09:56:49.281322859 +0100
   26.53 ++++ ioemu/vl.h	2006-07-28 09:56:58.917232883 +0100
   26.54  @@ -38,6 +38,8 @@
   26.55   #include <fcntl.h>
   26.56   #include <sys/stat.h>
   26.57 @@ -132,7 +132,7 @@ Index: ioemu/vl.h
   26.58  Index: ioemu/target-i386-dm/cpu.h
   26.59  ===================================================================
   26.60  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
   26.61 -+++ ioemu/target-i386-dm/cpu.h	2006-07-26 11:45:57.594127044 +0100
   26.62 ++++ ioemu/target-i386-dm/cpu.h	2006-07-28 09:56:58.572271903 +0100
   26.63  @@ -0,0 +1,86 @@
   26.64  +/*
   26.65  + * i386 virtual CPU header
   26.66 @@ -223,8 +223,8 @@ Index: ioemu/target-i386-dm/cpu.h
   26.67  Index: ioemu/target-i386-dm/exec-dm.c
   26.68  ===================================================================
   26.69  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
   26.70 -+++ ioemu/target-i386-dm/exec-dm.c	2006-07-26 11:46:01.059763730 +0100
   26.71 -@@ -0,0 +1,512 @@
   26.72 ++++ ioemu/target-i386-dm/exec-dm.c	2006-07-28 09:58:22.882736989 +0100
   26.73 +@@ -0,0 +1,516 @@
   26.74  +/*
   26.75  + *  virtual page mapping and translated block handling
   26.76  + * 
   26.77 @@ -291,6 +291,7 @@ Index: ioemu/target-i386-dm/exec-dm.c
   26.78  +#endif /* !CONFIG_DM */
   26.79  +
   26.80  +uint64_t phys_ram_size;
   26.81 ++extern uint64_t ram_size;
   26.82  +int phys_ram_fd;
   26.83  +uint8_t *phys_ram_base;
   26.84  +uint8_t *phys_ram_dirty;
   26.85 @@ -632,7 +633,7 @@ Index: ioemu/target-i386-dm/exec-dm.c
   26.86  +            l = len;
   26.87  +	
   26.88  +        pd = page;
   26.89 -+        io_index = iomem_index(page);
   26.90 ++        io_index = iomem_index(addr);
   26.91  +        if (is_write) {
   26.92  +            if (io_index) {
   26.93  +                if (l >= 4 && ((addr & 3) == 0)) {
   26.94 @@ -677,11 +678,14 @@ Index: ioemu/target-i386-dm/exec-dm.c
   26.95  +                    stb_raw(buf, val);
   26.96  +                    l = 1;
   26.97  +                }
   26.98 -+            } else {
   26.99 ++            } else if (addr < ram_size) {
  26.100  +                /* RAM case */
  26.101  +                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
  26.102  +                    (addr & ~TARGET_PAGE_MASK);
  26.103  +                memcpy(buf, ptr, l);
  26.104 ++            } else {
  26.105 ++                /* unreported MMIO space */
  26.106 ++                memset(buf, 0xff, len);
  26.107  +            }
  26.108  +        }
  26.109  +        len -= l;
  26.110 @@ -740,7 +744,7 @@ Index: ioemu/target-i386-dm/exec-dm.c
  26.111  Index: ioemu/target-i386-dm/helper2.c
  26.112  ===================================================================
  26.113  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
  26.114 -+++ ioemu/target-i386-dm/helper2.c	2006-07-26 11:45:57.596126835 +0100
  26.115 ++++ ioemu/target-i386-dm/helper2.c	2006-07-28 09:56:58.312301309 +0100
  26.116  @@ -0,0 +1,464 @@
  26.117  +/*
  26.118  + *  i386 helpers (without register variable usage)
  26.119 @@ -1209,7 +1213,7 @@ Index: ioemu/target-i386-dm/helper2.c
  26.120  Index: ioemu/target-i386-dm/i8259-dm.c
  26.121  ===================================================================
  26.122  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
  26.123 -+++ ioemu/target-i386-dm/i8259-dm.c	2006-07-26 11:45:57.596126835 +0100
  26.124 ++++ ioemu/target-i386-dm/i8259-dm.c	2006-07-28 09:56:49.492298993 +0100
  26.125  @@ -0,0 +1,107 @@
  26.126  +/* Xen 8259 stub for interrupt controller emulation
  26.127  + * 
  26.128 @@ -1321,7 +1325,7 @@ Index: ioemu/target-i386-dm/i8259-dm.c
  26.129  Index: ioemu/target-i386-dm/qemu-dm.debug
  26.130  ===================================================================
  26.131  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
  26.132 -+++ ioemu/target-i386-dm/qemu-dm.debug	2006-07-26 11:45:57.596126835 +0100
  26.133 ++++ ioemu/target-i386-dm/qemu-dm.debug	2006-07-28 09:56:49.493298880 +0100
  26.134  @@ -0,0 +1,5 @@
  26.135  +#!/bin/sh
  26.136  +
  26.137 @@ -1331,7 +1335,7 @@ Index: ioemu/target-i386-dm/qemu-dm.debu
  26.138  Index: ioemu/target-i386-dm/qemu-ifup
  26.139  ===================================================================
  26.140  --- /dev/null	1970-01-01 00:00:00.000000000 +0000
  26.141 -+++ ioemu/target-i386-dm/qemu-ifup	2006-07-26 11:45:57.597126730 +0100
  26.142 ++++ ioemu/target-i386-dm/qemu-ifup	2006-07-28 09:56:49.493298880 +0100
  26.143  @@ -0,0 +1,10 @@
  26.144  +#!/bin/sh
  26.145  +
    27.1 --- a/tools/ioemu/patches/shared-vram	Thu Jul 27 17:44:14 2006 -0500
    27.2 +++ b/tools/ioemu/patches/shared-vram	Fri Jul 28 10:51:38 2006 +0100
    27.3 @@ -1,7 +1,7 @@
    27.4  Index: ioemu/hw/cirrus_vga.c
    27.5  ===================================================================
    27.6 ---- ioemu.orig/hw/cirrus_vga.c	2006-07-26 15:17:35.230806831 +0100
    27.7 -+++ ioemu/hw/cirrus_vga.c	2006-07-26 15:17:39.819307015 +0100
    27.8 +--- ioemu.orig/hw/cirrus_vga.c	2006-07-27 11:16:53.059986783 +0100
    27.9 ++++ ioemu/hw/cirrus_vga.c	2006-07-27 11:16:59.923227020 +0100
   27.10  @@ -28,6 +28,9 @@
   27.11    */
   27.12   #include "vl.h"
   27.13 @@ -176,8 +176,8 @@ Index: ioemu/hw/cirrus_vga.c
   27.14   }
   27.15  Index: ioemu/hw/pc.c
   27.16  ===================================================================
   27.17 ---- ioemu.orig/hw/pc.c	2006-07-26 15:17:39.752314312 +0100
   27.18 -+++ ioemu/hw/pc.c	2006-07-26 15:17:39.820306906 +0100
   27.19 +--- ioemu.orig/hw/pc.c	2006-07-27 11:16:59.759245173 +0100
   27.20 ++++ ioemu/hw/pc.c	2006-07-27 11:16:59.924226909 +0100
   27.21  @@ -783,14 +783,14 @@
   27.22       if (cirrus_vga_enabled) {
   27.23           if (pci_enabled) {
   27.24 @@ -198,8 +198,8 @@ Index: ioemu/hw/pc.c
   27.25   
   27.26  Index: ioemu/hw/vga.c
   27.27  ===================================================================
   27.28 ---- ioemu.orig/hw/vga.c	2006-07-26 15:17:39.352357879 +0100
   27.29 -+++ ioemu/hw/vga.c	2006-07-26 15:17:39.821306797 +0100
   27.30 +--- ioemu.orig/hw/vga.c	2006-07-27 11:16:59.103317784 +0100
   27.31 ++++ ioemu/hw/vga.c	2006-07-27 11:16:59.925226798 +0100
   27.32  @@ -1799,6 +1799,7 @@
   27.33       /* TODO: add vbe support if enabled */
   27.34   }
   27.35 @@ -217,7 +217,7 @@ Index: ioemu/hw/vga.c
   27.36       s->vram_offset = vga_ram_offset;
   27.37       s->vram_size = vga_ram_size;
   27.38       s->ds = ds;
   27.39 -@@ -1941,6 +1942,31 @@
   27.40 +@@ -1943,6 +1944,31 @@
   27.41       return 0;
   27.42   }
   27.43   
   27.44 @@ -251,8 +251,8 @@ Index: ioemu/hw/vga.c
   27.45   
   27.46  Index: ioemu/hw/vga_int.h
   27.47  ===================================================================
   27.48 ---- ioemu.orig/hw/vga_int.h	2006-07-26 15:17:38.201483242 +0100
   27.49 -+++ ioemu/hw/vga_int.h	2006-07-26 15:17:39.822306688 +0100
   27.50 +--- ioemu.orig/hw/vga_int.h	2006-07-27 11:16:57.447501084 +0100
   27.51 ++++ ioemu/hw/vga_int.h	2006-07-27 11:16:59.925226798 +0100
   27.52  @@ -166,5 +166,6 @@
   27.53                                unsigned int color0, unsigned int color1,
   27.54                                unsigned int color_xor);
   27.55 @@ -262,9 +262,9 @@ Index: ioemu/hw/vga_int.h
   27.56   extern const uint8_t gr_mask[16];
   27.57  Index: ioemu/vl.c
   27.58  ===================================================================
   27.59 ---- ioemu.orig/vl.c	2006-07-26 15:17:39.755313985 +0100
   27.60 -+++ ioemu/vl.c	2006-07-26 15:17:39.824306470 +0100
   27.61 -@@ -5148,6 +5148,78 @@
   27.62 +--- ioemu.orig/vl.c	2006-07-27 11:16:59.762244841 +0100
   27.63 ++++ ioemu/vl.c	2006-07-27 11:16:59.928226466 +0100
   27.64 +@@ -5149,6 +5149,78 @@
   27.65   
   27.66   #define MAX_NET_CLIENTS 32
   27.67   
   27.68 @@ -345,8 +345,8 @@ Index: ioemu/vl.c
   27.69   #ifdef CONFIG_GDBSTUB
   27.70  Index: ioemu/vl.h
   27.71  ===================================================================
   27.72 ---- ioemu.orig/vl.h	2006-07-26 15:17:39.621328580 +0100
   27.73 -+++ ioemu/vl.h	2006-07-26 15:17:39.825306361 +0100
   27.74 +--- ioemu.orig/vl.h	2006-07-27 11:16:59.549268417 +0100
   27.75 ++++ ioemu/vl.h	2006-07-27 11:16:59.929226356 +0100
   27.76  @@ -136,6 +136,13 @@
   27.77   
   27.78   void main_loop_wait(int timeout);
    28.1 --- a/tools/ioemu/patches/support-xm-console	Thu Jul 27 17:44:14 2006 -0500
    28.2 +++ b/tools/ioemu/patches/support-xm-console	Fri Jul 28 10:51:38 2006 +0100
    28.3 @@ -1,7 +1,8 @@
    28.4 -diff -r d08c08f8fbf3 vl.c
    28.5 ---- a/vl.c	Mon Jun 26 15:18:25 2006 +0100
    28.6 -+++ b/vl.c	Mon Jun 26 15:18:37 2006 +0100
    28.7 -@@ -1535,26 +1535,65 @@ CharDriverState *qemu_chr_open_stdio(voi
    28.8 +Index: ioemu/vl.c
    28.9 +===================================================================
   28.10 +--- ioemu.orig/vl.c	2006-07-27 11:16:59.384286681 +0100
   28.11 ++++ ioemu/vl.c	2006-07-27 11:16:59.614261222 +0100
   28.12 +@@ -1535,26 +1535,65 @@
   28.13       return chr;
   28.14   }
   28.15   
   28.16 @@ -65,19 +66,18 @@ diff -r d08c08f8fbf3 vl.c
   28.17  -    tty.c_cc[VMIN] = 1;
   28.18  -    tty.c_cc[VTIME] = 0;
   28.19  -    tcsetattr (master_fd, TCSAFLUSH, &tty);
   28.20 --
   28.21 --    fprintf(stderr, "char device redirected to %s\n", slave_name);
   28.22  +    /* Set raw attributes on the pty. */
   28.23  +    cfmakeraw(&tty);
   28.24  +    tcsetattr(slave_fd, TCSAFLUSH, &tty);
   28.25  +    
   28.26  +    fprintf(stderr, "char device redirected to %s\n", ptsname(master_fd));
   28.27  +    store_console_dev(domid, ptsname(master_fd));
   28.28 -+
   28.29 + 
   28.30 +-    fprintf(stderr, "char device redirected to %s\n", slave_name);
   28.31       return qemu_chr_open_fd(master_fd, master_fd);
   28.32   }
   28.33   
   28.34 -@@ -5297,7 +5336,9 @@ int main(int argc, char **argv)
   28.35 +@@ -5298,7 +5337,9 @@
   28.36                   break;
   28.37               case QEMU_OPTION_nographic:
   28.38                   pstrcpy(monitor_device, sizeof(monitor_device), "stdio");
    29.1 --- a/tools/ioemu/patches/vnc-cleanup	Thu Jul 27 17:44:14 2006 -0500
    29.2 +++ b/tools/ioemu/patches/vnc-cleanup	Fri Jul 28 10:51:38 2006 +0100
    29.3 @@ -1,7 +1,8 @@
    29.4 -diff -r c84300f3abc2 vnc.c
    29.5 ---- a/vnc.c	Wed Jul 05 18:11:23 2006 +0100
    29.6 -+++ b/vnc.c	Thu Jul 06 14:27:28 2006 +0100
    29.7 -@@ -83,13 +83,16 @@ static void vnc_dpy_update(DisplayState 
    29.8 +Index: ioemu/vnc.c
    29.9 +===================================================================
   29.10 +--- ioemu.orig/vnc.c	2006-07-27 11:16:52.783017443 +0100
   29.11 ++++ ioemu/vnc.c	2006-07-27 11:17:00.722138579 +0100
   29.12 +@@ -83,13 +83,16 @@
   29.13   static void vnc_dpy_update(DisplayState *ds, int x, int y, int w, int h)
   29.14   {
   29.15       VncState *vs = ds->opaque;
   29.16 @@ -21,7 +22,7 @@ diff -r c84300f3abc2 vnc.c
   29.17   }
   29.18   
   29.19   static void vnc_framebuffer_update(VncState *vs, int x, int y, int w, int h,
   29.20 -@@ -262,6 +265,7 @@ static void vnc_update_client(void *opaq
   29.21 +@@ -262,6 +265,7 @@
   29.22   static void vnc_update_client(void *opaque)
   29.23   {
   29.24       VncState *vs = opaque;
   29.25 @@ -29,7 +30,7 @@ diff -r c84300f3abc2 vnc.c
   29.26   
   29.27       if (vs->need_update && vs->csock != -1) {
   29.28   	int y;
   29.29 -@@ -282,7 +286,7 @@ static void vnc_update_client(void *opaq
   29.30 +@@ -282,7 +286,7 @@
   29.31   	row = vs->ds->data;
   29.32   	old_row = vs->old_data;
   29.33   
   29.34 @@ -38,7 +39,7 @@ diff -r c84300f3abc2 vnc.c
   29.35   	    if (vs->dirty_row[y] & width_mask) {
   29.36   		int x;
   29.37   		char *ptr, *old_ptr;
   29.38 -@@ -307,10 +311,8 @@ static void vnc_update_client(void *opaq
   29.39 +@@ -307,10 +311,8 @@
   29.40   	    old_row += vs->ds->linesize;
   29.41   	}
   29.42   
   29.43 @@ -51,7 +52,7 @@ diff -r c84300f3abc2 vnc.c
   29.44   
   29.45   	/* Count rectangles */
   29.46   	n_rectangles = 0;
   29.47 -@@ -348,7 +350,9 @@ static void vnc_update_client(void *opaq
   29.48 +@@ -348,7 +350,9 @@
   29.49   	vnc_flush(vs);
   29.50   
   29.51       }
   29.52 @@ -62,10 +63,11 @@ diff -r c84300f3abc2 vnc.c
   29.53   }
   29.54   
   29.55   static void vnc_timer_init(VncState *vs)
   29.56 -diff -r c84300f3abc2 vl.c
   29.57 ---- a/vl.c	Wed Jul 05 18:11:23 2006 +0100
   29.58 -+++ b/vl.c	Thu Jul 06 14:27:28 2006 +0100
   29.59 -@@ -4586,10 +4586,10 @@ void main_loop_wait(int timeout)
   29.60 +Index: ioemu/vl.c
   29.61 +===================================================================
   29.62 +--- ioemu.orig/vl.c	2006-07-27 11:17:00.311184072 +0100
   29.63 ++++ ioemu/vl.c	2006-07-27 11:17:00.724138358 +0100
   29.64 +@@ -4587,10 +4587,10 @@
   29.65           /* XXX: better handling of removal */
   29.66           for(ioh = first_io_handler; ioh != NULL; ioh = ioh_next) {
   29.67               ioh_next = ioh->next;
    30.1 --- a/tools/ioemu/patches/vnc-fixes	Thu Jul 27 17:44:14 2006 -0500
    30.2 +++ b/tools/ioemu/patches/vnc-fixes	Fri Jul 28 10:51:38 2006 +0100
    30.3 @@ -1,8 +1,8 @@
    30.4  Index: ioemu/vl.c
    30.5  ===================================================================
    30.6 ---- ioemu.orig/vl.c	2006-07-26 14:29:04.481598583 +0100
    30.7 -+++ ioemu/vl.c	2006-07-26 14:31:22.668325993 +0100
    30.8 -@@ -6003,8 +6003,10 @@
    30.9 +--- ioemu.orig/vl.c	2006-07-27 11:17:00.724138358 +0100
   30.10 ++++ ioemu/vl.c	2006-07-27 11:17:00.874121755 +0100
   30.11 +@@ -6004,8 +6004,10 @@
   30.12                     kernel_filename, kernel_cmdline, initrd_filename,
   30.13                     timeoffset);
   30.14   
   30.15 @@ -17,8 +17,8 @@ Index: ioemu/vl.c
   30.16       if (use_gdbstub) {
   30.17  Index: ioemu/vnc.c
   30.18  ===================================================================
   30.19 ---- ioemu.orig/vnc.c	2006-07-26 14:29:04.479598804 +0100
   30.20 -+++ ioemu/vnc.c	2006-07-26 14:31:22.669325883 +0100
   30.21 +--- ioemu.orig/vnc.c	2006-07-27 11:17:00.722138579 +0100
   30.22 ++++ ioemu/vnc.c	2006-07-27 11:17:00.875121644 +0100
   30.23  @@ -3,6 +3,7 @@
   30.24    * 
   30.25    * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
   30.26 @@ -493,8 +493,8 @@ Index: ioemu/vnc.c
   30.27   }
   30.28  Index: ioemu/vl.h
   30.29  ===================================================================
   30.30 ---- ioemu.orig/vl.h	2006-07-26 14:31:22.669325883 +0100
   30.31 -+++ ioemu/vl.h	2006-07-26 14:32:44.505279724 +0100
   30.32 +--- ioemu.orig/vl.h	2006-07-27 11:17:00.311184072 +0100
   30.33 ++++ ioemu/vl.h	2006-07-27 11:17:00.875121644 +0100
   30.34  @@ -301,6 +301,7 @@
   30.35   int is_graphic_console(void);
   30.36   CharDriverState *text_console_init(DisplayState *ds);
    31.1 --- a/tools/ioemu/patches/vnc-start-vncviewer	Thu Jul 27 17:44:14 2006 -0500
    31.2 +++ b/tools/ioemu/patches/vnc-start-vncviewer	Fri Jul 28 10:51:38 2006 +0100
    31.3 @@ -1,7 +1,7 @@
    31.4  Index: ioemu/vnc.c
    31.5  ===================================================================
    31.6 ---- ioemu.orig/vnc.c	2006-07-26 14:33:08.166663983 +0100
    31.7 -+++ ioemu/vnc.c	2006-07-26 14:33:08.225657462 +0100
    31.8 +--- ioemu.orig/vnc.c	2006-07-27 11:17:00.875121644 +0100
    31.9 ++++ ioemu/vnc.c	2006-07-27 11:17:01.032104266 +0100
   31.10  @@ -1002,3 +1002,25 @@
   31.11   
   31.12       vnc_dpy_resize(vs->ds, 640, 400);
   31.13 @@ -30,8 +30,8 @@ Index: ioemu/vnc.c
   31.14  +}
   31.15  Index: ioemu/vl.c
   31.16  ===================================================================
   31.17 ---- ioemu.orig/vl.c	2006-07-26 14:33:08.165664094 +0100
   31.18 -+++ ioemu/vl.c	2006-07-26 14:33:08.227657240 +0100
   31.19 +--- ioemu.orig/vl.c	2006-07-27 11:17:00.874121755 +0100
   31.20 ++++ ioemu/vl.c	2006-07-27 11:17:01.035103934 +0100
   31.21  @@ -121,6 +121,7 @@
   31.22   int bios_size;
   31.23   static DisplayState display_state;
   31.24 @@ -40,7 +40,7 @@ Index: ioemu/vl.c
   31.25   const char* keyboard_layout = NULL;
   31.26   int64_t ticks_per_sec;
   31.27   int boot_device = 'c';
   31.28 -@@ -4801,6 +4802,7 @@
   31.29 +@@ -4802,6 +4803,7 @@
   31.30   #endif
   31.31              "-loadvm file    start right away with a saved state (loadvm in monitor)\n"
   31.32   	   "-vnc display    start a VNC server on display\n"
   31.33 @@ -48,7 +48,7 @@ Index: ioemu/vl.c
   31.34              "-timeoffset     time offset (in seconds) from local time\n"
   31.35              "\n"
   31.36              "During emulation, the following keys are useful:\n"
   31.37 -@@ -4889,6 +4891,7 @@
   31.38 +@@ -4890,6 +4892,7 @@
   31.39       QEMU_OPTION_usbdevice,
   31.40       QEMU_OPTION_smp,
   31.41       QEMU_OPTION_vnc,
   31.42 @@ -56,7 +56,7 @@ Index: ioemu/vl.c
   31.43   
   31.44       QEMU_OPTION_d,
   31.45       QEMU_OPTION_vcpus,
   31.46 -@@ -4964,6 +4967,7 @@
   31.47 +@@ -4965,6 +4968,7 @@
   31.48       { "usbdevice", HAS_ARG, QEMU_OPTION_usbdevice },
   31.49       { "smp", HAS_ARG, QEMU_OPTION_smp },
   31.50       { "vnc", HAS_ARG, QEMU_OPTION_vnc },
   31.51 @@ -64,7 +64,7 @@ Index: ioemu/vl.c
   31.52       
   31.53       /* temporary options */
   31.54       { "usb", 0, QEMU_OPTION_usb },
   31.55 -@@ -5294,6 +5298,7 @@
   31.56 +@@ -5295,6 +5299,7 @@
   31.57   #endif
   31.58       snapshot = 0;
   31.59       nographic = 0;
   31.60 @@ -72,7 +72,7 @@ Index: ioemu/vl.c
   31.61       kernel_filename = NULL;
   31.62       kernel_cmdline = "";
   31.63   #ifdef TARGET_PPC
   31.64 -@@ -5663,6 +5668,9 @@
   31.65 +@@ -5664,6 +5669,9 @@
   31.66   		    exit(1);
   31.67   		}
   31.68   		break;
   31.69 @@ -82,7 +82,7 @@ Index: ioemu/vl.c
   31.70               case QEMU_OPTION_domainname:
   31.71                   strncat(domain_name, optarg, sizeof(domain_name) - 20);
   31.72                   break;
   31.73 -@@ -5910,6 +5918,8 @@
   31.74 +@@ -5911,6 +5919,8 @@
   31.75           dumb_display_init(ds);
   31.76       } else if (vnc_display != -1) {
   31.77   	vnc_display_init(ds, vnc_display);
   31.78 @@ -93,8 +93,8 @@ Index: ioemu/vl.c
   31.79           sdl_display_init(ds, full_screen);
   31.80  Index: ioemu/vl.h
   31.81  ===================================================================
   31.82 ---- ioemu.orig/vl.h	2006-07-26 14:33:08.167663873 +0100
   31.83 -+++ ioemu/vl.h	2006-07-26 14:33:08.228657130 +0100
   31.84 +--- ioemu.orig/vl.h	2006-07-27 11:17:00.875121644 +0100
   31.85 ++++ ioemu/vl.h	2006-07-27 11:17:01.036103823 +0100
   31.86  @@ -733,6 +733,7 @@
   31.87   
   31.88   /* vnc.c */
    32.1 --- a/tools/ioemu/patches/xen-domain-name	Thu Jul 27 17:44:14 2006 -0500
    32.2 +++ b/tools/ioemu/patches/xen-domain-name	Fri Jul 28 10:51:38 2006 +0100
    32.3 @@ -1,7 +1,7 @@
    32.4  Index: ioemu/sdl.c
    32.5  ===================================================================
    32.6 ---- ioemu.orig/sdl.c	2006-07-12 11:33:54.665109493 +0100
    32.7 -+++ ioemu/sdl.c	2006-07-12 11:35:01.450735012 +0100
    32.8 +--- ioemu.orig/sdl.c	2006-07-27 11:16:53.590928008 +0100
    32.9 ++++ ioemu/sdl.c	2006-07-27 11:16:58.124426148 +0100
   32.10  @@ -268,14 +268,14 @@
   32.11   static void sdl_update_caption(void)
   32.12   {
   32.13 @@ -21,8 +21,8 @@ Index: ioemu/sdl.c
   32.14   static void sdl_hide_cursor(void)
   32.15  Index: ioemu/vl.c
   32.16  ===================================================================
   32.17 ---- ioemu.orig/vl.c	2006-07-12 11:35:01.094779608 +0100
   32.18 -+++ ioemu/vl.c	2006-07-12 11:35:01.453734636 +0100
   32.19 +--- ioemu.orig/vl.c	2006-07-27 11:16:57.828458912 +0100
   32.20 ++++ ioemu/vl.c	2006-07-27 11:16:58.126425927 +0100
   32.21  @@ -159,6 +159,8 @@
   32.22   #define MAX_CPUS 1
   32.23   #endif
   32.24 @@ -32,7 +32,7 @@ Index: ioemu/vl.c
   32.25   /***********************************************************/
   32.26   /* x86 ISA bus support */
   32.27   
   32.28 -@@ -4698,6 +4700,7 @@
   32.29 +@@ -4699,6 +4701,7 @@
   32.30              "-s              wait gdb connection to port %d\n"
   32.31              "-p port         change gdb connection port\n"
   32.32              "-l item1,...    output log to %s (use -d ? for a list of log items)\n"
   32.33 @@ -40,7 +40,7 @@ Index: ioemu/vl.c
   32.34              "-hdachs c,h,s[,t]  force hard disk 0 physical geometry and the optional BIOS\n"
   32.35              "                translation (t=none or lba) (usually qemu can guess them)\n"
   32.36              "-L path         set the directory for the BIOS and VGA BIOS\n"
   32.37 -@@ -4787,6 +4790,7 @@
   32.38 +@@ -4788,6 +4791,7 @@
   32.39       QEMU_OPTION_g,
   32.40       QEMU_OPTION_std_vga,
   32.41       QEMU_OPTION_monitor,
   32.42 @@ -48,7 +48,7 @@ Index: ioemu/vl.c
   32.43       QEMU_OPTION_serial,
   32.44       QEMU_OPTION_parallel,
   32.45       QEMU_OPTION_loadvm,
   32.46 -@@ -4860,6 +4864,7 @@
   32.47 +@@ -4861,6 +4865,7 @@
   32.48       { "localtime", 0, QEMU_OPTION_localtime },
   32.49       { "std-vga", 0, QEMU_OPTION_std_vga },
   32.50       { "monitor", 1, QEMU_OPTION_monitor },
   32.51 @@ -56,7 +56,7 @@ Index: ioemu/vl.c
   32.52       { "serial", 1, QEMU_OPTION_serial },
   32.53       { "parallel", 1, QEMU_OPTION_parallel },
   32.54       { "loadvm", HAS_ARG, QEMU_OPTION_loadvm },
   32.55 -@@ -5483,6 +5488,9 @@
   32.56 +@@ -5484,6 +5489,9 @@
   32.57   		    exit(1);
   32.58   		}
   32.59   		break;
   32.60 @@ -68,8 +68,8 @@ Index: ioemu/vl.c
   32.61       }
   32.62  Index: ioemu/vl.h
   32.63  ===================================================================
   32.64 ---- ioemu.orig/vl.h	2006-07-12 11:35:00.955797021 +0100
   32.65 -+++ ioemu/vl.h	2006-07-12 11:35:01.454734511 +0100
   32.66 +--- ioemu.orig/vl.h	2006-07-27 11:16:57.682475072 +0100
   32.67 ++++ ioemu/vl.h	2006-07-27 11:16:58.127425816 +0100
   32.68  @@ -1094,4 +1094,5 @@
   32.69   
   32.70   void kqemu_record_dump(void);
    33.1 --- a/tools/ioemu/patches/xen-domid	Thu Jul 27 17:44:14 2006 -0500
    33.2 +++ b/tools/ioemu/patches/xen-domid	Fri Jul 28 10:51:38 2006 +0100
    33.3 @@ -1,7 +1,8 @@
    33.4 -diff -r 03705e837ce8 vl.c
    33.5 ---- a/vl.c	Tue May 30 14:10:44 2006 +0100
    33.6 -+++ b/vl.c	Tue May 30 14:11:16 2006 +0100
    33.7 -@@ -160,6 +160,7 @@ int vnc_display = -1;
    33.8 +Index: ioemu/vl.c
    33.9 +===================================================================
   33.10 +--- ioemu.orig/vl.c	2006-07-27 11:16:58.126425927 +0100
   33.11 ++++ ioemu/vl.c	2006-07-27 11:16:58.296407110 +0100
   33.12 +@@ -160,6 +160,7 @@
   33.13   #endif
   33.14   
   33.15   char domain_name[1024] = { 'H','V', 'M', 'X', 'E', 'N', '-'};
   33.16 @@ -9,7 +10,7 @@ diff -r 03705e837ce8 vl.c
   33.17   
   33.18   /***********************************************************/
   33.19   /* x86 ISA bus support */
   33.20 -@@ -4700,6 +4701,7 @@ void help(void)
   33.21 +@@ -4701,6 +4702,7 @@
   33.22              "-s              wait gdb connection to port %d\n"
   33.23              "-p port         change gdb connection port\n"
   33.24              "-l item1,...    output log to %s (use -d ? for a list of log items)\n"
   33.25 @@ -17,7 +18,7 @@ diff -r 03705e837ce8 vl.c
   33.26              "-domain-name    domain name that we're serving\n"
   33.27              "-hdachs c,h,s[,t]  force hard disk 0 physical geometry and the optional BIOS\n"
   33.28              "                translation (t=none or lba) (usually qemu can guess them)\n"
   33.29 -@@ -4803,6 +4805,8 @@ enum {
   33.30 +@@ -4804,6 +4806,8 @@
   33.31       QEMU_OPTION_usbdevice,
   33.32       QEMU_OPTION_smp,
   33.33       QEMU_OPTION_vnc,
   33.34 @@ -26,7 +27,7 @@ diff -r 03705e837ce8 vl.c
   33.35   };
   33.36   
   33.37   typedef struct QEMUOption {
   33.38 -@@ -4878,6 +4882,8 @@ const QEMUOption qemu_options[] = {
   33.39 +@@ -4879,6 +4883,8 @@
   33.40       /* temporary options */
   33.41       { "usb", 0, QEMU_OPTION_usb },
   33.42       { "cirrusvga", 0, QEMU_OPTION_cirrusvga },
   33.43 @@ -35,7 +36,7 @@ diff -r 03705e837ce8 vl.c
   33.44       { NULL },
   33.45   };
   33.46   
   33.47 -@@ -5491,6 +5497,10 @@ int main(int argc, char **argv)
   33.48 +@@ -5492,6 +5498,10 @@
   33.49               case QEMU_OPTION_domainname:
   33.50                   strncat(domain_name, optarg, sizeof(domain_name) - 20);
   33.51                   break;
    34.1 --- a/tools/ioemu/patches/xen-mm	Thu Jul 27 17:44:14 2006 -0500
    34.2 +++ b/tools/ioemu/patches/xen-mm	Fri Jul 28 10:51:38 2006 +0100
    34.3 @@ -1,7 +1,7 @@
    34.4  Index: ioemu/hw/pc.c
    34.5  ===================================================================
    34.6 ---- ioemu.orig/hw/pc.c	2006-07-14 15:55:59.489503600 +0100
    34.7 -+++ ioemu/hw/pc.c	2006-07-14 15:56:00.354405169 +0100
    34.8 +--- ioemu.orig/hw/pc.c	2006-07-27 11:16:57.678475515 +0100
    34.9 ++++ ioemu/hw/pc.c	2006-07-27 11:16:58.447390396 +0100
   34.10  @@ -639,7 +639,9 @@
   34.11       }
   34.12   
   34.13 @@ -25,8 +25,8 @@ Index: ioemu/hw/pc.c
   34.14       isa_bios_size = bios_size;
   34.15  Index: ioemu/vl.c
   34.16  ===================================================================
   34.17 ---- ioemu.orig/vl.c	2006-07-14 15:56:00.271414614 +0100
   34.18 -+++ ioemu/vl.c	2006-07-14 15:56:00.358404714 +0100
   34.19 +--- ioemu.orig/vl.c	2006-07-27 11:16:58.296407110 +0100
   34.20 ++++ ioemu/vl.c	2006-07-27 11:16:58.450390064 +0100
   34.21  @@ -159,6 +159,8 @@
   34.22   #define MAX_CPUS 1
   34.23   #endif
   34.24 @@ -36,7 +36,7 @@ Index: ioemu/vl.c
   34.25   char domain_name[1024] = { 'H','V', 'M', 'X', 'E', 'N', '-'};
   34.26   extern int domid;
   34.27   
   34.28 -@@ -5105,6 +5107,9 @@
   34.29 +@@ -5106,6 +5108,9 @@
   34.30       QEMUMachine *machine;
   34.31       char usb_devices[MAX_VM_USB_PORTS][128];
   34.32       int usb_devices_index;
   34.33 @@ -46,7 +46,7 @@ Index: ioemu/vl.c
   34.34   
   34.35       char qemu_dm_logfilename[64];
   34.36   
   34.37 -@@ -5341,11 +5346,13 @@
   34.38 +@@ -5342,11 +5347,13 @@
   34.39                   ram_size = atol(optarg) * 1024 * 1024;
   34.40                   if (ram_size <= 0)
   34.41                       help();
   34.42 @@ -60,7 +60,7 @@ Index: ioemu/vl.c
   34.43                   break;
   34.44               case QEMU_OPTION_l:
   34.45                   {
   34.46 -@@ -5559,6 +5566,39 @@
   34.47 +@@ -5560,6 +5567,39 @@
   34.48       /* init the memory */
   34.49       phys_ram_size = ram_size + vga_ram_size + bios_size;
   34.50   
   34.51 @@ -100,7 +100,7 @@ Index: ioemu/vl.c
   34.52   #ifdef CONFIG_SOFTMMU
   34.53       phys_ram_base = qemu_vmalloc(phys_ram_size);
   34.54       if (!phys_ram_base) {
   34.55 -@@ -5599,6 +5639,8 @@
   34.56 +@@ -5600,6 +5640,8 @@
   34.57       }
   34.58   #endif
   34.59   
    35.1 --- a/tools/ioemu/patches/xen-network	Thu Jul 27 17:44:14 2006 -0500
    35.2 +++ b/tools/ioemu/patches/xen-network	Fri Jul 28 10:51:38 2006 +0100
    35.3 @@ -1,7 +1,7 @@
    35.4  Index: ioemu/vl.c
    35.5  ===================================================================
    35.6 ---- ioemu.orig/vl.c	2006-07-12 11:35:01.753697055 +0100
    35.7 -+++ ioemu/vl.c	2006-07-12 11:35:02.126650330 +0100
    35.8 +--- ioemu.orig/vl.c	2006-07-27 11:16:58.823348777 +0100
    35.9 ++++ ioemu/vl.c	2006-07-27 11:16:59.169310479 +0100
   35.10  @@ -89,6 +89,7 @@
   35.11   #include "exec-all.h"
   35.12   
   35.13 @@ -40,7 +40,7 @@ Index: ioemu/vl.c
   35.14           int fd;
   35.15           if (get_param_value(buf, sizeof(buf), "fd", p) > 0) {
   35.16               fd = strtol(buf, NULL, 0);
   35.17 -@@ -3212,7 +3215,10 @@
   35.18 +@@ -3213,7 +3216,10 @@
   35.19               if (get_param_value(setup_script, sizeof(setup_script), "script", p) == 0) {
   35.20                   pstrcpy(setup_script, sizeof(setup_script), DEFAULT_NETWORK_SCRIPT);
   35.21               }
   35.22 @@ -52,7 +52,7 @@ Index: ioemu/vl.c
   35.23           }
   35.24       } else
   35.25   #endif
   35.26 -@@ -4671,7 +4677,7 @@
   35.27 +@@ -4672,7 +4678,7 @@
   35.28              "-net tap[,vlan=n],ifname=name\n"
   35.29              "                connect the host TAP network interface to VLAN 'n'\n"
   35.30   #else
    36.1 --- a/tools/ioemu/target-i386-dm/exec-dm.c	Thu Jul 27 17:44:14 2006 -0500
    36.2 +++ b/tools/ioemu/target-i386-dm/exec-dm.c	Fri Jul 28 10:51:38 2006 +0100
    36.3 @@ -64,6 +64,7 @@ uint8_t *code_gen_ptr;
    36.4  #endif /* !CONFIG_DM */
    36.5  
    36.6  uint64_t phys_ram_size;
    36.7 +extern uint64_t ram_size;
    36.8  int phys_ram_fd;
    36.9  uint8_t *phys_ram_base;
   36.10  uint8_t *phys_ram_dirty;
   36.11 @@ -422,7 +423,7 @@ void cpu_physical_memory_rw(target_phys_
   36.12              l = len;
   36.13  	
   36.14          pd = page;
   36.15 -        io_index = iomem_index(page);
   36.16 +        io_index = iomem_index(addr);
   36.17          if (is_write) {
   36.18              if (io_index) {
   36.19                  if (l >= 4 && ((addr & 3) == 0)) {
   36.20 @@ -467,7 +468,7 @@ void cpu_physical_memory_rw(target_phys_
   36.21                      stb_raw(buf, val);
   36.22                      l = 1;
   36.23                  }
   36.24 -            } else {
   36.25 +            } else if (addr < ram_size) {
   36.26                  /* RAM case */
   36.27                  ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
   36.28                      (addr & ~TARGET_PAGE_MASK);
   36.29 @@ -475,6 +476,9 @@ void cpu_physical_memory_rw(target_phys_
   36.30  #ifdef __ia64__
   36.31                  sync_icache((unsigned long)ptr, l);
   36.32  #endif 
   36.33 +            } else {
   36.34 +                /* unreported MMIO space */
   36.35 +                memset(buf, 0xff, len);
   36.36              }
   36.37          }
   36.38          len -= l;
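The exec-dm.c hunk above makes two related fixes: the MMIO lookup is now keyed on the full guest-physical address rather than the page-aligned value, and reads that hit neither registered MMIO nor mapped RAM return all-ones instead of indexing past phys_ram_base. A minimal, self-contained sketch of that read-side fallback; the tiny static buffer and sizes are stand-ins for qemu-dm's real state, not its API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FAKE_RAM_SIZE 4096                 /* stand-in for the guest's ram_size */
    static uint8_t fake_ram[FAKE_RAM_SIZE];

    /* Read len bytes of "guest-physical" memory into buf.  Addresses inside the
     * mapped RAM are copied; anything beyond it is treated as unreported MMIO
     * space and filled with 0xff, mirroring the fallback added above. */
    static void read_phys(uint64_t addr, uint8_t *buf, size_t len)
    {
        if (addr + len <= FAKE_RAM_SIZE)
            memcpy(buf, fake_ram + addr, len);
        else
            memset(buf, 0xff, len);
    }

    int main(void)
    {
        uint8_t b[4];

        fake_ram[16] = 0xab;
        read_phys(16, b, sizeof b);
        printf("inside RAM : 0x%02x\n", b[0]);   /* 0xab */
        read_phys(FAKE_RAM_SIZE + 16, b, sizeof b);
        printf("beyond RAM : 0x%02x\n", b[0]);   /* 0xff */
        return 0;
    }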
    37.1 --- a/tools/ioemu/vl.c	Thu Jul 27 17:44:14 2006 -0500
    37.2 +++ b/tools/ioemu/vl.c	Fri Jul 28 10:51:38 2006 +0100
    37.3 @@ -3284,6 +3284,7 @@ int net_client_init(const char *str)
    37.4              if (net_tap_fd_init(vlan, fd))
    37.5                  ret = 0;
    37.6          } else {
    37.7 +            ifname[0] = '\0';
    37.8              get_param_value(ifname, sizeof(ifname), "ifname", p);
    37.9              if (get_param_value(setup_script, sizeof(setup_script), "script", p) == 0) {
   37.10                  pstrcpy(setup_script, sizeof(setup_script), DEFAULT_NETWORK_SCRIPT);
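The added line above clears ifname before the option lookup, so "-net tap" without an explicit ifname= hands an empty string, not stale stack contents, to the tap setup path. The same pattern in isolation; get_param below is a simplified, hypothetical stand-in for qemu's get_param_value(), assumed (as the fix implies) to leave the buffer untouched when the key is absent:

    #include <stdio.h>
    #include <string.h>

    /* Copy the value of "key" out of a comma-separated option string, or leave
     * buf untouched and return 0 when the key is missing. */
    static int get_param(char *buf, size_t len, const char *key, const char *opts)
    {
        const char *p = strstr(opts, key);
        if (p == NULL)
            return 0;
        p += strlen(key) + 1;                  /* skip "key=" */
        snprintf(buf, len, "%.*s", (int)strcspn(p, ","), p);
        return (int)strlen(buf);
    }

    int main(void)
    {
        char ifname[16];

        ifname[0] = '\0';                      /* the fix: start from a known state */
        get_param(ifname, sizeof ifname, "ifname", "script=foo");
        printf("ifname='%s'\n", ifname);       /* empty, not garbage */

        get_param(ifname, sizeof ifname, "ifname", "ifname=tap0,script=foo");
        printf("ifname='%s'\n", ifname);       /* tap0 */
        return 0;
    }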
    38.1 --- a/tools/libxc/Makefile	Thu Jul 27 17:44:14 2006 -0500
    38.2 +++ b/tools/libxc/Makefile	Fri Jul 28 10:51:38 2006 +0100
    38.3 @@ -31,10 +31,13 @@ GUEST_SRCS-y += xc_load_elf.c
    38.4  GUEST_SRCS-y += xg_private.c
    38.5  GUEST_SRCS-$(CONFIG_POWERPC) += xc_ppc_linux_build.c
    38.6  GUEST_SRCS-$(CONFIG_X86) += xc_linux_build.c
    38.7 -GUEST_SRCS-$(CONFIG_IA64) += xc_ia64_stubs.c xc_linux_build.c
    38.8 +GUEST_SRCS-$(CONFIG_IA64) += xc_linux_build.c
    38.9  GUEST_SRCS-$(CONFIG_MIGRATE) += xc_linux_restore.c xc_linux_save.c
   38.10  GUEST_SRCS-$(CONFIG_HVM) += xc_hvm_build.c
   38.11  
   38.12 +# This Makefile only adds files if CONFIG_IA64 is y.
   38.13 +include ia64/Makefile
   38.14 +
   38.15  CFLAGS   += -Werror
   38.16  CFLAGS   += -fno-strict-aliasing
   38.17  CFLAGS   += $(INCLUDES) -I.
   38.18 @@ -99,6 +102,7 @@ TAGS:
   38.19  .PHONY: clean
   38.20  clean:
   38.21  	rm -rf *.a *.so* *.o *.opic *.rpm $(LIB) *~ $(DEPS) xen
   38.22 +	rm -rf ia64/*.o ia64/*.opic
   38.23  
   38.24  .PHONY: rpm
   38.25  rpm: build
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/tools/libxc/ia64/Makefile	Fri Jul 28 10:51:38 2006 +0100
    39.3 @@ -0,0 +1,5 @@
    39.4 +CTRL_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_stubs.c
    39.5 +
    39.6 +GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_hvm_build.c
    39.7 +GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_linux_save.c
    39.8 +GUEST_SRCS-$(CONFIG_IA64) += ia64/xc_ia64_linux_restore.c
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/tools/libxc/ia64/xc_ia64_hvm_build.c	Fri Jul 28 10:51:38 2006 +0100
    40.3 @@ -0,0 +1,673 @@
    40.4 +#include "xg_private.h"
    40.5 +#include "xenguest.h"
    40.6 +#include "xc_private.h"
    40.7 +#include "xc_elf.h"
    40.8 +#include <stdlib.h>
    40.9 +#include <zlib.h>
   40.10 +#include "xen/arch-ia64.h"
   40.11 +#include <xen/hvm/ioreq.h>
   40.12 +
   40.13 +static int
   40.14 +xc_ia64_copy_to_domain_pages(int xc_handle, uint32_t domid, void* src_page,
   40.15 +                             unsigned long dst_pfn, int nr_pages)
   40.16 +{
   40.17 +    // N.B. gva should be page aligned
   40.18 +
   40.19 +    xen_pfn_t *page_array = NULL;
   40.20 +    int i;
   40.21 +
   40.22 +    page_array = malloc(nr_pages * sizeof(xen_pfn_t));
   40.23 +    if (page_array == NULL) {
   40.24 +        PERROR("Could not allocate memory");
   40.25 +        goto error_out;
   40.26 +    }
   40.27 +    if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
   40.28 +                             dst_pfn, nr_pages) != nr_pages) {
   40.29 +        PERROR("Could not get the page frame list");
   40.30 +        goto error_out;
   40.31 +    }
   40.32 +
   40.33 +    for (i = 0; i < nr_pages; i++) {
   40.34 +        if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
   40.35 +                                   src_page + (i << PAGE_SHIFT)))
   40.36 +            goto error_out;
   40.37 +    }
   40.38 +    free(page_array);
   40.39 +    return 0;
   40.40 +
   40.41 +error_out:
   40.42 +    free(page_array);
   40.43 +    return -1;
   40.44 +}
   40.45 +
   40.46 +
   40.47 +#define HOB_SIGNATURE         0x3436474953424f48        // "HOBSIG64"
   40.48 +#define GFW_HOB_START         ((4UL<<30)-(14UL<<20))    // 4G - 14M
   40.49 +#define GFW_HOB_SIZE          (1UL<<20)                 // 1M
   40.50 +
   40.51 +typedef struct {
   40.52 +    unsigned long signature;
   40.53 +    unsigned int  type;
   40.54 +    unsigned int  length;
   40.55 +} HOB_GENERIC_HEADER;
   40.56 +
   40.57 +/*
   40.58 + * INFO HOB is the first entry in a HOB list
   40.59 + * it contains the control information of the HOB list
   40.60 + */
   40.61 +typedef struct {
   40.62 +    HOB_GENERIC_HEADER  header;
   40.63 +    unsigned long       length;    // current length of hob
   40.64 +    unsigned long       cur_pos;   // current position of hob
   40.65 +    unsigned long       buf_size;  // size of hob buffer
   40.66 +} HOB_INFO;
   40.67 +
   40.68 +typedef struct{
   40.69 +    unsigned long start;
   40.70 +    unsigned long size;
   40.71 +} hob_mem_t;
   40.72 +
   40.73 +typedef enum {
   40.74 +    HOB_TYPE_INFO=0,
   40.75 +    HOB_TYPE_TERMINAL,
   40.76 +    HOB_TYPE_MEM,
   40.77 +    HOB_TYPE_PAL_BUS_GET_FEATURES_DATA,
   40.78 +    HOB_TYPE_PAL_CACHE_SUMMARY,
   40.79 +    HOB_TYPE_PAL_MEM_ATTRIB,
   40.80 +    HOB_TYPE_PAL_CACHE_INFO,
   40.81 +    HOB_TYPE_PAL_CACHE_PROT_INFO,
   40.82 +    HOB_TYPE_PAL_DEBUG_INFO,
   40.83 +    HOB_TYPE_PAL_FIXED_ADDR,
   40.84 +    HOB_TYPE_PAL_FREQ_BASE,
   40.85 +    HOB_TYPE_PAL_FREQ_RATIOS,
   40.86 +    HOB_TYPE_PAL_HALT_INFO,
   40.87 +    HOB_TYPE_PAL_PERF_MON_INFO,
   40.88 +    HOB_TYPE_PAL_PROC_GET_FEATURES,
   40.89 +    HOB_TYPE_PAL_PTCE_INFO,
   40.90 +    HOB_TYPE_PAL_REGISTER_INFO,
   40.91 +    HOB_TYPE_PAL_RSE_INFO,
   40.92 +    HOB_TYPE_PAL_TEST_INFO,
   40.93 +    HOB_TYPE_PAL_VM_SUMMARY,
   40.94 +    HOB_TYPE_PAL_VM_INFO,
   40.95 +    HOB_TYPE_PAL_VM_PAGE_SIZE,
   40.96 +    HOB_TYPE_NR_VCPU,
   40.97 +    HOB_TYPE_MAX
   40.98 +} hob_type_t;
   40.99 +
  40.100 +static int hob_init(void  *buffer ,unsigned long buf_size);
  40.101 +static int add_pal_hob(void* hob_buf);
  40.102 +static int add_mem_hob(void* hob_buf, unsigned long dom_mem_size);
  40.103 +static int add_vcpus_hob(void* hob_buf, unsigned long nr_vcpu);
  40.104 +static int build_hob(void* hob_buf, unsigned long hob_buf_size,
  40.105 +                     unsigned long dom_mem_size, unsigned long vcpus);
  40.106 +static int load_hob(int xc_handle,uint32_t dom, void *hob_buf,
  40.107 +                    unsigned long dom_mem_size);
  40.108 +
  40.109 +static int
  40.110 +xc_ia64_build_hob(int xc_handle, uint32_t dom,
  40.111 +                  unsigned long memsize, unsigned long vcpus)
  40.112 +{
  40.113 +    char   *hob_buf;
  40.114 +
  40.115 +    hob_buf = malloc(GFW_HOB_SIZE);
  40.116 +    if (hob_buf == NULL) {
  40.117 +        PERROR("Could not allocate hob");
  40.118 +        return -1;
  40.119 +    }
  40.120 +
  40.121 +    if (build_hob(hob_buf, GFW_HOB_SIZE, memsize, vcpus) < 0) {
  40.122 +        free(hob_buf);
  40.123 +        PERROR("Could not build hob");
  40.124 +        return -1;
  40.125 +    }
  40.126 +
  40.127 +    if (load_hob(xc_handle, dom, hob_buf, memsize) < 0) {
  40.128 +        free(hob_buf);
  40.129 +        PERROR("Could not load hob");
  40.130 +        return -1;
  40.131 +    }
  40.132 +    free(hob_buf);
  40.133 +    return 0;
  40.134 +
  40.135 +}
  40.136 +
  40.137 +static int
  40.138 +hob_init(void *buffer, unsigned long buf_size)
  40.139 +{
  40.140 +    HOB_INFO *phit;
  40.141 +    HOB_GENERIC_HEADER *terminal;
  40.142 +
  40.143 +    if (sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER) > buf_size) {
  40.144 +        // buffer too small
  40.145 +        return -1;
  40.146 +    }
  40.147 +
  40.148 +    phit = (HOB_INFO*)buffer;
  40.149 +    phit->header.signature = HOB_SIGNATURE;
  40.150 +    phit->header.type = HOB_TYPE_INFO;
  40.151 +    phit->header.length = sizeof(HOB_INFO);
  40.152 +    phit->length = sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER);
  40.153 +    phit->cur_pos = 0;
  40.154 +    phit->buf_size = buf_size;
  40.155 +
  40.156 +    terminal = (HOB_GENERIC_HEADER*)(buffer + sizeof(HOB_INFO));
  40.157 +    terminal->signature = HOB_SIGNATURE;
  40.158 +    terminal->type = HOB_TYPE_TERMINAL;
  40.159 +    terminal->length = sizeof(HOB_GENERIC_HEADER);
  40.160 +
  40.161 +    return 0;
  40.162 +}
  40.163 +
  40.164 +/*
  40.165 + *  Add a new HOB to the HOB List.
  40.166 + *
  40.167 + *  hob_start  -  start address of hob buffer
  40.168 + *  type       -  type of the hob to be added
  40.169 + *  data       -  data of the hob to be added
  40.170 + *  data_size  -  size of the data
  40.171 + */
  40.172 +static int
  40.173 +hob_add(void* hob_start, int type, void* data, int data_size)
  40.174 +{
  40.175 +    HOB_INFO *phit;
  40.176 +    HOB_GENERIC_HEADER *newhob, *tail;
  40.177 +
  40.178 +    phit = (HOB_INFO*)hob_start;
  40.179 +
  40.180 +    if (phit->length + data_size > phit->buf_size) {
  40.181 +        // no space for new hob
  40.182 +        return -1;
  40.183 +    }
  40.184 +
  40.185 +    //append new HOB
  40.186 +    newhob = (HOB_GENERIC_HEADER*)(hob_start + phit->length -
  40.187 +                                   sizeof(HOB_GENERIC_HEADER));
  40.188 +    newhob->signature = HOB_SIGNATURE;
  40.189 +    newhob->type = type;
  40.190 +    newhob->length = data_size + sizeof(HOB_GENERIC_HEADER);
  40.191 +    memcpy((void*)newhob + sizeof(HOB_GENERIC_HEADER), data, data_size);
  40.192 +
  40.193 +    // append terminal HOB
  40.194 +    tail = (HOB_GENERIC_HEADER*)(hob_start + phit->length + data_size);
  40.195 +    tail->signature = HOB_SIGNATURE;
  40.196 +    tail->type = HOB_TYPE_TERMINAL;
  40.197 +    tail->length = sizeof(HOB_GENERIC_HEADER);
  40.198 +
  40.199 +    // adjust HOB list length
  40.200 +    phit->length += sizeof(HOB_GENERIC_HEADER) + data_size;
  40.201 +
  40.202 +    return 0;
  40.203 +}
  40.204 +
  40.205 +static int
  40.206 +get_hob_size(void* hob_buf)
  40.207 +{
  40.208 +    HOB_INFO *phit = (HOB_INFO*)hob_buf;
  40.209 +
  40.210 +    if (phit->header.signature != HOB_SIGNATURE) {
  40.211 +        PERROR("xc_get_hob_size:Incorrect signature");
  40.212 +        return -1;
  40.213 +    }
  40.214 +    return phit->length;
  40.215 +}
  40.216 +
  40.217 +static int
  40.218 +build_hob(void* hob_buf, unsigned long hob_buf_size,
  40.219 +          unsigned long dom_mem_size, unsigned long vcpus)
  40.220 +{
  40.221 +    //Init HOB List
  40.222 +    if (hob_init(hob_buf, hob_buf_size) < 0) {
  40.223 +        PERROR("buffer too small");
  40.224 +        goto err_out;
  40.225 +    }
  40.226 +
  40.227 +    if (add_mem_hob(hob_buf,dom_mem_size) < 0) {
  40.228 +        PERROR("Add memory hob failed, buffer too small");
  40.229 +        goto err_out;
  40.230 +    }
  40.231 +
  40.232 +    if (add_vcpus_hob(hob_buf, vcpus) < 0) {
  40.233 +        PERROR("Add NR_VCPU hob failed, buffer too small");
  40.234 +        goto err_out;
  40.235 +    }
  40.236 +
  40.237 +    if (add_pal_hob( hob_buf ) < 0) {
  40.238 +        PERROR("Add PAL hob failed, buffer too small");
  40.239 +        goto err_out;
  40.240 +    }
  40.241 +
  40.242 +    return 0;
  40.243 +
  40.244 +err_out:
  40.245 +    return -1;
  40.246 +}
  40.247 +
  40.248 +static int
  40.249 +load_hob(int xc_handle, uint32_t dom, void *hob_buf,
  40.250 +         unsigned long dom_mem_size)
  40.251 +{
  40.252 +    // hob_buf should be page aligned
  40.253 +    int hob_size;
  40.254 +    int nr_pages;
  40.255 +
  40.256 +    hob_size = get_hob_size(hob_buf);
  40.257 +    if (hob_size < 0) {
  40.258 +        PERROR("Invalid hob data");
  40.259 +        return -1;
  40.260 +    }
  40.261 +
  40.262 +    if (hob_size > GFW_HOB_SIZE) {
  40.263 +        PERROR("No enough memory for hob data");
  40.264 +        return -1;
  40.265 +    }
  40.266 +
  40.267 +    nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
  40.268 +
  40.269 +    return xc_ia64_copy_to_domain_pages(xc_handle, dom, hob_buf,
  40.270 +                                        GFW_HOB_START >> PAGE_SHIFT, nr_pages);
  40.271 +}
  40.272 +
  40.273 +#define MIN(x, y) (((x) < (y)) ? (x) : (y))
  40.274 +static int
  40.275 +add_mem_hob(void* hob_buf, unsigned long dom_mem_size)
  40.276 +{
  40.277 +    hob_mem_t memhob;
  40.278 +
  40.279 +    // less than 3G
  40.280 +    memhob.start = 0;
  40.281 +    memhob.size = MIN(dom_mem_size, 0xC0000000);
  40.282 +
  40.283 +    if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0)
  40.284 +        return -1;
  40.285 +
  40.286 +    if (dom_mem_size > 0xC0000000) {
  40.287 +        // 4G ~ 4G+remain
  40.288 +        memhob.start = 0x100000000; //4G
  40.289 +        memhob.size = dom_mem_size - 0xC0000000;
  40.290 +        if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0)
  40.291 +            return -1;
  40.292 +    }
  40.293 +    return 0;
  40.294 +}
  40.295 +
  40.296 +static int 
  40.297 +add_vcpus_hob(void* hob_buf, unsigned long vcpus)
  40.298 +{
  40.299 +    return hob_add(hob_buf, HOB_TYPE_NR_VCPU, &vcpus, sizeof(vcpus));
  40.300 +}
  40.301 +
  40.302 +static const unsigned char config_pal_bus_get_features_data[24] = {
  40.303 +    0, 0, 0, 32, 0, 0, 240, 189, 0, 0, 0, 0, 0, 0,
  40.304 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  40.305 +};
  40.306 +
  40.307 +static const unsigned char config_pal_cache_summary[16] = {
  40.308 +    3, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0
  40.309 +};
  40.310 +
  40.311 +static const unsigned char config_pal_mem_attrib[8] = {
  40.312 +    241, 0, 0, 0, 0, 0, 0, 0
  40.313 +};
  40.314 +
  40.315 +static const unsigned char config_pal_cache_info[152] = {
  40.316 +    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.317 +    6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12,
  40.318 +    49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 7, 0, 1,
  40.319 +    0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0,
  40.320 +    0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0,
  40.321 +    12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
  40.322 +    7, 7, 5, 9, 11, 0, 0, 4, 0, 12, 15, 49, 0, 254, 255,
  40.323 +    255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9,
  40.324 +    11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0,
  40.325 +    0, 3, 12, 7, 7, 7, 14, 1, 3, 0, 0, 192, 0, 12, 20, 49, 0
  40.326 +};
  40.327 +
  40.328 +static const unsigned char config_pal_cache_prot_info[200] = {
  40.329 +    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.330 +    45, 0, 16, 8, 0, 76, 12, 64, 0, 0, 0, 0, 0, 0, 0,
  40.331 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.332 +    8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0,
  40.333 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32,
  40.334 +    0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.335 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0,
  40.336 +    112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.337 +    0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255,
  40.338 +    32, 0, 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.339 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160,
  40.340 +    12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.341 +    0, 0, 0
  40.342 +};
  40.343 +
  40.344 +static const unsigned char config_pal_debug_info[16] = {
  40.345 +    2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
  40.346 +};
  40.347 +
  40.348 +static const unsigned char config_pal_fixed_addr[8] = {
  40.349 +    0, 0, 0, 0, 0, 0, 0, 0
  40.350 +};
  40.351 +
  40.352 +static const unsigned char config_pal_freq_base[8] = {
  40.353 +    109, 219, 182, 13, 0, 0, 0, 0
  40.354 +};
  40.355 +
  40.356 +static const unsigned char config_pal_freq_ratios[24] = {
  40.357 +    11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4,
  40.358 +    0, 0, 0, 7, 0, 0, 0
  40.359 +};
  40.360 +
  40.361 +static const unsigned char config_pal_halt_info[64] = {
  40.362 +    0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0,
  40.363 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.364 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.365 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  40.366 +};
  40.367 +
  40.368 +static const unsigned char config_pal_perf_mon_info[136] = {
  40.369 +    12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0,
  40.370 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.371 +    0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255,
  40.372 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.373 +    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  40.374 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.375 +    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  40.376 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.377 +    0, 0, 0, 0, 0, 0, 0, 0
  40.378 +};
  40.379 +
  40.380 +static const unsigned char config_pal_proc_get_features[104] = {
  40.381 +    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.382 +    0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0,
  40.383 +    0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
  40.384 +    231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0,
  40.385 +    0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
  40.386 +    63, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0,
  40.387 +    0, 0, 0, 0, 0, 0, 0, 0
  40.388 +};
  40.389 +
  40.390 +static const unsigned char config_pal_ptce_info[24] = {
  40.391 +    0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
  40.392 +    0, 0, 0, 0, 0, 0, 0, 0
  40.393 +};
  40.394 +
  40.395 +static const unsigned char config_pal_register_info[64] = {
  40.396 +    255, 0, 47, 127, 17, 17, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0,
  40.397 +    255, 208, 128, 238, 238, 0, 0, 248, 255, 255, 255, 255, 255, 0, 0, 7, 3,
  40.398 +    251, 3, 0, 0, 0, 0, 255, 7, 3, 0, 0, 0, 0, 0, 248, 252, 4,
  40.399 +    252, 255, 255, 255, 255, 2, 248, 252, 255, 255, 255, 255, 255
  40.400 +};
  40.401 +
  40.402 +static const unsigned char config_pal_rse_info[16] = {
  40.403 +    96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  40.404 +};
  40.405 +
  40.406 +static const unsigned char config_pal_test_info[48] = {
  40.407 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.408 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.409 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  40.410 +};
  40.411 +
  40.412 +static const unsigned char config_pal_vm_summary[16] = {
  40.413 +    101, 18, 15, 2, 7, 7, 4, 2, 59, 18, 0, 0, 0, 0, 0, 0
  40.414 +};
  40.415 +
  40.416 +static const unsigned char config_pal_vm_info[104] = {
  40.417 +    2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
  40.418 +    32, 32, 0, 0, 0, 0, 0, 0, 112, 85, 21, 0, 0, 0, 0, 0, 0,
  40.419 +    0, 0, 0, 0, 0, 0, 1, 32, 32, 0, 0, 0, 0, 0, 0, 112, 85,
  40.420 +    21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 128, 0,
  40.421 +    4, 0, 0, 0, 0, 112, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40.422 +    0, 0, 0, 1, 128, 128, 0, 4, 0, 0, 0, 0, 112, 85, 0, 0, 0, 0, 0
  40.423 +};
  40.424 +
  40.425 +static const unsigned char config_pal_vm_page_size[16] = {
  40.426 +    0, 112, 85, 21, 0, 0, 0, 0, 0, 112, 85, 21, 0, 0, 0, 0
  40.427 +};
  40.428 +
  40.429 +typedef struct{
  40.430 +    hob_type_t type;
  40.431 +    void* data;
  40.432 +    unsigned long size;
  40.433 +} hob_batch_t;
  40.434 +
  40.435 +static const hob_batch_t hob_batch[]={
  40.436 +    {   HOB_TYPE_PAL_BUS_GET_FEATURES_DATA,
  40.437 +        &config_pal_bus_get_features_data,
  40.438 +        sizeof(config_pal_bus_get_features_data)
  40.439 +    },
  40.440 +    {   HOB_TYPE_PAL_CACHE_SUMMARY,
  40.441 +        &config_pal_cache_summary,
  40.442 +        sizeof(config_pal_cache_summary)
  40.443 +    },
  40.444 +    {   HOB_TYPE_PAL_MEM_ATTRIB,
  40.445 +        &config_pal_mem_attrib,
  40.446 +        sizeof(config_pal_mem_attrib)
  40.447 +    },
  40.448 +    {   HOB_TYPE_PAL_CACHE_INFO,
  40.449 +        &config_pal_cache_info,
  40.450 +        sizeof(config_pal_cache_info)
  40.451 +    },
  40.452 +    {   HOB_TYPE_PAL_CACHE_PROT_INFO,
  40.453 +        &config_pal_cache_prot_info,
  40.454 +        sizeof(config_pal_cache_prot_info)
  40.455 +    },
  40.456 +    {   HOB_TYPE_PAL_DEBUG_INFO,
  40.457 +        &config_pal_debug_info,
  40.458 +        sizeof(config_pal_debug_info)
  40.459 +    },
  40.460 +    {   HOB_TYPE_PAL_FIXED_ADDR,
  40.461 +        &config_pal_fixed_addr,
  40.462 +        sizeof(config_pal_fixed_addr)
  40.463 +    },
  40.464 +    {   HOB_TYPE_PAL_FREQ_BASE,
  40.465 +        &config_pal_freq_base,
  40.466 +        sizeof(config_pal_freq_base)
  40.467 +    },
  40.468 +    {   HOB_TYPE_PAL_FREQ_RATIOS,
  40.469 +        &config_pal_freq_ratios,
  40.470 +        sizeof(config_pal_freq_ratios)
  40.471 +    },
  40.472 +    {   HOB_TYPE_PAL_HALT_INFO,
  40.473 +        &config_pal_halt_info,
  40.474 +        sizeof(config_pal_halt_info)
  40.475 +    },
  40.476 +    {   HOB_TYPE_PAL_PERF_MON_INFO,
  40.477 +        &config_pal_perf_mon_info,
  40.478 +        sizeof(config_pal_perf_mon_info)
  40.479 +    },
  40.480 +    {   HOB_TYPE_PAL_PROC_GET_FEATURES,
  40.481 +        &config_pal_proc_get_features,
  40.482 +        sizeof(config_pal_proc_get_features)
  40.483 +    },
  40.484 +    {   HOB_TYPE_PAL_PTCE_INFO,
  40.485 +        &config_pal_ptce_info,
  40.486 +        sizeof(config_pal_ptce_info)
  40.487 +    },
  40.488 +    {   HOB_TYPE_PAL_REGISTER_INFO,
  40.489 +        &config_pal_register_info,
  40.490 +        sizeof(config_pal_register_info)
  40.491 +    },
  40.492 +    {   HOB_TYPE_PAL_RSE_INFO,
  40.493 +        &config_pal_rse_info,
  40.494 +        sizeof(config_pal_rse_info)
  40.495 +    },
  40.496 +    {   HOB_TYPE_PAL_TEST_INFO,
  40.497 +        &config_pal_test_info,
  40.498 +        sizeof(config_pal_test_info)
  40.499 +    },
  40.500 +    {   HOB_TYPE_PAL_VM_SUMMARY,
  40.501 +        &config_pal_vm_summary,
  40.502 +        sizeof(config_pal_vm_summary)
  40.503 +    },
  40.504 +    {   HOB_TYPE_PAL_VM_INFO,
  40.505 +        &config_pal_vm_info,
  40.506 +        sizeof(config_pal_vm_info)
  40.507 +    },
  40.508 +    {   HOB_TYPE_PAL_VM_PAGE_SIZE,
  40.509 +        &config_pal_vm_page_size,
  40.510 +        sizeof(config_pal_vm_page_size)
  40.511 +    },
  40.512 +};
  40.513 +
  40.514 +static int
  40.515 +add_pal_hob(void* hob_buf)
  40.516 +{
  40.517 +    int i;
  40.518 +    for (i = 0; i < sizeof(hob_batch)/sizeof(hob_batch_t); i++) {
  40.519 +        if (hob_add(hob_buf, hob_batch[i].type, hob_batch[i].data,
  40.520 +                    hob_batch[i].size) < 0)
  40.521 +            return -1;
  40.522 +    }
  40.523 +    return 0;
  40.524 +}
  40.525 +
  40.526 +static int
  40.527 +setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
  40.528 +            char *image, unsigned long image_size, uint32_t vcpus,
  40.529 +            unsigned int store_evtchn, unsigned long *store_mfn)
  40.530 +{
  40.531 +    unsigned long page_array[2];
  40.532 +    shared_iopage_t *sp;
  40.533 +    int i;
  40.534 +    unsigned long dom_memsize = (memsize << 20);
  40.535 +    DECLARE_DOM0_OP;
  40.536 +
  40.537 +    if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
  40.538 +        PERROR("Guest firmware size is incorrect [%ld]?", image_size);
  40.539 +        return -1;
  40.540 +    }
  40.541 +
  40.542 +    /* This will create the physmap.  */
  40.543 +    op.u.domain_setup.flags = XEN_DOMAINSETUP_hvm_guest;
  40.544 +    op.u.domain_setup.domain = (domid_t)dom;
  40.545 +    op.u.domain_setup.bp = 0;
  40.546 +    op.u.domain_setup.maxmem = 0;
  40.547 +    
  40.548 +    op.cmd = DOM0_DOMAIN_SETUP;
  40.549 +    if (xc_dom0_op(xc_handle, &op))
  40.550 +        goto error_out;
  40.551 +
  40.552 +    /* Load guest firmware */
  40.553 +    if (xc_ia64_copy_to_domain_pages(xc_handle, dom, image,
  40.554 +                            (GFW_START + GFW_SIZE - image_size) >> PAGE_SHIFT,
  40.555 +                            image_size >> PAGE_SHIFT)) {
  40.556 +        PERROR("Could not load guest firmware into domain");
  40.557 +        goto error_out;
  40.558 +    }
  40.559 +
  40.560 +    /* Hand-off state passed to guest firmware */
  40.561 +    if (xc_ia64_build_hob(xc_handle, dom, dom_memsize,
  40.562 +                          (unsigned long)vcpus) < 0) {
  40.563 +        PERROR("Could not build hob\n");
  40.564 +        goto error_out;
  40.565 +    }
  40.566 +
  40.567 +    /* Retrieve special pages like io, xenstore, etc. */
  40.568 +    if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
  40.569 +                             IO_PAGE_START>>PAGE_SHIFT, 2) != 2) {
  40.570 +        PERROR("Could not get the page frame list");
  40.571 +        goto error_out;
  40.572 +    }
  40.573 +
  40.574 +    *store_mfn = page_array[1];
  40.575 +    sp = (shared_iopage_t *)xc_map_foreign_range(xc_handle, dom,
  40.576 +                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
  40.577 +    if (sp == 0)
  40.578 +        goto error_out;
  40.579 +
  40.580 +    memset(sp, 0, PAGE_SIZE);
  40.581 +
  40.582 +    for (i = 0; i < vcpus; i++) {
  40.583 +        int vp_eport;
  40.584 +
  40.585 +        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
  40.586 +        if (vp_eport < 0) {
  40.587 +            DPRINTF("Couldn't get unbound port from VMX guest.\n");
  40.588 +            goto error_out;
  40.589 +        }
  40.590 +        sp->vcpu_iodata[i].vp_eport = vp_eport;
  40.591 +    }
  40.592 +
  40.593 +    munmap(sp, PAGE_SIZE);
  40.594 +
  40.595 +    return 0;
  40.596 +
  40.597 +error_out:
  40.598 +    return -1;
  40.599 +}
  40.600 +
  40.601 +int
  40.602 +xc_hvm_build(int xc_handle, uint32_t domid, int memsize,
  40.603 +             const char *image_name, unsigned int vcpus, unsigned int pae,
  40.604 +             unsigned int acpi, unsigned int apic, unsigned int store_evtchn,
  40.605 +             unsigned long *store_mfn)
  40.606 +{
  40.607 +    dom0_op_t launch_op, op;
  40.608 +    int rc;
  40.609 +    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
  40.610 +    char *image = NULL;
  40.611 +    unsigned long image_size;
  40.612 +    long nr_pages;
  40.613 +
  40.614 +    nr_pages = xc_get_max_pages(xc_handle, domid);
  40.615 +    if (nr_pages < 0) {
  40.616 +        PERROR("Could not find total pages for domain");
  40.617 +        goto error_out;
  40.618 +    }
  40.619 +
  40.620 +    image = xc_read_image(image_name, &image_size);
  40.621 +    if (image == NULL) {
  40.622 +        PERROR("Could not read guest firmware image %s", image_name);
  40.623 +        goto error_out;
  40.624 +    }
  40.625 +
  40.626 +    image_size = (image_size + PAGE_SIZE - 1) & PAGE_MASK;
  40.627 +
  40.628 +    if (mlock(&st_ctxt, sizeof(st_ctxt))) {
  40.629 +        PERROR("Unable to mlock ctxt");
  40.630 +        return 1;
  40.631 +    }
  40.632 +
  40.633 +    op.cmd = DOM0_GETDOMAININFO;
  40.634 +    op.u.getdomaininfo.domain = (domid_t)domid;
  40.635 +    if (do_dom0_op(xc_handle, &op) < 0 ||
  40.636 +        (uint16_t)op.u.getdomaininfo.domain != domid) {
  40.637 +        PERROR("Could not get info on domain");
  40.638 +        goto error_out;
  40.639 +    }
  40.640 +
  40.641 +    memset(ctxt, 0, sizeof(*ctxt));
  40.642 +
  40.643 +    if (setup_guest(xc_handle, domid, (unsigned long)memsize, image,
  40.644 +                    image_size, vcpus, store_evtchn, store_mfn) < 0) {
  40.645 +        ERROR("Error constructing guest OS");
  40.646 +        goto error_out;
  40.647 +    }
  40.648 +
  40.649 +    free(image);
  40.650 +
  40.651 +    ctxt->user_regs.cr_iip = 0x80000000ffffffb0UL;
  40.652 +
  40.653 +    memset(&launch_op, 0, sizeof(launch_op));
  40.654 +
  40.655 +    launch_op.u.setvcpucontext.domain = (domid_t)domid;
  40.656 +    launch_op.u.setvcpucontext.vcpu = 0;
  40.657 +    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
  40.658 +
  40.659 +    launch_op.cmd = DOM0_SETVCPUCONTEXT;
  40.660 +    rc = do_dom0_op(xc_handle, &launch_op);
  40.661 +    return rc;
  40.662 +
  40.663 +error_out:
  40.664 +    free(image);
  40.665 +    return -1;
  40.666 +}
  40.667 +
  40.668 +/*
  40.669 + * Local variables:
  40.670 + * mode: C
  40.671 + * c-set-style: "BSD"
  40.672 + * c-basic-offset: 4
  40.673 + * tab-width: 4
  40.674 + * indent-tabs-mode: nil
  40.675 + * End:
  40.676 + */
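The HOB buffer built above is a flat list: the HOB_INFO entry at the front records the total length, every entry starts with a (signature, type, length) header, and a HOB_TYPE_TERMINAL header closes the chain (hob_add() re-appends it after every insertion). A sketch of how such a buffer can be walked by a consumer; the struct layout mirrors the definitions in the new file, but the walker itself is illustrative and not part of this changeset:

    #include <stdio.h>

    #define HOB_SIGNATURE 0x3436474953424f48UL /* "HOBSIG64", assumes LP64 as on ia64 */

    typedef struct {
        unsigned long signature;
        unsigned int  type;
        unsigned int  length;                  /* header + payload, in bytes */
    } HOB_GENERIC_HEADER;

    enum { HOB_TYPE_INFO = 0, HOB_TYPE_TERMINAL /* other types omitted */ };

    /* Walk a HOB buffer until the terminal entry; return the number of entries
     * seen (excluding the terminal), or -1 if the chain looks corrupt. */
    static int walk_hob_list(const void *buf, unsigned long buf_size)
    {
        const char *p = buf, *end = p + buf_size;
        int n = 0;

        while (p + sizeof(HOB_GENERIC_HEADER) <= end) {
            const HOB_GENERIC_HEADER *h = (const HOB_GENERIC_HEADER *)p;

            if (h->signature != HOB_SIGNATURE || h->length < sizeof(*h))
                return -1;                     /* bad signature or length */
            if (h->type == HOB_TYPE_TERMINAL)
                return n;                      /* clean end of chain */
            printf("hob %d: type=%u length=%u\n", n, h->type, h->length);
            p += h->length;
            n++;
        }
        return -1;                             /* ran off the end of the buffer */
    }

    int main(void)
    {
        /* Minimal list: an INFO-sized entry followed by the terminal HOB. */
        struct {
            HOB_GENERIC_HEADER info;
            unsigned long      ctl[3];         /* length/cur_pos/buf_size slots */
            HOB_GENERIC_HEADER term;
        } buf = {
            { HOB_SIGNATURE, HOB_TYPE_INFO,
              sizeof(HOB_GENERIC_HEADER) + 3 * sizeof(unsigned long) },
            { 0, 0, 0 },
            { HOB_SIGNATURE, HOB_TYPE_TERMINAL, sizeof(HOB_GENERIC_HEADER) }
        };

        printf("entries: %d\n", walk_hob_list(&buf, sizeof(buf)));
        return 0;
    }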
    41.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Fri Jul 28 10:51:38 2006 +0100
    41.3 @@ -0,0 +1,320 @@
    41.4 +/******************************************************************************
    41.5 + * xc_ia64_linux_restore.c
    41.6 + *
    41.7 + * Restore the state of a Linux session.
    41.8 + *
    41.9 + * Copyright (c) 2003, K A Fraser.
   41.10 + *  Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
   41.11 + */
   41.12 +
   41.13 +#include <stdlib.h>
   41.14 +#include <unistd.h>
   41.15 +
   41.16 +#include "xg_private.h"
   41.17 +
   41.18 +#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))
   41.19 +
   41.20 +/* total number of pages used by the current guest */
   41.21 +static unsigned long max_pfn;
   41.22 +
   41.23 +static ssize_t
   41.24 +read_exact(int fd, void *buf, size_t count)
   41.25 +{
   41.26 +    int r = 0, s;
   41.27 +    unsigned char *b = buf;
   41.28 +
   41.29 +    while (r < count) {
   41.30 +        s = read(fd, &b[r], count - r);
   41.31 +        if ((s == -1) && (errno == EINTR))
   41.32 +            continue;
   41.33 +        if (s <= 0) {
   41.34 +            break;
   41.35 +        }
   41.36 +        r += s;
   41.37 +    }
   41.38 +
   41.39 +    return (r == count) ? 1 : 0;
   41.40 +}
   41.41 +
   41.42 +static int
   41.43 +read_page(int xc_handle, int io_fd, uint32_t dom, unsigned long pfn)
   41.44 +{
   41.45 +    void *mem;
   41.46 +
   41.47 +    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   41.48 +                               PROT_READ|PROT_WRITE, pfn);
   41.49 +    if (mem == NULL) {
   41.50 +            ERR("cannot map page");
   41.51 +	    return -1;
   41.52 +    }
   41.53 +    if (!read_exact(io_fd, mem, PAGE_SIZE)) {
   41.54 +            ERR("Error when reading from state file (5)");
   41.55 +            return -1;
   41.56 +    }
   41.57 +    munmap(mem, PAGE_SIZE);
   41.58 +    return 0;
   41.59 +}
   41.60 +
   41.61 +int
   41.62 +xc_linux_restore(int xc_handle, int io_fd, uint32_t dom,
   41.63 +                 unsigned long nr_pfns, unsigned int store_evtchn,
   41.64 +                 unsigned long *store_mfn, unsigned int console_evtchn,
   41.65 +                 unsigned long *console_mfn)
   41.66 +{
   41.67 +    DECLARE_DOM0_OP;
   41.68 +    int rc = 1, i;
   41.69 +    unsigned long mfn, pfn;
   41.70 +    unsigned long ver;
   41.71 +
   41.72 +    /* The new domain's shared-info frame number. */
   41.73 +    unsigned long shared_info_frame;
   41.74 +    unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
   41.75 +    shared_info_t *shared_info = (shared_info_t *)shared_info_page;
   41.76 +
   41.77 +    /* A copy of the CPU context of the guest. */
   41.78 +    vcpu_guest_context_t ctxt;
   41.79 +
   41.80 +    unsigned long *page_array = NULL;
   41.81 +
   41.82 +    /* A temporary mapping of the guest's start_info page. */
   41.83 +    start_info_t *start_info;
   41.84 +
   41.85 +    max_pfn = nr_pfns;
   41.86 +
   41.87 +    DPRINTF("xc_linux_restore start: max_pfn = %ld\n", max_pfn);
   41.88 +
   41.89 +
   41.90 +    if (!read_exact(io_fd, &ver, sizeof(unsigned long))) {
   41.91 +	ERR("Error when reading version");
   41.92 +	goto out;
   41.93 +    }
   41.94 +    if (ver != 1) {
   41.95 +	ERR("version of save doesn't match");
   41.96 +	goto out;
   41.97 +    }
   41.98 +
   41.99 +    if (mlock(&ctxt, sizeof(ctxt))) {
  41.100 +        /* needed for build dom0 op, but might as well do early */
  41.101 +        ERR("Unable to mlock ctxt");
  41.102 +        return 1;
  41.103 +    }
  41.104 +
  41.105 +    /* Get the domain's shared-info frame. */
  41.106 +    op.cmd = DOM0_GETDOMAININFO;
  41.107 +    op.u.getdomaininfo.domain = (domid_t)dom;
  41.108 +    if (xc_dom0_op(xc_handle, &op) < 0) {
  41.109 +        ERR("Could not get information on new domain");
  41.110 +        goto out;
  41.111 +    }
  41.112 +    shared_info_frame = op.u.getdomaininfo.shared_info_frame;
  41.113 +
  41.114 +    if (xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
  41.115 +        errno = ENOMEM;
  41.116 +        goto out;
  41.117 +    }
  41.118 +
  41.119 +    if (xc_domain_memory_increase_reservation(xc_handle, dom, max_pfn,
  41.120 +                                              0, 0, NULL) != 0) {
  41.121 +        ERR("Failed to increase reservation by %ld KB", PFN_TO_KB(max_pfn));
  41.122 +        errno = ENOMEM;
  41.123 +        goto out;
  41.124 +    }
  41.125 +
  41.126 +    DPRINTF("Increased domain reservation by %ld KB\n", PFN_TO_KB(max_pfn));
  41.127 +
  41.128 +    if (!read_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) {
  41.129 +        ERR("read: domain setup");
  41.130 +        goto out;
  41.131 +    }
  41.132 +
  41.133 +    /* Build firmware (will be overwritten).  */
  41.134 +    op.u.domain_setup.domain = (domid_t)dom;
  41.135 +    op.u.domain_setup.flags &= ~XEN_DOMAINSETUP_query;
  41.136 +    op.u.domain_setup.bp = ((nr_pfns - 3) << PAGE_SHIFT)
  41.137 +                           + sizeof (start_info_t);
  41.138 +    op.u.domain_setup.maxmem = (nr_pfns - 3) << PAGE_SHIFT;
  41.139 +    
  41.140 +    op.cmd = DOM0_DOMAIN_SETUP;
  41.141 +    if (xc_dom0_op(xc_handle, &op))
  41.142 +        goto out;
  41.143 +
  41.144 +    /* Get pages.  */
  41.145 +    page_array = malloc(max_pfn * sizeof(unsigned long));
  41.146 +    if (page_array == NULL ) {
  41.147 +        ERR("Could not allocate memory");
  41.148 +        goto out;
  41.149 +    }
  41.150 +
  41.151 +    if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
  41.152 +                             0, max_pfn) != max_pfn) {
  41.153 +        ERR("Could not get the page frame list");
  41.154 +        goto out;
  41.155 +    }
  41.156 +
  41.157 +    DPRINTF("Reloading memory pages:   0%%\n");
  41.158 +
  41.159 +    while (1) {
  41.160 +        if (!read_exact(io_fd, &mfn, sizeof(unsigned long))) {
  41.161 +            ERR("Error when reading batch size");
  41.162 +            goto out;
  41.163 +        }
  41.164 +	if (mfn == INVALID_MFN)
  41.165 +		break;
  41.166 +
  41.167 +	pfn = page_array[mfn];
  41.168 +
  41.169 +        //DPRINTF("xc_linux_restore: page %lu/%lu at %lx\n", mfn, max_pfn, pfn);
  41.170 +
  41.171 +	if (read_page(xc_handle, io_fd, dom, page_array[mfn]) < 0)
  41.172 +		goto out;
  41.173 +    }
  41.174 +
  41.175 +    DPRINTF("Received all pages\n");
  41.176 +
  41.177 +    /* Get the list of PFNs that are not in the pseudo-phys map */
  41.178 +    {
  41.179 +        unsigned int count;
  41.180 +        unsigned long *pfntab;
  41.181 +        int rc;
  41.182 +
  41.183 +        if (!read_exact(io_fd, &count, sizeof(count))) {
  41.184 +            ERR("Error when reading pfn count");
  41.185 +            goto out;
  41.186 +        }
  41.187 +
  41.188 +        pfntab = malloc(sizeof(unsigned long) * count);
  41.189 +        if (!pfntab) {
  41.190 +            ERR("Out of memory");
  41.191 +            goto out;
  41.192 +        }
  41.193 +
  41.194 +        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
  41.195 +            ERR("Error when reading pfntab");
  41.196 +            goto out;
  41.197 +        }
  41.198 +
  41.199 +	DPRINTF ("Try to free %u pages\n", count);
  41.200 +
  41.201 +        for (i = 0; i < count; i++) {
  41.202 +
  41.203 +	    volatile unsigned long pfn;
  41.204 +
  41.205 +            struct xen_memory_reservation reservation = {
  41.206 +                .nr_extents   = 1,
  41.207 +                .extent_order = 0,
  41.208 +                .domid        = dom
  41.209 +            };
  41.210 +            set_xen_guest_handle(reservation.extent_start,
  41.211 +				 (unsigned long *)&pfn);
  41.212 +
  41.213 +	    pfn = pfntab[i];
  41.214 +            rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
  41.215 +                              &reservation);
  41.216 +            if (rc != 1) {
  41.217 +                ERR("Could not decrease reservation : %d", rc);
  41.218 +                goto out;
  41.219 +            }
  41.220 +        }
  41.221 +
  41.222 +	DPRINTF("Decreased reservation by %d pages\n", count);
  41.223 +    }
  41.224 +
  41.225 +
  41.226 +    if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
  41.227 +        ERR("Error when reading ctxt");
  41.228 +        goto out;
  41.229 +    }
  41.230 +
  41.231 +    /* First to initialize.  */
  41.232 +    op.cmd = DOM0_SETVCPUCONTEXT;
  41.233 +    op.u.setvcpucontext.domain = (domid_t)dom;
  41.234 +    op.u.setvcpucontext.vcpu   = 0;
  41.235 +    set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt);
  41.236 +    if (xc_dom0_op(xc_handle, &op) != 0) {
  41.237 +	    ERR("Couldn't set vcpu context");
  41.238 +	    goto out;
  41.239 +    }
  41.240 +
  41.241 +    /* Second to set registers...  */
  41.242 +    ctxt.flags = VGCF_EXTRA_REGS;
  41.243 +    op.cmd = DOM0_SETVCPUCONTEXT;
  41.244 +    op.u.setvcpucontext.domain = (domid_t)dom;
  41.245 +    op.u.setvcpucontext.vcpu   = 0;
  41.246 +    set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt);
  41.247 +    if (xc_dom0_op(xc_handle, &op) != 0) {
  41.248 +	    ERR("Couldn't set vcpu context");
  41.249 +	    goto out;
  41.250 +    }
  41.251 +
  41.252 +    /* Just a check.  */
  41.253 +    if (xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, &ctxt)) {
  41.254 +        ERR("Could not get vcpu context");
  41.255 +	goto out;
  41.256 +    }
  41.257 +
  41.258 +    /* Then get privreg page.  */
  41.259 +    if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
  41.260 +	    ERR("Could not read vcpu privregs");
  41.261 +	    goto out;
  41.262 +    }
  41.263 +
  41.264 +    /* Read shared info.  */
  41.265 +    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  41.266 +                                       PROT_READ|PROT_WRITE, shared_info_frame);
  41.267 +    if (shared_info == NULL) {
  41.268 +            ERR("cannot map page");
  41.269 +	    goto out;
  41.270 +    }
  41.271 +    if (!read_exact(io_fd, shared_info, PAGE_SIZE)) {
  41.272 +            ERR("Error when reading shared_info page");
  41.273 +	    goto out;
  41.274 +    }
  41.275 +
  41.276 +    /* clear any pending events and the selector */
  41.277 +    memset(&(shared_info->evtchn_pending[0]), 0,
  41.278 +           sizeof (shared_info->evtchn_pending));
  41.279 +    for (i = 0; i < MAX_VIRT_CPUS; i++)
  41.280 +        shared_info->vcpu_info[i].evtchn_pending_sel = 0;
  41.281 +
  41.282 +    mfn = page_array[shared_info->arch.start_info_pfn];
  41.283 +
  41.284 +    munmap (shared_info, PAGE_SIZE);
  41.285 +
  41.286 +    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
  41.287 +    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  41.288 +                                      PROT_READ | PROT_WRITE, mfn);
  41.289 +    start_info->nr_pages = max_pfn;
  41.290 +    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
  41.291 +    start_info->flags = 0;
  41.292 +    *store_mfn = page_array[start_info->store_mfn];
  41.293 +    start_info->store_evtchn = store_evtchn;
  41.294 +    *console_mfn = page_array[start_info->console_mfn];
  41.295 +    start_info->console_evtchn = console_evtchn;
  41.296 +    munmap(start_info, PAGE_SIZE);
  41.297 +
  41.298 +    /*
  41.299 +     * Safety checking of saved context:
  41.300 +     *  1. user_regs is fine, as Xen checks that on context switch.
  41.301 +     *  2. fpu_ctxt is fine, as it can't hurt Xen.
  41.302 +     *  3. trap_ctxt needs the code selectors checked.
  41.303 +     *  4. ldt base must be page-aligned, no more than 8192 ents, ...
  41.304 +     *  5. gdt already done, and further checking is done by Xen.
  41.305 +     *  6. check that kernel_ss is safe.
  41.306 +     *  7. pt_base is already done.
  41.307 +     *  8. debugregs are checked by Xen.
  41.308 +     *  9. callback code selectors need checking.
  41.309 +     */
  41.310 +    DPRINTF("Domain ready to be built.\n");
  41.311 +
  41.312 +    rc = 0;
  41.313 +
  41.314 + out:
  41.315 +    if ((rc != 0) && (dom != 0))
  41.316 +        xc_domain_destroy(xc_handle, dom);
  41.317 +
  41.318 +    free (page_array);
  41.319 +
  41.320 +    DPRINTF("Restore exit with rc=%d\n", rc);
  41.321 +
  41.322 +    return rc;
  41.323 +}
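The restore path above reads, in order: a version word, the DOM0_DOMAIN_SETUP payload, (pfn-index, page) records terminated by INVALID_MFN, a count plus table of pfns to free, the vcpu context, the privregs page, and the shared_info page. The save side that follows writes the same stream, preceded by max_pfn, which its comment says is expected by xm restore rather than read here. A toy demonstration of the sentinel-terminated page framing only, with an in-memory buffer and an 8-byte "page" standing in for the real io_fd and PAGE_SIZE:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FAKE_PAGE_SIZE 8                   /* tiny page, demo only */
    #define INVALID_MFN    (~0UL)

    /* Emit (pfn, page) records and close the stream with INVALID_MFN, the same
     * framing the ia64 save/restore pair uses for its page section. */
    static size_t emit_pages(unsigned char *stream, const unsigned long *pfns,
                             int n, const unsigned char pages[][FAKE_PAGE_SIZE])
    {
        size_t off = 0;
        unsigned long end = INVALID_MFN;

        for (int i = 0; i < n; i++) {
            memcpy(stream + off, &pfns[i], sizeof(unsigned long));
            off += sizeof(unsigned long);
            memcpy(stream + off, pages[i], FAKE_PAGE_SIZE);
            off += FAKE_PAGE_SIZE;
        }
        memcpy(stream + off, &end, sizeof end); /* terminator record */
        return off + sizeof end;
    }

    int main(void)
    {
        unsigned char stream[256];
        const unsigned long pfns[2] = { 3, 7 };
        const unsigned char pages[2][FAKE_PAGE_SIZE] = { "page-3", "page-7" };
        size_t used = emit_pages(stream, pfns, 2, pages), off = 0;

        for (;;) {                              /* receiver side */
            unsigned long pfn;
            memcpy(&pfn, stream + off, sizeof pfn);
            off += sizeof pfn;
            if (pfn == INVALID_MFN)
                break;
            printf("restored pfn %lu: %s\n", pfn, (const char *)(stream + off));
            off += FAKE_PAGE_SIZE;
        }
        printf("stream used %zu of %zu bytes\n", used, sizeof stream);
        return 0;
    }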
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Fri Jul 28 10:51:38 2006 +0100
    42.3 @@ -0,0 +1,509 @@
    42.4 +/******************************************************************************
    42.5 + * xc_ia64_linux_save.c
    42.6 + *
    42.7 + * Save the state of a running Linux session.
    42.8 + *
    42.9 + * Copyright (c) 2003, K A Fraser.
   42.10 + *  Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
   42.11 + */
   42.12 +
   42.13 +#include <inttypes.h>
   42.14 +#include <time.h>
   42.15 +#include <stdlib.h>
   42.16 +#include <unistd.h>
   42.17 +#include <sys/time.h>
   42.18 +
   42.19 +#include "xg_private.h"
   42.20 +
   42.21 +/*
   42.22 +** Default values for important tuning parameters. Can be overridden by passing
   42.23 +** non-zero replacement values to xc_linux_save().
   42.24 +**
   42.25 +** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
   42.26 +**
   42.27 +*/
   42.28 +#define DEF_MAX_ITERS    (4 - 1)	/* limit us to 4 times round loop  */
   42.29 +#define DEF_MAX_FACTOR   3		/* never send more than 3x nr_pfns */
   42.30 +
   42.31 +/*
   42.32 +** During (live) save/migrate, we maintain a number of bitmaps to track
   42.33 +** which pages we have to send, and to skip.
   42.34 +*/
   42.35 +
   42.36 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
   42.37 +
   42.38 +#define BITMAP_ENTRY(_nr,_bmap) \
   42.39 +   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
   42.40 +
   42.41 +#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
   42.42 +
   42.43 +static inline int test_bit (int nr, volatile void * addr)
   42.44 +{
   42.45 +    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
   42.46 +}
   42.47 +
   42.48 +static inline void clear_bit (int nr, volatile void * addr)
   42.49 +{
   42.50 +    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
   42.51 +}
   42.52 +
   42.53 +static inline void set_bit ( int nr, volatile void * addr)
   42.54 +{
   42.55 +    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
   42.56 +}
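These helpers implement a plain unsigned-long-array bitmap indexed by pfn; the to_send and to_skip bitmaps further down are allocated as bitmap_size bytes and driven entirely through them. A standalone sanity check of their semantics, reusing the same macros:

    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * 8)
    #define BITMAP_ENTRY(_nr,_bmap) \
       ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
    #define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)

    static int test_bit(int nr, volatile void *addr)
    {
        return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
    }

    static void set_bit(int nr, volatile void *addr)
    {
        BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
    }

    int main(void)
    {
        unsigned long bmap[2] = { 0, 0 };      /* room for 2 * BITS_PER_LONG pfns */

        set_bit(3, bmap);
        set_bit(BITS_PER_LONG + 1, bmap);      /* lands in the second word */
        printf("%d %d %d\n", test_bit(3, bmap), test_bit(4, bmap),
               test_bit(BITS_PER_LONG + 1, bmap));   /* prints: 1 0 1 */
        return 0;
    }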
   42.57 +
   42.58 +/* total number of pages used by the current guest */
   42.59 +static unsigned long max_pfn;
   42.60 +
   42.61 +static int xc_ia64_shadow_control(int xc_handle,
   42.62 +                                  uint32_t domid,
   42.63 +                                  unsigned int sop,
   42.64 +                                  unsigned long *dirty_bitmap,
   42.65 +                                  unsigned long pages,
   42.66 +                                  xc_shadow_control_stats_t *stats)
   42.67 +{
   42.68 +    if (dirty_bitmap != NULL && pages > 0) {
   42.69 +        int i;
   42.70 +        unsigned char *bmap = (unsigned char *)dirty_bitmap;
   42.71 +        unsigned long bmap_bytes =
   42.72 +            ((pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / 8;
   42.73 +        unsigned int bmap_pages = (bmap_bytes + PAGE_SIZE - 1) / PAGE_SIZE; 
   42.74 +
   42.75 +        /* Touch the page so that it is in the TC.
   42.76 +           FIXME: use a more reliable method.  */
   42.77 +        for (i = 0 ; i < bmap_pages ; i++)
   42.78 +            bmap[i * PAGE_SIZE] = 0;
   42.79 +        /* Because bmap is not page aligned (allocated by malloc), be sure the
   42.80 +           last page is touched.  */
   42.81 +        bmap[bmap_bytes - 1] = 0;
   42.82 +    }
   42.83 +
   42.84 +    return xc_shadow_control(xc_handle, domid, sop,
   42.85 +                             dirty_bitmap, pages, stats);
   42.86 +}
   42.87 +
   42.88 +static inline ssize_t
   42.89 +write_exact(int fd, void *buf, size_t count)
   42.90 +{
   42.91 +    if (write(fd, buf, count) != count)
   42.92 +        return 0;
   42.93 +    return 1;
   42.94 +}
   42.95 +
   42.96 +static int
   42.97 +suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
   42.98 +                  int dom, xc_dominfo_t *info)
   42.99 +{
  42.100 +    int i = 0;
  42.101 +
  42.102 +    if (!(*suspend)(dom)) {
  42.103 +        ERR("Suspend request failed");
  42.104 +        return -1;
  42.105 +    }
  42.106 +
  42.107 +retry:
  42.108 +
  42.109 +    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
  42.110 +        ERR("Could not get domain info");
  42.111 +        return -1;
  42.112 +    }
  42.113 +
  42.114 +    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
  42.115 +        return 0; // success
  42.116 +
  42.117 +    if (info->paused) {
  42.118 +        // try unpausing domain, wait, and retest
  42.119 +        xc_domain_unpause(xc_handle, dom);
  42.120 +
  42.121 +        ERR("Domain was paused. Wait and re-test.");
  42.122 +        usleep(10000);  // 10ms
  42.123 +
  42.124 +        goto retry;
  42.125 +    }
  42.126 +
  42.127 +
  42.128 +    if(++i < 100) {
  42.129 +        ERR("Retry suspend domain.");
  42.130 +        usleep(10000);  // 10ms
  42.131 +        goto retry;
  42.132 +    }
  42.133 +
  42.134 +    ERR("Unable to suspend domain.");
  42.135 +
  42.136 +    return -1;
  42.137 +}
  42.138 +
  42.139 +int
  42.140 +xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
  42.141 +              uint32_t max_factor, uint32_t flags, int (*suspend)(int))
  42.142 +{
  42.143 +    DECLARE_DOM0_OP;
  42.144 +    xc_dominfo_t info;
  42.145 +
  42.146 +    int rc = 1;
  42.147 +
  42.148 +    //int live  = (flags & XCFLAGS_LIVE);
  42.149 +    int debug = (flags & XCFLAGS_DEBUG);
  42.150 +    int live  = (flags & XCFLAGS_LIVE);
  42.151 +
  42.152 +    /* The new domain's shared-info frame number. */
  42.153 +    unsigned long shared_info_frame;
  42.154 +
  42.155 +    /* A copy of the CPU context of the guest. */
  42.156 +    vcpu_guest_context_t ctxt;
  42.157 +
  42.158 +    unsigned long *page_array = NULL;
  42.159 +
  42.160 +    /* Live mapping of shared info structure */
  42.161 +    shared_info_t *live_shinfo = NULL;
  42.162 +
  42.163 +    /* Iteration number.  */
  42.164 +    int iter;
  42.165 +
  42.166 +    /* Number of pages sent in the last iteration (live only).  */
  42.167 +    unsigned int sent_last_iter;
  42.168 +
  42.169 +    /* Number of pages sent (live only).  */
  42.170 +    unsigned int total_sent;
  42.171 +
  42.172 +    /* Size of the shadow bitmap (live only).  */
  42.173 +    unsigned int bitmap_size = 0;
  42.174 +
  42.175 +    /* True if last iteration.  */
  42.176 +    int last_iter;
  42.177 +
  42.178 +    /* Bitmap of pages to be sent.  */
  42.179 +    unsigned long *to_send = NULL;
  42.180 +    /* Bitmap of pages not to be sent (because dirtied).  */
  42.181 +    unsigned long *to_skip = NULL;
  42.182 +
  42.183 +    char *mem;
  42.184 +
  42.185 +    if (debug)
  42.186 +        fprintf (stderr, "xc_linux_save (ia64): started dom=%d\n", dom);
  42.187 +
  42.188 +    /* If no explicit control parameters given, use defaults */
  42.189 +    if (!max_iters)
  42.190 +        max_iters = DEF_MAX_ITERS;
  42.191 +    if (!max_factor)
  42.192 +        max_factor = DEF_MAX_FACTOR;
  42.193 +
  42.194 +    //initialize_mbit_rate();
  42.195 +
  42.196 +    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
  42.197 +        ERR("Could not get domain info");
  42.198 +        return 1;
  42.199 +    }
  42.200 +
  42.201 +    shared_info_frame = info.shared_info_frame;
  42.202 +
  42.203 +#if 0
  42.204 +    /* cheesy sanity check */
  42.205 +    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
  42.206 +        ERR("Invalid state record -- pfn count out of range: %lu",
  42.207 +            (info.max_memkb >> (PAGE_SHIFT - 10)));
  42.208 +        goto out;
  42.209 +     }
  42.210 +#endif
  42.211 +
  42.212 +    /* Map the shared info frame */
  42.213 +    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  42.214 +                                       PROT_READ, shared_info_frame);
  42.215 +    if (!live_shinfo) {
  42.216 +        ERR("Couldn't map live_shinfo");
  42.217 +        goto out;
  42.218 +    }
  42.219 +
  42.220 +    max_pfn = info.max_memkb >> (PAGE_SHIFT - 10);
  42.221 +
  42.222 +    page_array = malloc(max_pfn * sizeof(unsigned long));
  42.223 +    if (page_array == NULL) {
  42.224 +        ERR("Could not allocate memory");
  42.225 +        goto out;
  42.226 +    }
  42.227 +
  42.228 +    /* This is expected by xm restore.  */
  42.229 +    if (!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
  42.230 +        ERR("write: max_pfn");
  42.231 +        goto out;
  42.232 +    }
  42.233 +
  42.234 +    /* xc_linux_restore starts to read here.  */
  42.235 +    /* Write a version number.  This can avoid searching for a stupid bug
  42.236 +       if the format changes.
  42.237 +       The version is hard-coded, don't forget to change the restore code
  42.238 +       too!  */
  42.239 +    {
  42.240 +        unsigned long version = 1;
  42.241 +
  42.242 +        if (!write_exact(io_fd, &version, sizeof(unsigned long))) {
  42.243 +            ERR("write: version");
  42.244 +            goto out;
  42.245 +        }
  42.246 +    }
  42.247 +
  42.248 +    op.cmd = DOM0_DOMAIN_SETUP;
  42.249 +    op.u.domain_setup.domain = (domid_t)dom;
  42.250 +    op.u.domain_setup.flags = XEN_DOMAINSETUP_query;
  42.251 +    if (xc_dom0_op(xc_handle, &op) < 0) {
  42.252 +        ERR("Could not get domain setup");
  42.253 +        goto out;
  42.254 +    }
  42.255 +    op.u.domain_setup.domain = 0;
  42.256 +    if (!write_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) {
  42.257 +        ERR("write: domain setup");
  42.258 +        goto out;
  42.259 +    }
  42.260 +
  42.261 +    /* Domain is still running at this point */
  42.262 +    if (live) {
  42.263 +
  42.264 +        if (xc_ia64_shadow_control(xc_handle, dom,
  42.265 +                                   DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
  42.266 +                                   NULL, 0, NULL ) < 0) {
  42.267 +            ERR("Couldn't enable shadow mode");
  42.268 +            goto out;
  42.269 +        }
  42.270 +
  42.271 +        last_iter = 0;
  42.272 +
  42.273 +        bitmap_size = ((max_pfn + BITS_PER_LONG-1) & ~(BITS_PER_LONG-1)) / 8;
  42.274 +        to_send = malloc(bitmap_size);
  42.275 +        to_skip = malloc(bitmap_size);
  42.276 +
  42.277 +        if (!to_send || !to_skip) {
  42.278 +            ERR("Couldn't allocate bitmap array");
  42.279 +            goto out;
  42.280 +        }
  42.281 +
  42.282 +        /* Initially all the pages must be sent.  */
  42.283 +        memset(to_send, 0xff, bitmap_size);
  42.284 +
  42.285 +        if (mlock(to_send, bitmap_size)) {
  42.286 +            ERR("Unable to mlock to_send");
  42.287 +            goto out;
  42.288 +        }
  42.289 +        if (mlock(to_skip, bitmap_size)) {
  42.290 +            ERR("Unable to mlock to_skip");
  42.291 +            goto out;
  42.292 +        }
  42.293 +        
  42.294 +    } else {
  42.295 +
  42.296 +        /* This is a non-live suspend. Issue the callback to get the
  42.297 +           domain suspended */
  42.298 +
  42.299 +        last_iter = 1;
  42.300 +
  42.301 +        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
  42.302 +            ERR("Domain appears not to have suspended");
  42.303 +            goto out;
  42.304 +        }
  42.305 +
  42.306 +    }
  42.307 +
  42.308 +    sent_last_iter = max_pfn;
  42.309 +    total_sent = 0;
  42.310 +
  42.311 +    for (iter = 1; ; iter++) {
  42.312 +        unsigned int sent_this_iter, skip_this_iter;
  42.313 +        unsigned long N;
  42.314 +
  42.315 +        sent_this_iter = 0;
  42.316 +        skip_this_iter = 0;
  42.317 +
  42.318 +        /* Get the pfn list, as it may change.  */
  42.319 +        if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
  42.320 +                                 0, max_pfn) != max_pfn) {
  42.321 +            ERR("Could not get the page frame list");
  42.322 +            goto out;
  42.323 +        }
  42.324 +
  42.325 +        /* Dirtied pages won't be saved.
  42.326 +           It is slightly wasteful to peek at the whole array every time,
  42.327 +           but this is fast enough for the moment. */
  42.328 +        if (!last_iter) {
  42.329 +            if (xc_ia64_shadow_control(xc_handle, dom,
  42.330 +                                       DOM0_SHADOW_CONTROL_OP_PEEK,
  42.331 +                                       to_skip, max_pfn, NULL) != max_pfn) {
  42.332 +                ERR("Error peeking shadow bitmap");
  42.333 +                goto out;
  42.334 +            }
  42.335 +        }
  42.336 +
  42.337 +        /* Start writing out the saved-domain record. */
  42.338 +        for (N = 0; N < max_pfn; N++) {
  42.339 +            if (page_array[N] == INVALID_MFN)
  42.340 +                continue;
  42.341 +            if (!last_iter) {
  42.342 +                if (test_bit(N, to_skip) && test_bit(N, to_send))
  42.343 +                    skip_this_iter++;
  42.344 +                if (test_bit(N, to_skip) || !test_bit(N, to_send))
  42.345 +                    continue;
  42.346 +            }
  42.347 +
  42.348 +            if (debug)
  42.349 +                fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
  42.350 +                        page_array[N], N, max_pfn);
  42.351 +
  42.352 +            mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  42.353 +                                       PROT_READ|PROT_WRITE, page_array[N]);
  42.354 +            if (mem == NULL) {
  42.355 +                /* The page may have moved.
  42.356 +                   It will be re-marked dirty.
  42.357 +                   FIXME: this should be tracked.  */
  42.358 +                fprintf(stderr, "cannot map page %lx: %s\n",
  42.359 +                        page_array[N], strerror (errno));
  42.360 +                continue;
  42.361 +            }
  42.362 +
  42.363 +            if (!write_exact(io_fd, &N, sizeof(N))) {
  42.364 +                ERR("write: pfn");
  42.365 +                goto out;
  42.366 +            }
  42.367 +
  42.368 +            if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
  42.369 +                ERR("Error when writing to state file (5)");
  42.370 +                goto out;
  42.371 +            }
  42.372 +            munmap(mem, PAGE_SIZE);
  42.373 +            sent_this_iter++;
  42.374 +            total_sent++;
  42.375 +        }
  42.376 +
  42.377 +        if (last_iter)
  42.378 +            break;
  42.379 +
  42.380 +        DPRINTF(" %d: sent %d, skipped %d\n",
  42.381 +                iter, sent_this_iter, skip_this_iter );
  42.382 +
  42.383 +        if (live) {
  42.384 +            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
  42.385 +                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
  42.386 +                (total_sent > max_pfn*max_factor)) {
  42.387 +                DPRINTF("Start last iteration\n");
  42.388 +                last_iter = 1;
  42.389 +
  42.390 +                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
  42.391 +                    ERR("Domain appears not to have suspended");
  42.392 +                    goto out;
  42.393 +                }
  42.394 +            }
  42.395 +
  42.396 +            /* Pages to be sent are pages which were dirty.  */
  42.397 +            if (xc_ia64_shadow_control(xc_handle, dom,
  42.398 +                                       DOM0_SHADOW_CONTROL_OP_CLEAN,
  42.399 +                                       to_send, max_pfn, NULL ) != max_pfn) {
  42.400 +                ERR("Error flushing shadow PT");
  42.401 +                goto out;
  42.402 +            }
  42.403 +
  42.404 +            sent_last_iter = sent_this_iter;
  42.405 +
  42.406 +            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
  42.407 +        }
  42.408 +
  42.409 +    }
  42.410 +
  42.411 +    fprintf (stderr, "All memory is saved\n");
  42.412 +
  42.413 +    /* terminate */
  42.414 +    {
  42.415 +        unsigned long pfn = INVALID_MFN;
  42.416 +        if (!write_exact(io_fd, &pfn, sizeof(pfn))) {
  42.417 +            ERR("Error when writing to state file (6)");
  42.418 +            goto out;
  42.419 +        }
  42.420 +    }
  42.421 +
  42.422 +    /* Send the list of all the PFNs that were not in the map at the close. */
  42.423 +    {
  42.424 +        unsigned int i,j;
  42.425 +        unsigned long pfntab[1024];
  42.426 +
  42.427 +        for (i = 0, j = 0; i < max_pfn; i++) {
  42.428 +            if (page_array[i] == INVALID_MFN)
  42.429 +                j++;
  42.430 +        }
  42.431 +
  42.432 +        if (!write_exact(io_fd, &j, sizeof(unsigned int))) {
  42.433 +            ERR("Error when writing to state file (6a)");
  42.434 +            goto out;
  42.435 +        }
  42.436 +
  42.437 +        for (i = 0, j = 0; i < max_pfn; ) {
  42.438 +
  42.439 +            if (page_array[i] == INVALID_MFN)
  42.440 +                pfntab[j++] = i;
  42.441 +
  42.442 +            i++;
  42.443 +            if (j == 1024 || i == max_pfn) {
  42.444 +                if (!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
  42.445 +                    ERR("Error when writing to state file (6b)");
  42.446 +                    goto out;
  42.447 +                }
  42.448 +                j = 0;
  42.449 +            }
  42.450 +        }
  42.451 +
  42.452 +    }
  42.453 +
  42.454 +    if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
  42.455 +        ERR("Could not get vcpu context");
  42.456 +        goto out;
  42.457 +    }
  42.458 +
  42.459 +    if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) {
  42.460 +        ERR("Error when writing to state file (1)");
  42.461 +        goto out;
  42.462 +    }
  42.463 +
  42.464 +    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  42.465 +                               PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
  42.466 +    if (mem == NULL) {
  42.467 +        ERR("cannot map privreg page");
  42.468 +        goto out;
  42.469 +    }
  42.470 +    if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
  42.471 +        ERR("Error when writing privreg to state file (5)");
  42.472 +        goto out;
  42.473 +    }
  42.474 +    munmap(mem, PAGE_SIZE);    
  42.475 +
  42.476 +    if (!write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
  42.477 +        ERR("Error when writing shared_info to state file (7)");
  42.478 +        goto out;
  42.479 +    }
  42.480 +
  42.481 +    /* Success! */
  42.482 +    rc = 0;
  42.483 +
  42.484 + out:
  42.485 +
  42.486 +    if (live) {
  42.487 +        if (xc_ia64_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
  42.488 +                                   NULL, 0, NULL ) < 0) {
  42.489 +            DPRINTF("Warning - couldn't disable shadow mode");
  42.490 +        }
  42.491 +    }
  42.492 +
  42.493 +    free(page_array);
  42.494 +    free(to_send);
  42.495 +    free(to_skip);
  42.496 +    if (live_shinfo)
  42.497 +        munmap(live_shinfo, PAGE_SIZE);
  42.498 +
  42.499 +    fprintf(stderr,"Save exit rc=%d\n",rc);
  42.500 +
  42.501 +    return !!rc;
  42.502 +}
  42.503 +
  42.504 +/*
  42.505 + * Local variables:
  42.506 + * mode: C
  42.507 + * c-set-style: "BSD"
  42.508 + * c-basic-offset: 4
  42.509 + * tab-width: 4
  42.510 + * indent-tabs-mode: nil
  42.511 + * End:
  42.512 + */
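
For readers tracing the new ia64 save path above, the stream it emits can be summarised by a minimal reader skeleton.  This is only a sketch, not the real xc_linux_restore: read_exact() is a hypothetical helper, the value of INVALID_MFN is assumed to match the saver, and the later records are described in comments rather than parsed.

    #include <stddef.h>
    #include <unistd.h>

    #define INVALID_MFN (~0UL)   /* assumed to match the saver */

    /* Hypothetical helper: loop over read() until len bytes have arrived. */
    static int read_exact(int fd, void *buf, size_t len)
    {
        while (len != 0) {
            ssize_t n = read(fd, buf, len);
            if (n <= 0)
                return 0;
            buf = (char *)buf + n;
            len -= n;
        }
        return 1;
    }

    /* Record order written by the saver above. */
    static int sketch_read_header(int io_fd)
    {
        unsigned long max_pfn, version;

        if (!read_exact(io_fd, &max_pfn, sizeof(max_pfn)))
            return -1;
        if (!read_exact(io_fd, &version, sizeof(version)) || version != 1)
            return -1;

        /* The remaining records, in order:
         *  - the DOM0_DOMAIN_SETUP record (op.u.domain_setup),
         *  - (pfn, page) pairs, terminated by a pfn equal to INVALID_MFN,
         *  - an unsigned int count of absent pfns, then those pfns in
         *    chunks of at most 1024 entries,
         *  - the vcpu_guest_context_t, the privregs page, and finally
         *    the shared_info page.
         */
        return 0;
    }
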
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/tools/libxc/ia64/xc_ia64_stubs.c	Fri Jul 28 10:51:38 2006 +0100
    43.3 @@ -0,0 +1,106 @@
    43.4 +#include "xg_private.h"
    43.5 +#include "xenguest.h"
    43.6 +#include "xc_private.h"
    43.7 +#include "xc_elf.h"
    43.8 +#include <stdlib.h>
    43.9 +#include <zlib.h>
   43.10 +#include "xen/arch-ia64.h"
   43.11 +#include <xen/hvm/ioreq.h>
   43.12 +
   43.13 +/* this is a very ugly way of getting FPSR_DEFAULT.  struct ia64_fpreg is
   43.14 + * mysteriously declared in two places: /usr/include/asm/fpu.h and
   43.15 + * /usr/include/bits/sigcontext.h.  The former also defines FPSR_DEFAULT,
   43.16 + * the latter doesn't but is included (indirectly) by xg_private.h */
   43.17 +#define __ASSEMBLY__
   43.18 +#include <asm/fpu.h>
   43.19 +#undef __IA64_UL
   43.20 +#define __IA64_UL(x)           ((unsigned long)(x))
   43.21 +#undef __ASSEMBLY__
   43.22 +
   43.23 +unsigned long
   43.24 +xc_ia64_fpsr_default(void)
   43.25 +{
   43.26 +    return FPSR_DEFAULT;
   43.27 +}
   43.28 +
   43.29 +/*
   43.30 +    The VMM uses put_user to copy the pfn list into the guest buffer; this
   43.31 +    may fail, and the VMM does not handle that case yet.
   43.32 +    This function touches the guest buffer to make sure the buffer's mapping
   43.33 +    is tracked by the VMM.
   43.34 + */
   43.35 +int
   43.36 +xc_ia64_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
   43.37 +                     unsigned int start_page, unsigned int nr_pages)
   43.38 +{
   43.39 +    dom0_op_t op;
   43.40 +    int num_pfns, ret;
   43.41 +    unsigned int __start_page, __nr_pages;
   43.42 +    unsigned long max_pfns;
   43.43 +    xen_pfn_t *__pfn_buf;
   43.44 +
   43.45 +    __start_page = start_page;
   43.46 +    __nr_pages = nr_pages;
   43.47 +    __pfn_buf = pfn_buf;
   43.48 +  
   43.49 +    while (__nr_pages) {
   43.50 +        max_pfns = ((unsigned long)__start_page << 32) | __nr_pages;
   43.51 +        op.cmd = DOM0_GETMEMLIST;
   43.52 +        op.u.getmemlist.domain   = (domid_t)domid;
   43.53 +        op.u.getmemlist.max_pfns = max_pfns;
   43.54 +        op.u.getmemlist.num_pfns = 0;
   43.55 +        set_xen_guest_handle(op.u.getmemlist.buffer, __pfn_buf);
   43.56 +
   43.57 +        if ((max_pfns != -1UL)
   43.58 +            && mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
   43.59 +            PERROR("Could not lock pfn list buffer");
   43.60 +            return -1;
   43.61 +        }
   43.62 +
   43.63 +        ret = do_dom0_op(xc_handle, &op);
   43.64 +
   43.65 +        if (max_pfns != -1UL)
   43.66 +            (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
   43.67 +
   43.68 +        if (max_pfns == -1UL)
   43.69 +            return 0;
   43.70 +        
   43.71 +        num_pfns = op.u.getmemlist.num_pfns;
   43.72 +        __start_page += num_pfns;
   43.73 +        __nr_pages -= num_pfns;
   43.74 +        __pfn_buf += num_pfns;
   43.75 +
   43.76 +        if (ret < 0)
   43.77 +            // dummy write to make sure this TLB mapping is tracked by the VMM
   43.78 +            *__pfn_buf = 0;
   43.79 +        else
   43.80 +            return nr_pages;
   43.81 +    }
   43.82 +    return nr_pages;
   43.83 +}
   43.84 +
   43.85 +int
   43.86 +xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
   43.87 +                unsigned long max_pfns)
   43.88 +{
   43.89 +    return xc_ia64_get_pfn_list (xc_handle, domid, pfn_buf, 0, max_pfns);
   43.90 +}
   43.91 +
   43.92 +long
   43.93 +xc_get_max_pages(int xc_handle, uint32_t domid)
   43.94 +{
   43.95 +    dom0_op_t op;
   43.96 +    op.cmd = DOM0_GETDOMAININFO;
   43.97 +    op.u.getdomaininfo.domain = (domid_t)domid;
   43.98 +    return (do_dom0_op(xc_handle, &op) < 0) ? -1 : op.u.getdomaininfo.max_pages;
   43.99 +}
  43.100 +
  43.101 +/*
  43.102 + * Local variables:
  43.103 + * mode: C
  43.104 + * c-set-style: "BSD"
  43.105 + * c-basic-offset: 4
  43.106 + * tab-width: 4
  43.107 + * indent-tabs-mode: nil
  43.108 + * End:
  43.109 + */
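
The chunked xc_ia64_get_pfn_list() above is what the new save path relies on every iteration.  A hedged usage sketch follows; the xen_pfn_t typedef and the prototype are repeated only so the fragment stands alone, and dump_pfn_list is an illustrative name, not a libxc entry point.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long xen_pfn_t;   /* assumption: matches libxc on ia64 */

    /* Prototype as introduced in ia64/xc_ia64_stubs.c above. */
    int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                             unsigned int start_page, unsigned int nr_pages);

    /* Fetch and print the whole pfn list, much as the saver does. */
    static int dump_pfn_list(int xc_handle, uint32_t domid, unsigned int max_pfn)
    {
        xen_pfn_t *pfns = malloc(max_pfn * sizeof(*pfns));
        unsigned int i;

        if (pfns == NULL)
            return -1;

        /* On success the call returns the number of entries requested. */
        if (xc_ia64_get_pfn_list(xc_handle, domid, pfns, 0, max_pfn)
            != (int)max_pfn) {
            free(pfns);
            return -1;
        }

        for (i = 0; i < max_pfn; i++)
            if (pfns[i] != (xen_pfn_t)~0UL)   /* skip possible INVALID_MFN holes */
                printf("pfn %u -> mfn %lx\n", i, pfns[i]);

        free(pfns);
        return 0;
    }
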
    44.1 --- a/tools/libxc/xc_hvm_build.c	Thu Jul 27 17:44:14 2006 -0500
    44.2 +++ b/tools/libxc/xc_hvm_build.c	Fri Jul 28 10:51:38 2006 +0100
    44.3 @@ -15,12 +15,6 @@
    44.4  
    44.5  #define HVM_LOADER_ENTR_ADDR  0x00100000
    44.6  
    44.7 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
    44.8 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    44.9 -#ifdef __x86_64__
   44.10 -#define L3_PROT (_PAGE_PRESENT)
   44.11 -#endif
   44.12 -
   44.13  #define E820MAX     128
   44.14  
   44.15  #define E820_RAM          1
   44.16 @@ -41,9 +35,6 @@ struct e820entry {
   44.17      uint32_t type;
   44.18  } __attribute__((packed));
   44.19  
   44.20 -#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
   44.21 -#define round_pgdown(_p)  ((_p)&PAGE_MASK)
   44.22 -
   44.23  static int
   44.24  parseelfimage(
   44.25      char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
   44.26 @@ -52,7 +43,7 @@ loadelfimage(
   44.27      char *elfbase, int xch, uint32_t dom, unsigned long *parray,
   44.28      struct domain_setup_info *dsi);
   44.29  
   44.30 -static unsigned char build_e820map(void *e820_page, unsigned long long mem_size)
   44.31 +static void build_e820map(void *e820_page, unsigned long long mem_size)
   44.32  {
   44.33      struct e820entry *e820entry =
   44.34          (struct e820entry *)(((unsigned char *)e820_page) + E820_MAP_OFFSET);
   44.35 @@ -115,7 +106,7 @@ static unsigned char build_e820map(void 
   44.36      e820entry[nr_map].type = E820_IO;
   44.37      nr_map++;
   44.38  
   44.39 -    return (*(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map);
   44.40 +    *(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map;
   44.41  }
   44.42  
   44.43  static void set_hvm_info_checksum(struct hvm_info_table *t)
   44.44 @@ -186,7 +177,6 @@ static int setup_guest(int xc_handle,
   44.45  
   44.46      shared_info_t *shared_info;
   44.47      void *e820_page;
   44.48 -    unsigned char e820_map_nr;
   44.49  
   44.50      struct domain_setup_info dsi;
   44.51      uint64_t v_end;
   44.52 @@ -261,7 +251,7 @@ static int setup_guest(int xc_handle,
   44.53                page_array[E820_MAP_PAGE >> PAGE_SHIFT])) == 0 )
   44.54          goto error_out;
   44.55      memset(e820_page, 0, PAGE_SIZE);
   44.56 -    e820_map_nr = build_e820map(e820_page, v_end);
   44.57 +    build_e820map(e820_page, v_end);
   44.58      munmap(e820_page, PAGE_SIZE);
   44.59  
   44.60      /* shared_info page starts its life empty. */
   44.61 @@ -311,23 +301,7 @@ static int setup_guest(int xc_handle,
   44.62      /*
   44.63       * Initial register values:
   44.64       */
   44.65 -    ctxt->user_regs.ds = 0;
   44.66 -    ctxt->user_regs.es = 0;
   44.67 -    ctxt->user_regs.fs = 0;
   44.68 -    ctxt->user_regs.gs = 0;
   44.69 -    ctxt->user_regs.ss = 0;
   44.70 -    ctxt->user_regs.cs = 0;
   44.71      ctxt->user_regs.eip = dsi.v_kernentry;
   44.72 -    ctxt->user_regs.edx = 0;
   44.73 -    ctxt->user_regs.eax = 0;
   44.74 -    ctxt->user_regs.esp = 0;
   44.75 -    ctxt->user_regs.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */
   44.76 -    ctxt->user_regs.ecx = 0;
   44.77 -    ctxt->user_regs.esi = 0;
   44.78 -    ctxt->user_regs.edi = 0;
   44.79 -    ctxt->user_regs.ebp = 0;
   44.80 -
   44.81 -    ctxt->user_regs.eflags = 0;
   44.82  
   44.83      return 0;
   44.84  
    45.1 --- a/tools/libxc/xc_ia64_stubs.c	Thu Jul 27 17:44:14 2006 -0500
    45.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.3 @@ -1,756 +0,0 @@
    45.4 -#include "xg_private.h"
    45.5 -#include "xenguest.h"
    45.6 -#include "xc_private.h"
    45.7 -#include "xc_elf.h"
    45.8 -#include <stdlib.h>
    45.9 -#include <zlib.h>
   45.10 -#include "xen/arch-ia64.h"
   45.11 -#include <xen/hvm/ioreq.h>
   45.12 -
   45.13 -/* this is a very ugly way of getting FPSR_DEFAULT.  struct ia64_fpreg is
   45.14 - * mysteriously declared in two places: /usr/include/asm/fpu.h and
   45.15 - * /usr/include/bits/sigcontext.h.  The former also defines FPSR_DEFAULT,
   45.16 - * the latter doesn't but is included (indirectly) by xg_private.h */
   45.17 -#define __ASSEMBLY__
   45.18 -#include <asm/fpu.h>
   45.19 -#undef __IA64_UL
   45.20 -#define __IA64_UL(x)           ((unsigned long)(x))
   45.21 -#undef __ASSEMBLY__
   45.22 -
   45.23 -unsigned long xc_ia64_fpsr_default(void)
   45.24 -{
   45.25 -        return FPSR_DEFAULT;
   45.26 -}
   45.27 -
   45.28 -int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
   45.29 -                  uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
   45.30 -                  int (*suspend)(int domid))
   45.31 -{
   45.32 -    PERROR("xc_linux_save not implemented\n");
   45.33 -    return -1;
   45.34 -}
   45.35 -
   45.36 -int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, unsigned long nr_pfns,
   45.37 -                     unsigned int store_evtchn, unsigned long *store_mfn,
   45.38 -                     unsigned int console_evtchn, unsigned long *console_mfn)
   45.39 -{
   45.40 -    PERROR("xc_linux_restore not implemented\n");
   45.41 -    return -1;
   45.42 -}
   45.43 -
   45.44 -/*  
   45.45 -    VMM uses put_user to copy pfn_list to guest buffer, this maybe fail,
   45.46 -    VMM doesn't handle this now.
   45.47 -    This method will touch guest buffer to make sure the buffer's mapping
   45.48 -    is tracked by VMM,
   45.49 - */
   45.50 -int xc_ia64_get_pfn_list(int xc_handle,
   45.51 -                         uint32_t domid,
   45.52 -                         xen_pfn_t *pfn_buf,
   45.53 -                         unsigned int start_page,
   45.54 -                         unsigned int nr_pages)
   45.55 -{
   45.56 -    dom0_op_t op;
   45.57 -    int num_pfns,ret;
   45.58 -    unsigned int __start_page, __nr_pages;
   45.59 -    unsigned long max_pfns;
   45.60 -    xen_pfn_t *__pfn_buf;
   45.61 -
   45.62 -    __start_page = start_page;
   45.63 -    __nr_pages = nr_pages;
   45.64 -    __pfn_buf = pfn_buf;
   45.65 -  
   45.66 -    while(__nr_pages){
   45.67 -        max_pfns = ((unsigned long)__start_page << 32) | __nr_pages;
   45.68 -        op.cmd = DOM0_GETMEMLIST;
   45.69 -        op.u.getmemlist.domain   = (domid_t)domid;
   45.70 -        op.u.getmemlist.max_pfns = max_pfns;
   45.71 -        op.u.getmemlist.num_pfns = 0;
   45.72 -        set_xen_guest_handle(op.u.getmemlist.buffer, __pfn_buf);
   45.73 -
   45.74 -        if ( (max_pfns != -1UL)
   45.75 -            && mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0 )
   45.76 -        {
   45.77 -            PERROR("Could not lock pfn list buffer");
   45.78 -            return -1;
   45.79 -        }
   45.80 -
   45.81 -        ret = do_dom0_op(xc_handle, &op);
   45.82 -
   45.83 -        if (max_pfns != -1UL)
   45.84 -            (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
   45.85 -
   45.86 -        if (max_pfns == -1UL)
   45.87 -            return 0;
   45.88 -        
   45.89 -        num_pfns = op.u.getmemlist.num_pfns;
   45.90 -        __start_page += num_pfns;
   45.91 -        __nr_pages -= num_pfns;
   45.92 -        __pfn_buf += num_pfns;
   45.93 -
   45.94 -        if (ret < 0)
   45.95 -            // dummy write to make sure this tlb mapping is tracked by VMM
   45.96 -            *__pfn_buf = 0;
   45.97 -        else
   45.98 -            return nr_pages;
   45.99 -    }
  45.100 -    return nr_pages;
  45.101 -}
  45.102 -
  45.103 -long xc_get_max_pages(int xc_handle, uint32_t domid)
  45.104 -{
  45.105 -    dom0_op_t op;
  45.106 -    op.cmd = DOM0_GETDOMAININFO;
  45.107 -    op.u.getdomaininfo.domain = (domid_t)domid;
  45.108 -    return (do_dom0_op(xc_handle, &op) < 0) ?
  45.109 -        -1 : op.u.getdomaininfo.max_pages;
  45.110 -}
  45.111 -
  45.112 -int xc_ia64_copy_to_domain_pages(int xc_handle, uint32_t domid,
  45.113 -        void* src_page, unsigned long dst_pfn, int nr_pages)
  45.114 -{
  45.115 -    // N.B. gva should be page aligned
  45.116 -
  45.117 -    xen_pfn_t *page_array = NULL;
  45.118 -    int i;
  45.119 -
  45.120 -    if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL ){
  45.121 -        PERROR("Could not allocate memory");
  45.122 -        goto error_out;
  45.123 -    }
  45.124 -    if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
  45.125 -                dst_pfn, nr_pages) != nr_pages ){
  45.126 -        PERROR("Could not get the page frame list");
  45.127 -        goto error_out;
  45.128 -    }
  45.129 -
  45.130 -    for ( i=0; i< nr_pages; i++ ){
  45.131 -        if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
  45.132 -                    src_page + (i << PAGE_SHIFT)))
  45.133 -            goto error_out;
  45.134 -    }
  45.135 -    free(page_array);
  45.136 -    return 0;
  45.137 -
  45.138 -error_out:
  45.139 -    free(page_array);
  45.140 -    return -1;
  45.141 -}
  45.142 -
  45.143 -
  45.144 -#define HOB_SIGNATURE         0x3436474953424f48        // "HOBSIG64"
  45.145 -#define GFW_HOB_START         ((4UL<<30)-(14UL<<20))    // 4G - 14M
  45.146 -#define GFW_HOB_SIZE          (1UL<<20)                 // 1M
  45.147 -#define RAW_GFW_START_NR(s)   ((s) >> PAGE_SHIFT)
  45.148 -#define RAW_GFW_HOB_START_NR(s)                \
  45.149 -        (RAW_GFW_START_NR(s) + ((GFW_HOB_START - GFW_START) >> PAGE_SHIFT))
  45.150 -#define RAW_GFW_IMAGE_START_NR(s,i)            \
  45.151 -        (RAW_GFW_START_NR(s) + (((GFW_SIZE - (i))) >> PAGE_SHIFT))
  45.152 -#define RAW_IO_PAGE_START_NR(s)                \
  45.153 -        (RAW_GFW_START_NR(s) + (GFW_SIZE >> PAGE_SHIFT))
  45.154 -#define RAW_STORE_PAGE_START_NR(s)             \
  45.155 -        (RAW_IO_PAGE_START_NR(s) + (IO_PAGE_SIZE >> PAGE_SHFIT))
  45.156 -
  45.157 -typedef struct {
  45.158 -    unsigned long signature;
  45.159 -    unsigned int  type;
  45.160 -    unsigned int  length;
  45.161 -} HOB_GENERIC_HEADER;
  45.162 -
  45.163 -/*
  45.164 - * INFO HOB is the first data data in one HOB list
  45.165 - * it contains the control information of the HOB list
  45.166 - */
  45.167 -typedef struct {
  45.168 -    HOB_GENERIC_HEADER  header;
  45.169 -    unsigned long       length;    // current length of hob
  45.170 -    unsigned long       cur_pos;   // current poisiton of hob
  45.171 -    unsigned long       buf_size;  // size of hob buffer
  45.172 -}HOB_INFO;
  45.173 -
  45.174 -typedef struct{
  45.175 -    unsigned long start;
  45.176 -    unsigned long size;
  45.177 -}hob_mem_t;
  45.178 -
  45.179 -typedef enum {
  45.180 -    HOB_TYPE_INFO=0,
  45.181 -    HOB_TYPE_TERMINAL,
  45.182 -    HOB_TYPE_MEM,
  45.183 -    HOB_TYPE_PAL_BUS_GET_FEATURES_DATA,
  45.184 -    HOB_TYPE_PAL_CACHE_SUMMARY,
  45.185 -    HOB_TYPE_PAL_MEM_ATTRIB,
  45.186 -    HOB_TYPE_PAL_CACHE_INFO,
  45.187 -    HOB_TYPE_PAL_CACHE_PROT_INFO,
  45.188 -    HOB_TYPE_PAL_DEBUG_INFO,
  45.189 -    HOB_TYPE_PAL_FIXED_ADDR,
  45.190 -    HOB_TYPE_PAL_FREQ_BASE,
  45.191 -    HOB_TYPE_PAL_FREQ_RATIOS,
  45.192 -    HOB_TYPE_PAL_HALT_INFO,
  45.193 -    HOB_TYPE_PAL_PERF_MON_INFO,
  45.194 -    HOB_TYPE_PAL_PROC_GET_FEATURES,
  45.195 -    HOB_TYPE_PAL_PTCE_INFO,
  45.196 -    HOB_TYPE_PAL_REGISTER_INFO,
  45.197 -    HOB_TYPE_PAL_RSE_INFO,
  45.198 -    HOB_TYPE_PAL_TEST_INFO,
  45.199 -    HOB_TYPE_PAL_VM_SUMMARY,
  45.200 -    HOB_TYPE_PAL_VM_INFO,
  45.201 -    HOB_TYPE_PAL_VM_PAGE_SIZE,
  45.202 -    HOB_TYPE_NR_VCPU,
  45.203 -    HOB_TYPE_MAX
  45.204 -}hob_type_t;
  45.205 -
  45.206 -static int hob_init( void  *buffer ,unsigned long buf_size);
  45.207 -static int add_pal_hob(void* hob_buf);
  45.208 -static int add_mem_hob(void* hob_buf, unsigned long dom_mem_size);
  45.209 -static int add_vcpus_hob(void* hob_buf, unsigned long nr_vcpu);
  45.210 -static int build_hob (void* hob_buf, unsigned long hob_buf_size,
  45.211 -                  unsigned long dom_mem_size, unsigned long vcpus);
  45.212 -static int load_hob(int xc_handle,uint32_t dom, void *hob_buf,
  45.213 -		unsigned long dom_mem_size);
  45.214 -
  45.215 -int xc_ia64_build_hob(int xc_handle, uint32_t dom, unsigned long memsize, 
  45.216 -                      unsigned long vcpus){
  45.217 -
  45.218 -    char   *hob_buf;
  45.219 -
  45.220 -    hob_buf = malloc (GFW_HOB_SIZE);
  45.221 -    if (hob_buf == NULL) {
  45.222 -        PERROR("Could not allocate hob");
  45.223 -        return -1;
  45.224 -    }
  45.225 -
  45.226 -    if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize, vcpus) < 0){
  45.227 -        free (hob_buf);
  45.228 -        PERROR("Could not build hob");
  45.229 -        return -1;
  45.230 -    }
  45.231 -
  45.232 -    if ( load_hob( xc_handle, dom, hob_buf, memsize) < 0){
  45.233 -        free (hob_buf);
  45.234 -        PERROR("Could not load hob");
  45.235 -       return -1;
  45.236 -    }
  45.237 -    free (hob_buf);
  45.238 -    return 0;
  45.239 -
  45.240 -}
  45.241 -static int
  45.242 -hob_init( void  *buffer ,unsigned long buf_size)
  45.243 -{
  45.244 -    HOB_INFO *phit;
  45.245 -    HOB_GENERIC_HEADER     *terminal;
  45.246 -
  45.247 -    if (sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER) > buf_size){
  45.248 -        // buffer too small
  45.249 -        return -1;
  45.250 -    }
  45.251 -
  45.252 -    phit = (HOB_INFO*)buffer;
  45.253 -    phit->header.signature = HOB_SIGNATURE;
  45.254 -    phit->header.type = HOB_TYPE_INFO;
  45.255 -    phit->header.length = sizeof(HOB_INFO);
  45.256 -    phit->length = sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER);
  45.257 -    phit->cur_pos = 0;
  45.258 -    phit->buf_size = buf_size;
  45.259 -
  45.260 -    terminal = (HOB_GENERIC_HEADER*) (buffer + sizeof(HOB_INFO));
  45.261 -    terminal->signature= HOB_SIGNATURE;
  45.262 -    terminal->type = HOB_TYPE_TERMINAL;
  45.263 -    terminal->length = sizeof(HOB_GENERIC_HEADER);
  45.264 -
  45.265 -    return 0;
  45.266 -}
  45.267 -
  45.268 -/*
  45.269 - *  Add a new HOB to the HOB List.
  45.270 - *
  45.271 - *  hob_start  -  start address of hob buffer
  45.272 - *  type       -  type of the hob to be added
  45.273 - *  data       -  data of the hob to be added
  45.274 - *  data_size  -  size of the data
  45.275 - */
  45.276 -static int
  45.277 -hob_add(
  45.278 -     void*  hob_start,
  45.279 -     int    type,
  45.280 -     void*  data,
  45.281 -     int    data_size
  45.282 -)
  45.283 -{
  45.284 -    HOB_INFO *phit;
  45.285 -    HOB_GENERIC_HEADER     *newhob,*tail;
  45.286 -
  45.287 -    phit = (HOB_INFO*)hob_start;
  45.288 -
  45.289 -    if (phit->length + data_size > phit->buf_size){
  45.290 -        // no space for new hob
  45.291 -        return -1;
  45.292 -    }
  45.293 -
  45.294 -    //append new HOB
  45.295 -    newhob = (HOB_GENERIC_HEADER*)
  45.296 -        (hob_start + phit->length - sizeof(HOB_GENERIC_HEADER));
  45.297 -    newhob->signature = HOB_SIGNATURE;
  45.298 -    newhob->type = type;
  45.299 -    newhob->length = data_size + sizeof(HOB_GENERIC_HEADER);
  45.300 -    memcpy((void*)newhob + sizeof(HOB_GENERIC_HEADER), data, data_size);
  45.301 -
  45.302 -    // append terminal HOB
  45.303 -    tail = (HOB_GENERIC_HEADER*) ( hob_start + phit->length + data_size);
  45.304 -    tail->signature = HOB_SIGNATURE;
  45.305 -    tail->type = HOB_TYPE_TERMINAL;
  45.306 -    tail->length = sizeof(HOB_GENERIC_HEADER);
  45.307 -
  45.308 -    // adjust HOB list length
  45.309 -    phit->length += sizeof(HOB_GENERIC_HEADER)+ data_size;
  45.310 -
  45.311 -    return 0;
  45.312 -
  45.313 -}
  45.314 -
  45.315 -int get_hob_size(void* hob_buf){
  45.316 -
  45.317 -    HOB_INFO *phit = (HOB_INFO*)hob_buf;
  45.318 -
  45.319 -    if (phit->header.signature != HOB_SIGNATURE){
  45.320 -        PERROR("xc_get_hob_size:Incorrect signature");
  45.321 -        return -1;
  45.322 -    }
  45.323 -    return phit->length;
  45.324 -}
  45.325 -
  45.326 -int build_hob (void* hob_buf, unsigned long hob_buf_size,
  45.327 -                  unsigned long dom_mem_size, unsigned long vcpus)
  45.328 -{
  45.329 -    //Init HOB List
  45.330 -    if (hob_init (hob_buf, hob_buf_size)<0){
  45.331 -        PERROR("buffer too small");
  45.332 -        goto err_out;
  45.333 -    }
  45.334 -
  45.335 -    if ( add_mem_hob( hob_buf,dom_mem_size) < 0){
  45.336 -        PERROR("Add memory hob failed, buffer too small");
  45.337 -        goto err_out;
  45.338 -    }
  45.339 -
  45.340 -    if ( add_vcpus_hob(hob_buf, vcpus)<0){
  45.341 -        PERROR("Add NR_VCPU hob failed, buffer too small");
  45.342 -        goto err_out;
  45.343 -    }
  45.344 -
  45.345 -    if ( add_pal_hob( hob_buf ) < 0 ){
  45.346 -        PERROR("Add PAL hob failed, buffer too small");
  45.347 -        goto err_out;
  45.348 -    }
  45.349 -
  45.350 -    return 0;
  45.351 -
  45.352 -err_out:
  45.353 -    return -1;
  45.354 -}
  45.355 -
  45.356 -static int
  45.357 -load_hob(int xc_handle, uint32_t dom, void *hob_buf,
  45.358 -         unsigned long dom_mem_size)
  45.359 -{
  45.360 -    // hob_buf should be page aligned
  45.361 -    int hob_size;
  45.362 -    int nr_pages;
  45.363 -
  45.364 -    if ((hob_size = get_hob_size(hob_buf)) < 0){
  45.365 -        PERROR("Invalid hob data");
  45.366 -        return -1;
  45.367 -    }
  45.368 -
  45.369 -    if (hob_size > GFW_HOB_SIZE){
  45.370 -        PERROR("No enough memory for hob data");
  45.371 -        return -1;
  45.372 -    }
  45.373 -
  45.374 -    nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
  45.375 -
  45.376 -    return xc_ia64_copy_to_domain_pages(xc_handle, dom,
  45.377 -            hob_buf, RAW_GFW_HOB_START_NR(dom_mem_size), nr_pages );
  45.378 -}
  45.379 -
  45.380 -#define MIN(x, y) ((x) < (y)) ? (x) : (y)
  45.381 -static int
  45.382 -add_mem_hob(void* hob_buf, unsigned long dom_mem_size){
  45.383 -    hob_mem_t memhob;
  45.384 -
  45.385 -    // less than 3G
  45.386 -    memhob.start = 0;
  45.387 -    memhob.size = MIN(dom_mem_size, 0xC0000000);
  45.388 -
  45.389 -    if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0){
  45.390 -        return -1;
  45.391 -    }
  45.392 -
  45.393 -    if (dom_mem_size > 0xC0000000) {
  45.394 -        // 4G ~ 4G+remain
  45.395 -        memhob.start = 0x100000000; //4G
  45.396 -        memhob.size = dom_mem_size - 0xC0000000;
  45.397 -        if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0)
  45.398 -            return -1;
  45.399 -    }
  45.400 -    return 0;
  45.401 -}
  45.402 -
  45.403 -static int 
  45.404 -add_vcpus_hob(void* hob_buf, unsigned long vcpus)
  45.405 -{
  45.406 -    return hob_add(hob_buf, HOB_TYPE_NR_VCPU, &vcpus, sizeof(vcpus));
  45.407 -}
  45.408 -
  45.409 -unsigned char config_pal_bus_get_features_data[24] = {
  45.410 -    0, 0, 0, 32, 0, 0, 240, 189, 0, 0, 0, 0, 0, 0,
  45.411 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  45.412 -};
  45.413 -unsigned char config_pal_cache_summary[16] = {
  45.414 -    3, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0};
  45.415 -unsigned char config_pal_mem_attrib[8] =    {
  45.416 -    241, 0, 0, 0, 0, 0, 0, 0
  45.417 -};
  45.418 -unsigned char config_pal_cache_info[152] = {
  45.419 -    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.420 -    6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12,
  45.421 -    49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 7, 0, 1,
  45.422 -    0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0,
  45.423 -    0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0,
  45.424 -    12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
  45.425 -    7, 7, 5, 9, 11, 0, 0, 4, 0, 12, 15, 49, 0, 254, 255,
  45.426 -    255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9,
  45.427 -    11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0,
  45.428 -    0, 3, 12, 7, 7, 7, 14, 1, 3, 0, 0, 192, 0, 12, 20, 49, 0
  45.429 -};
  45.430 -unsigned char config_pal_cache_prot_info[200] = {
  45.431 -    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.432 -    45, 0, 16, 8, 0, 76, 12, 64, 0, 0, 0, 0, 0, 0, 0,
  45.433 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.434 -    8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0,
  45.435 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32,
  45.436 -    0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.437 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0,
  45.438 -    112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.439 -    0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255,
  45.440 -    32, 0, 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.441 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160,
  45.442 -    12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.443 -    0, 0, 0
  45.444 -};
  45.445 -unsigned char config_pal_debug_info[16] = {
  45.446 -    2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
  45.447 -};
  45.448 -unsigned char config_pal_fixed_addr[8] = {
  45.449 -    0, 0, 0, 0, 0, 0, 0, 0
  45.450 -};
  45.451 -unsigned char config_pal_freq_base[8] = {
  45.452 -    109, 219, 182, 13, 0, 0, 0, 0
  45.453 -};
  45.454 -unsigned char config_pal_freq_ratios[24] = {
  45.455 -    11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4,
  45.456 -    0, 0, 0, 7, 0, 0, 0
  45.457 -};
  45.458 -unsigned char config_pal_halt_info[64] = {
  45.459 -    0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0,
  45.460 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.461 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.462 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  45.463 -};
  45.464 -unsigned char config_pal_perf_mon_info[136] = {
  45.465 -    12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0,
  45.466 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.467 -    0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255,
  45.468 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.469 -    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  45.470 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.471 -    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  45.472 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.473 -    0, 0, 0, 0, 0, 0, 0, 0
  45.474 -};
  45.475 -unsigned char config_pal_proc_get_features[104] = {
  45.476 -    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.477 -    0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0,
  45.478 -    0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
  45.479 -    231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0,
  45.480 -    0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
  45.481 -    63, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0,
  45.482 -    0, 0, 0, 0, 0, 0, 0, 0
  45.483 -};
  45.484 -unsigned char config_pal_ptce_info[24] = {
  45.485 -    0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
  45.486 -    0, 0, 0, 0, 0, 0, 0, 0
  45.487 -};
  45.488 -unsigned char config_pal_register_info[64] = {
  45.489 -    255, 0, 47, 127, 17, 17, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0,
  45.490 -    255, 208, 128, 238, 238, 0, 0, 248, 255, 255, 255, 255, 255, 0, 0, 7, 3,
  45.491 -    251, 3, 0, 0, 0, 0, 255, 7, 3, 0, 0, 0, 0, 0, 248, 252, 4,
  45.492 -    252, 255, 255, 255, 255, 2, 248, 252, 255, 255, 255, 255, 255
  45.493 -};
  45.494 -unsigned char config_pal_rse_info[16] = {
  45.495 -    96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  45.496 -};
  45.497 -unsigned char config_pal_test_info[48] = {
  45.498 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.499 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.500 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  45.501 -};
  45.502 -unsigned char config_pal_vm_summary[16] = {
  45.503 -    101, 18, 15, 2, 7, 7, 4, 2, 59, 18, 0, 0, 0, 0, 0, 0
  45.504 -};
  45.505 -unsigned char config_pal_vm_info[104] = {
  45.506 -    2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
  45.507 -    32, 32, 0, 0, 0, 0, 0, 0, 112, 85, 21, 0, 0, 0, 0, 0, 0,
  45.508 -    0, 0, 0, 0, 0, 0, 1, 32, 32, 0, 0, 0, 0, 0, 0, 112, 85,
  45.509 -    21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 128, 0,
  45.510 -    4, 0, 0, 0, 0, 112, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  45.511 -    0, 0, 0, 1, 128, 128, 0, 4, 0, 0, 0, 0, 112, 85, 0, 0, 0, 0, 0
  45.512 -};
  45.513 -unsigned char config_pal_vm_page_size[16] = {
  45.514 -    0, 112, 85, 21, 0, 0, 0, 0, 0, 112, 85, 21, 0, 0, 0, 0
  45.515 -};
  45.516 -
  45.517 -typedef struct{
  45.518 -    hob_type_t type;
  45.519 -    void* data;
  45.520 -    unsigned long size;
  45.521 -}hob_batch_t;
  45.522 -
  45.523 -hob_batch_t hob_batch[]={
  45.524 -    {  HOB_TYPE_PAL_BUS_GET_FEATURES_DATA,
  45.525 -        &config_pal_bus_get_features_data,
  45.526 -        sizeof(config_pal_bus_get_features_data)
  45.527 -    },
  45.528 -    {  HOB_TYPE_PAL_CACHE_SUMMARY,
  45.529 -        &config_pal_cache_summary,
  45.530 -        sizeof(config_pal_cache_summary)
  45.531 -    },
  45.532 -    {  HOB_TYPE_PAL_MEM_ATTRIB,
  45.533 -        &config_pal_mem_attrib,
  45.534 -        sizeof(config_pal_mem_attrib)
  45.535 -    },
  45.536 -    {  HOB_TYPE_PAL_CACHE_INFO,
  45.537 -        &config_pal_cache_info,
  45.538 -        sizeof(config_pal_cache_info)
  45.539 -    },
  45.540 -    {  HOB_TYPE_PAL_CACHE_PROT_INFO,
  45.541 -        &config_pal_cache_prot_info,
  45.542 -        sizeof(config_pal_cache_prot_info)
  45.543 -    },
  45.544 -    {  HOB_TYPE_PAL_DEBUG_INFO,
  45.545 -        &config_pal_debug_info,
  45.546 -        sizeof(config_pal_debug_info)
  45.547 -    },
  45.548 -    {  HOB_TYPE_PAL_FIXED_ADDR,
  45.549 -        &config_pal_fixed_addr,
  45.550 -        sizeof(config_pal_fixed_addr)
  45.551 -    },
  45.552 -    {  HOB_TYPE_PAL_FREQ_BASE,
  45.553 -        &config_pal_freq_base,
  45.554 -        sizeof(config_pal_freq_base)
  45.555 -    },
  45.556 -    {  HOB_TYPE_PAL_FREQ_RATIOS,
  45.557 -        &config_pal_freq_ratios,
  45.558 -        sizeof(config_pal_freq_ratios)
  45.559 -    },
  45.560 -    {  HOB_TYPE_PAL_HALT_INFO,
  45.561 -        &config_pal_halt_info,
  45.562 -        sizeof(config_pal_halt_info)
  45.563 -    },
  45.564 -    {  HOB_TYPE_PAL_PERF_MON_INFO,
  45.565 -        &config_pal_perf_mon_info,
  45.566 -        sizeof(config_pal_perf_mon_info)
  45.567 -    },
  45.568 -    {  HOB_TYPE_PAL_PROC_GET_FEATURES,
  45.569 -        &config_pal_proc_get_features,
  45.570 -        sizeof(config_pal_proc_get_features)
  45.571 -    },
  45.572 -    {  HOB_TYPE_PAL_PTCE_INFO,
  45.573 -        &config_pal_ptce_info,
  45.574 -        sizeof(config_pal_ptce_info)
  45.575 -    },
  45.576 -    {  HOB_TYPE_PAL_REGISTER_INFO,
  45.577 -        &config_pal_register_info,
  45.578 -        sizeof(config_pal_register_info)
  45.579 -    },
  45.580 -    {  HOB_TYPE_PAL_RSE_INFO,
  45.581 -        &config_pal_rse_info,
  45.582 -        sizeof(config_pal_rse_info)
  45.583 -    },
  45.584 -    {  HOB_TYPE_PAL_TEST_INFO,
  45.585 -        &config_pal_test_info,
  45.586 -        sizeof(config_pal_test_info)
  45.587 -    },
  45.588 -    {  HOB_TYPE_PAL_VM_SUMMARY,
  45.589 -        &config_pal_vm_summary,
  45.590 -        sizeof(config_pal_vm_summary)
  45.591 -    },
  45.592 -    {  HOB_TYPE_PAL_VM_INFO,
  45.593 -        &config_pal_vm_info,
  45.594 -        sizeof(config_pal_vm_info)
  45.595 -    },
  45.596 -    {  HOB_TYPE_PAL_VM_PAGE_SIZE,
  45.597 -        &config_pal_vm_page_size,
  45.598 -        sizeof(config_pal_vm_page_size)
  45.599 -    },
  45.600 -};
  45.601 -
  45.602 -static int add_pal_hob(void* hob_buf){
  45.603 -    int i;
  45.604 -    for (i=0; i<sizeof(hob_batch)/sizeof(hob_batch_t); i++){
  45.605 -        if (hob_add(hob_buf, hob_batch[i].type,
  45.606 -                    hob_batch[i].data,
  45.607 -                    hob_batch[i].size)<0)
  45.608 -            return -1;
  45.609 -    }
  45.610 -    return 0;
  45.611 -}
  45.612 -
  45.613 -static int setup_guest(  int xc_handle,
  45.614 -                         uint32_t dom, unsigned long memsize,
  45.615 -                         char *image, unsigned long image_size,
  45.616 -                         uint32_t vcpus,
  45.617 -                         unsigned int store_evtchn,
  45.618 -                         unsigned long *store_mfn)
  45.619 -{
  45.620 -    unsigned long page_array[2];
  45.621 -    shared_iopage_t *sp;
  45.622 -    int i;
  45.623 -    unsigned long dom_memsize = (memsize << 20);
  45.624 -
  45.625 -    if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
  45.626 -        PERROR("Guest firmware size is incorrect [%ld]?", image_size);
  45.627 -        return -1;
  45.628 -    }
  45.629 -
  45.630 -    /* Load guest firmware */
  45.631 -    if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
  45.632 -            image, RAW_GFW_IMAGE_START_NR(dom_memsize, image_size),
  45.633 -            image_size>>PAGE_SHIFT)) {
  45.634 -        PERROR("Could not load guest firmware into domain");
  45.635 -        goto error_out;
  45.636 -    }
  45.637 -
  45.638 -    /* Hand-off state passed to guest firmware */
  45.639 -    if (xc_ia64_build_hob(xc_handle, dom, dom_memsize, (unsigned long)vcpus) < 0){
  45.640 -        PERROR("Could not build hob\n");
  45.641 -       goto error_out;
  45.642 -    }
  45.643 -
  45.644 -    /* Retrieve special pages like io, xenstore, etc. */
  45.645 -    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
  45.646 -                              RAW_IO_PAGE_START_NR(dom_memsize), 2) != 2 )
  45.647 -    {
  45.648 -        PERROR("Could not get the page frame list");
  45.649 -        goto error_out;
  45.650 -    }
  45.651 -
  45.652 -    *store_mfn = page_array[1];
  45.653 -    if ((sp = (shared_iopage_t *) xc_map_foreign_range(
  45.654 -                    xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
  45.655 -                    page_array[0])) == 0)
  45.656 -        goto error_out;
  45.657 -    memset(sp, 0, PAGE_SIZE);
  45.658 -
  45.659 -    for (i = 0; i < vcpus; i++) {
  45.660 -        uint32_t vp_eport;
  45.661 -
  45.662 -        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
  45.663 -        if (vp_eport < 0) {
  45.664 -            DPRINTF("Couldn't get unbound port from VMX guest.\n");
  45.665 -            goto error_out;
  45.666 -        }
  45.667 -        sp->vcpu_iodata[i].vp_eport = vp_eport;
  45.668 -    }
  45.669 -
  45.670 -    munmap(sp, PAGE_SIZE);
  45.671 -
  45.672 -    return 0;
  45.673 -
  45.674 - error_out:
  45.675 -    return -1;
  45.676 -}
  45.677 -
  45.678 -int xc_hvm_build(int xc_handle,
  45.679 -                 uint32_t domid,
  45.680 -                 int memsize,
  45.681 -                 const char *image_name,
  45.682 -                 unsigned int vcpus,
  45.683 -                 unsigned int pae,
  45.684 -                 unsigned int acpi,
  45.685 -                 unsigned int apic,
  45.686 -                 unsigned int store_evtchn,
  45.687 -                 unsigned long *store_mfn)
  45.688 -{
  45.689 -    dom0_op_t launch_op, op;
  45.690 -    int rc ;
  45.691 -    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
  45.692 -    char         *image = NULL;
  45.693 -    unsigned long image_size;
  45.694 -    unsigned long nr_pages;
  45.695 -
  45.696 -    if ( (nr_pages = xc_get_max_pages(xc_handle, domid)) < 0 )
  45.697 -    {
  45.698 -        PERROR("Could not find total pages for domain");
  45.699 -        goto error_out;
  45.700 -    }
  45.701 -
  45.702 -    if ( (image = xc_read_image(image_name, &image_size)) == NULL ){
  45.703 -        PERROR("Could not read guest firmware image %s",image_name);
  45.704 -        goto error_out;
  45.705 -    }
  45.706 -
  45.707 -    image_size = (image_size + PAGE_SIZE - 1) & PAGE_MASK;
  45.708 -
  45.709 -    if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ){
  45.710 -        PERROR("Unable to mlock ctxt");
  45.711 -        return 1;
  45.712 -    }
  45.713 -
  45.714 -    op.cmd = DOM0_GETDOMAININFO;
  45.715 -    op.u.getdomaininfo.domain = (domid_t)domid;
  45.716 -    if ( (do_dom0_op(xc_handle, &op) < 0) ||
  45.717 -         ((uint16_t)op.u.getdomaininfo.domain != domid) ) {
  45.718 -        PERROR("Could not get info on domain");
  45.719 -        goto error_out;
  45.720 -    }
  45.721 -
  45.722 -    memset(ctxt, 0, sizeof(*ctxt));
  45.723 -
  45.724 -    if ( setup_guest(xc_handle, domid, (unsigned long)memsize, image,
  45.725 -                     image_size, vcpus, store_evtchn, store_mfn ) < 0 ){
  45.726 -        ERROR("Error constructing guest OS");
  45.727 -        goto error_out;
  45.728 -    }
  45.729 -
  45.730 -    free(image);
  45.731 -
  45.732 -    ctxt->flags = VGCF_VMX_GUEST;
  45.733 -    ctxt->user_regs.cr_iip = 0x80000000ffffffb0UL;
  45.734 -    ctxt->privregs = 0;
  45.735 -
  45.736 -    memset( &launch_op, 0, sizeof(launch_op) );
  45.737 -
  45.738 -    launch_op.u.setvcpucontext.domain = (domid_t)domid;
  45.739 -    launch_op.u.setvcpucontext.vcpu   = 0;
  45.740 -    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
  45.741 -
  45.742 -    launch_op.cmd = DOM0_SETVCPUCONTEXT;
  45.743 -    rc = do_dom0_op(xc_handle, &launch_op);
  45.744 -    return rc;
  45.745 -
  45.746 - error_out:
  45.747 -    free(image);
  45.748 -    return -1;
  45.749 -}
  45.750 -
  45.751 -/*
  45.752 - * Local variables:
  45.753 - * mode: C
  45.754 - * c-set-style: "BSD"
  45.755 - * c-basic-offset: 4
  45.756 - * tab-width: 4
  45.757 - * indent-tabs-mode: nil
  45.758 - * End:
  45.759 - */
    46.1 --- a/tools/libxc/xc_linux_build.c	Thu Jul 27 17:44:14 2006 -0500
    46.2 +++ b/tools/libxc/xc_linux_build.c	Fri Jul 28 10:51:38 2006 +0100
    46.3 @@ -2,6 +2,7 @@
    46.4   * xc_linux_build.c
    46.5   */
    46.6  
    46.7 +#include <stddef.h>
    46.8  #include "xg_private.h"
    46.9  #include "xc_private.h"
   46.10  #include <xenctrl.h>
   46.11 @@ -473,6 +474,11 @@ static int setup_guest(int xc_handle,
   46.12      unsigned long v_end;
   46.13      unsigned long start_page, pgnr;
   46.14      start_info_t *start_info;
   46.15 +    unsigned long start_info_mpa;
   46.16 +    struct xen_ia64_boot_param *bp;
   46.17 +    shared_info_t *shared_info;
   46.18 +    int i;
   46.19 +    DECLARE_DOM0_OP;
   46.20      int rc;
   46.21  
   46.22      rc = probeimageformat(image, image_size, &load_funcs);
   46.23 @@ -489,6 +495,18 @@ static int setup_guest(int xc_handle,
   46.24      vinitrd_start    = round_pgup(dsi.v_end);
   46.25      vinitrd_end      = vinitrd_start + initrd->len;
   46.26      v_end            = round_pgup(vinitrd_end);
   46.27 +    start_info_mpa = (nr_pages - 3) << PAGE_SHIFT;
   46.28 +
   46.29 +    /* Build firmware.  */
   46.30 +    memset(&op.u.domain_setup, 0, sizeof(op.u.domain_setup));
   46.31 +    op.u.domain_setup.flags = 0;
   46.32 +    op.u.domain_setup.domain = (domid_t)dom;
   46.33 +    op.u.domain_setup.bp = start_info_mpa + sizeof (start_info_t);
   46.34 +    op.u.domain_setup.maxmem = (nr_pages - 3) << PAGE_SHIFT;
   46.35 +    
   46.36 +    op.cmd = DOM0_DOMAIN_SETUP;
   46.37 +    if ( xc_dom0_op(xc_handle, &op) )
   46.38 +        goto error_out;
   46.39  
   46.40      start_page = dsi.v_start >> PAGE_SHIFT;
   46.41      pgnr = (v_end - dsi.v_start) >> PAGE_SHIFT;
   46.42 @@ -539,7 +557,7 @@ static int setup_guest(int xc_handle,
   46.43      IPRINTF("start_info: 0x%lx at 0x%lx, "
   46.44             "store_mfn: 0x%lx at 0x%lx, "
   46.45             "console_mfn: 0x%lx at 0x%lx\n",
   46.46 -           page_array[0], nr_pages,
   46.47 +           page_array[0], nr_pages - 3,
   46.48             *store_mfn,    nr_pages - 2,
   46.49             *console_mfn,  nr_pages - 1);
   46.50  
   46.51 @@ -554,23 +572,35 @@ static int setup_guest(int xc_handle,
   46.52      start_info->console_mfn   = nr_pages - 1;
   46.53      start_info->console_evtchn = console_evtchn;
   46.54      start_info->nr_pages       = nr_pages; // FIXME?: nr_pages - 2 ????
   46.55 +
   46.56 +    bp = (struct xen_ia64_boot_param *)(start_info + 1);
   46.57 +    bp->command_line = start_info_mpa + offsetof(start_info_t, cmd_line);
   46.58 +    if ( cmdline != NULL )
   46.59 +    {
   46.60 +        strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
   46.61 +        start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = 0;
   46.62 +    }
   46.63      if ( initrd->len != 0 )
   46.64      {
   46.65 -        ctxt->initrd.start    = vinitrd_start;
   46.66 -        ctxt->initrd.size     = initrd->len;
   46.67 +        bp->initrd_start    = vinitrd_start;
   46.68 +        bp->initrd_size     = initrd->len;
   46.69      }
   46.70 -    else
   46.71 -    {
   46.72 -        ctxt->initrd.start    = 0;
   46.73 -        ctxt->initrd.size     = 0;
   46.74 -    }
   46.75 -    if ( cmdline != NULL )
   46.76 -    {
   46.77 -        strncpy((char *)ctxt->cmdline, cmdline, IA64_COMMAND_LINE_SIZE);
   46.78 -        ctxt->cmdline[IA64_COMMAND_LINE_SIZE-1] = '\0';
   46.79 -    }
   46.80 +    ctxt->user_regs.r28 = start_info_mpa + sizeof (start_info_t);
   46.81      munmap(start_info, PAGE_SIZE);
   46.82  
   46.83 +    /* shared_info page starts its life empty. */
   46.84 +    shared_info = xc_map_foreign_range(
   46.85 +        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
   46.86 +    printf("shared_info = %p, err=%s frame=%lx\n",
   46.87 +           shared_info, strerror (errno), shared_info_frame);
   46.88 +    //memset(shared_info, 0, sizeof(shared_info_t));
   46.89 +    /* Mask all upcalls... */
   46.90 +    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   46.91 +        shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   46.92 +    shared_info->arch.start_info_pfn = nr_pages - 3;
   46.93 +
   46.94 +    munmap(shared_info, PAGE_SIZE);
   46.95 +
   46.96      free(page_array);
   46.97      return 0;
   46.98  
   46.99 @@ -1150,16 +1180,10 @@ static int xc_linux_build_internal(int x
  46.100  #ifdef __ia64__
  46.101      /* based on new_thread in xen/arch/ia64/domain.c */
  46.102      ctxt->flags = 0;
  46.103 -    ctxt->shared.flags = flags;
  46.104 -    ctxt->shared.start_info_pfn = nr_pages - 3; /* metaphysical */
  46.105      ctxt->user_regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
  46.106      ctxt->user_regs.cr_iip = vkern_entry;
  46.107      ctxt->user_regs.cr_ifs = 1UL << 63;
  46.108      ctxt->user_regs.ar_fpsr = xc_ia64_fpsr_default();
  46.109 -    /* currently done by hypervisor, should move here */
  46.110 -    /* ctxt->regs.r28 = dom_fw_setup(); */
  46.111 -    ctxt->privregs = 0;
  46.112 -    ctxt->sys_pgnr = 3;
  46.113      i = 0; /* silence unused variable warning */
  46.114  #else /* x86 */
  46.115      /*
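
The xc_linux_build.c hunk above moves the ia64 boot parameters out of the vcpu context and into guest memory: the page at metaphysical pfn nr_pages - 3 holds a start_info_t followed immediately by the xen_ia64_boot_param, r28 is pointed at the latter, and shared_info->arch.start_info_pfn records that pfn.  A tiny arithmetic sketch of those addresses, with an assumed 16K PAGE_SHIFT and a placeholder constant standing in for sizeof(start_info_t):

    #include <stdio.h>

    #define PAGE_SHIFT      14        /* assumption: 16K ia64 pages */
    #define START_INFO_SIZE 1280UL    /* placeholder for sizeof(start_info_t) */

    int main(void)
    {
        unsigned long nr_pages = 0x8000;   /* example: a 512MB domain */
        unsigned long start_info_mpa = (nr_pages - 3) << PAGE_SHIFT;
        unsigned long bp_mpa = start_info_mpa + START_INFO_SIZE;  /* loaded into r28 */

        printf("start_info_pfn = %#lx\n", nr_pages - 3);
        printf("start_info at  %#lx\n", start_info_mpa);
        printf("boot_param at  %#lx\n", bp_mpa);
        return 0;
    }
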
    47.1 --- a/tools/libxc/xc_private.c	Thu Jul 27 17:44:14 2006 -0500
    47.2 +++ b/tools/libxc/xc_private.c	Fri Jul 28 10:51:38 2006 +0100
    47.3 @@ -262,6 +262,7 @@ long long xc_domain_get_cpu_usage( int x
    47.4  }
    47.5  
    47.6  
    47.7 +#ifndef __ia64__
    47.8  int xc_get_pfn_list(int xc_handle,
    47.9                      uint32_t domid,
   47.10                      xen_pfn_t *pfn_buf,
   47.11 @@ -305,6 +306,7 @@ int xc_get_pfn_list(int xc_handle,
   47.12  
   47.13      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
   47.14  }
   47.15 +#endif
   47.16  
   47.17  long xc_get_tot_pages(int xc_handle, uint32_t domid)
   47.18  {
    48.1 --- a/tools/libxc/xenctrl.h	Thu Jul 27 17:44:14 2006 -0500
    48.2 +++ b/tools/libxc/xenctrl.h	Fri Jul 28 10:51:38 2006 +0100
    48.3 @@ -524,9 +524,6 @@ int xc_copy_to_domain_page(int xc_handle
    48.4  int xc_clear_domain_page(int xc_handle, uint32_t domid,
    48.5                           unsigned long dst_pfn);
    48.6  
    48.7 -int xc_ia64_copy_to_domain_pages(int xc_handle, uint32_t domid,
    48.8 -        void* src_page, unsigned long dst_pfn, int nr_pages);
    48.9 -
   48.10  long xc_get_max_pages(int xc_handle, uint32_t domid);
   48.11  
   48.12  int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
    49.1 --- a/xen/arch/ia64/Makefile	Thu Jul 27 17:44:14 2006 -0500
    49.2 +++ b/xen/arch/ia64/Makefile	Fri Jul 28 10:51:38 2006 +0100
    49.3 @@ -50,22 +50,22 @@ asm-xsi-offsets.s: asm-xsi-offsets.c $(H
    49.4  $(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
    49.5  # Need such symbol link to make linux headers available
    49.6  	[ -e $(BASEDIR)/include/linux ] \
    49.7 -	 || ln -s $(BASEDIR)/include/xen $(BASEDIR)/include/linux
    49.8 +	 || ln -sf $(BASEDIR)/include/xen $(BASEDIR)/include/linux
    49.9  	[ -e $(BASEDIR)/include/asm-ia64/xen ] \
   49.10 -	 || ln -s $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
   49.11 +	 || ln -sf $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
   49.12  # Link to HVM files in Xen for ia64/vti
   49.13  	[ -e $(BASEDIR)/include/asm-ia64/hvm ] \
   49.14  	 || mkdir $(BASEDIR)/include/asm-ia64/hvm
   49.15  	[ -e $(BASEDIR)/include/asm-ia64/hvm/support.h ] \
   49.16 -	 || ln -s ../../../include/asm-x86/hvm/support.h $(BASEDIR)/include/asm-ia64/hvm/support.h
   49.17 +	 || ln -sf ../../../include/asm-x86/hvm/support.h $(BASEDIR)/include/asm-ia64/hvm/support.h
   49.18  	[ -e $(BASEDIR)/include/asm-ia64/hvm/io.h ] \
   49.19 -	 || ln -s ../../../include/asm-x86/hvm/io.h $(BASEDIR)/include/asm-ia64/hvm/io.h
   49.20 +	 || ln -sf ../../../include/asm-x86/hvm/io.h $(BASEDIR)/include/asm-ia64/hvm/io.h
   49.21  	[ -e $(BASEDIR)/include/asm-ia64/hvm/vpic.h ] \
   49.22 -	 || ln -s ../../../include/asm-x86/hvm/vpic.h $(BASEDIR)/include/asm-ia64/hvm/vpic.h
   49.23 +	 || ln -sf ../../../include/asm-x86/hvm/vpic.h $(BASEDIR)/include/asm-ia64/hvm/vpic.h
   49.24  	[ -e $(BASEDIR)/include/asm-ia64/hvm/vioapic.h ] \
   49.25 -	 || ln -s ../../../include/asm-x86/hvm/vioapic.h $(BASEDIR)/include/asm-ia64/hvm/vioapic.h
   49.26 +	 || ln -sf ../../../include/asm-x86/hvm/vioapic.h $(BASEDIR)/include/asm-ia64/hvm/vioapic.h
   49.27  	[ -e $(BASEDIR)/arch/ia64/vmx/hvm_vioapic.c ] \
   49.28 -	 || ln -s ../../../arch/x86/hvm/vioapic.c $(BASEDIR)/arch/ia64/vmx/hvm_vioapic.c
   49.29 +	 || ln -sf ../../../arch/x86/hvm/vioapic.c $(BASEDIR)/arch/ia64/vmx/hvm_vioapic.c
   49.30  
   49.31  # I'm sure a Makefile wizard would know a better way to do this
   49.32  xen.lds.s: xen/xen.lds.S
    50.1 --- a/xen/arch/ia64/asm-offsets.c	Thu Jul 27 17:44:14 2006 -0500
    50.2 +++ b/xen/arch/ia64/asm-offsets.c	Fri Jul 28 10:51:38 2006 +0100
    50.3 @@ -8,6 +8,7 @@
    50.4  #include <xen/sched.h>
    50.5  #include <asm/processor.h>
    50.6  #include <asm/ptrace.h>
    50.7 +#include <asm/mca.h>
    50.8  #include <public/xen.h>
    50.9  #include <asm/tlb.h>
   50.10  #include <asm/regs.h>
   50.11 @@ -33,6 +34,9 @@ void foo(void)
   50.12  	DEFINE(SHARED_INFO_SIZE, sizeof (struct shared_info));
   50.13  
   50.14  	BLANK();
   50.15 +	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
   50.16 +
   50.17 +	BLANK();
   50.18  #ifdef   VTI_DEBUG
   50.19  	DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
   50.20  	DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
   50.21 @@ -61,6 +65,11 @@ void foo(void)
   50.22  	DEFINE(IA64_VCPU_DTLB_OFFSET, offsetof (struct vcpu, arch.dtlb));
   50.23  
   50.24  	BLANK();
   50.25 +
   50.26 +	DEFINE(IA64_DOMAIN_SHADOW_BITMAP_OFFSET, offsetof (struct domain, arch.shadow_bitmap));
   50.27 +
   50.28 +	BLANK();
   50.29 +
   50.30  	DEFINE(IA64_CPUINFO_ITM_NEXT_OFFSET, offsetof (struct cpuinfo_ia64, itm_next));
   50.31  	DEFINE(IA64_CPUINFO_KSOFTIRQD_OFFSET, offsetof (struct cpuinfo_ia64, ksoftirqd));
   50.32  
   50.33 @@ -123,7 +132,6 @@ void foo(void)
   50.34  	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
   50.35  	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
   50.36  	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
   50.37 -	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
   50.38  	DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
   50.39  	DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
   50.40  	DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
   50.41 @@ -180,6 +188,7 @@ void foo(void)
   50.42  	BLANK();
   50.43  
   50.44  	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
   50.45 +	DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
   50.46   	DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
   50.47  	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
   50.48  	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
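
The asm-offsets.c hunk above extends the usual offsetof()-to-assembler-constant trick so the new mca_asm.S and the VMX entry paths can reach C struct fields by numeric offset. A minimal standalone sketch of the technique follows; the struct and symbol names are hypothetical, and printf() stands in for the inline-asm/sed pipeline the real build uses to turn these into an assembler header.

    /* Illustrative sketch of the asm-offsets technique: emit struct offsets
     * as assembler-visible constants.  The struct and names here are
     * hypothetical, not the Xen ia64 definitions. */
    #include <stddef.h>
    #include <stdio.h>

    struct vcpu_like {
        long flags;
        long dtlb;
        long itm_next;
    };

    /* The real asm-offsets.c emits these through the build system; printing
     * them is enough to show the idea. */
    #define DEFINE(sym, val) printf("#define %s\t%ld\n", #sym, (long)(val))

    int main(void)
    {
        DEFINE(VCPU_FLAGS_OFFSET, offsetof(struct vcpu_like, flags));
        DEFINE(VCPU_DTLB_OFFSET, offsetof(struct vcpu_like, dtlb));
        DEFINE(VCPU_ITM_NEXT_OFFSET, offsetof(struct vcpu_like, itm_next));
        return 0;
    }

Assembly code then includes the generated header and addresses fields as base-register-plus-constant, which is exactly what the new IA64_MCA_CPU_INIT_STACK_OFFSET and IA64_VPD_VIFS_OFFSET entries are for.
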
    51.1 --- a/xen/arch/ia64/linux-xen/Makefile	Thu Jul 27 17:44:14 2006 -0500
    51.2 +++ b/xen/arch/ia64/linux-xen/Makefile	Fri Jul 28 10:51:38 2006 +0100
    51.3 @@ -1,6 +1,8 @@
    51.4  obj-y += efi.o
    51.5  obj-y += entry.o
    51.6  obj-y += irq_ia64.o
    51.7 +obj-y += mca.o
    51.8 +obj-y += mca_asm.o
    51.9  obj-y += mm_contig.o
   51.10  obj-y += pal.o
   51.11  obj-y += process-linux-xen.o
    52.1 --- a/xen/arch/ia64/linux-xen/README.origin	Thu Jul 27 17:44:14 2006 -0500
    52.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Fri Jul 28 10:51:38 2006 +0100
    52.3 @@ -11,6 +11,8 @@ entry.S			-> linux/arch/ia64/kernel/entr
    52.4  head.S			-> linux/arch/ia64/kernel/head.S
    52.5  hpsim_ssc.h		-> linux/arch/ia64/hp/sim/hpsim_ssc.h
    52.6  irq_ia64.c		-> linux/arch/ia64/kernel/irq_ia64.c
    52.7 +mca.c			-> linux/arch/ia64/kernel/mca.c
    52.8 +mca_asm.S		-> linux/arch/ia64/kernel/mca_asm.S
    52.9  minstate.h		-> linux/arch/ia64/kernel/minstate.h
   52.10  mm_contig.c		-> linux/arch/ia64/mm/contig.c
   52.11  pal.S			-> linux/arch/ia64/kernel/pal.S
    53.1 --- a/xen/arch/ia64/linux-xen/entry.S	Thu Jul 27 17:44:14 2006 -0500
    53.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Fri Jul 28 10:51:38 2006 +0100
    53.3 @@ -652,17 +652,8 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
    53.4      ld8 r16 = [r16]
    53.5      ;;
    53.6      cmp.ne p6,p7 = r16, r0
    53.7 - (p6) br.cond.spnt ia64_leave_hypervisor
    53.8 - (p7) br.cond.spnt ia64_leave_kernel
    53.9 -    ;;
   53.10 -//    adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
   53.11 -//    ;;
   53.12 -//    ld8 r16 = [r16]
   53.13 -//    ;;
   53.14 -//    cmp.ne p6,p7 = r16, r0
   53.15 -//	(p6) br.cond.spnt ia64_leave_hypervisor
   53.16 -//	(p7) br.cond.spnt ia64_leave_kernel
   53.17 -//    ;;
   53.18 + (p6) br.cond.spnt ia64_leave_hypervisor	/* VTi */
   53.19 + (p7) br.cond.spnt ia64_leave_kernel		/* !VTi */
   53.20  #else
   53.21  .ret8:
   53.22  	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
   53.23 @@ -901,7 +892,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
   53.24  #ifdef XEN
   53.25  	;;
   53.26  (pUStk) ssm psr.i
   53.27 -(pUStk)    br.call.sptk.many b0=process_soft_irq
   53.28 +(pUStk)    br.call.sptk.many b0=do_softirq
   53.29  (pUStk) rsm psr.i
   53.30      ;;
   53.31  	alloc loc0=ar.pfs,0,1,1,0
    54.1 --- a/xen/arch/ia64/linux-xen/iosapic.c	Thu Jul 27 17:44:14 2006 -0500
    54.2 +++ b/xen/arch/ia64/linux-xen/iosapic.c	Fri Jul 28 10:51:38 2006 +0100
    54.3 @@ -1155,7 +1155,7 @@ int iosapic_guest_read(unsigned long phy
    54.4  
    54.5  int iosapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
    54.6  {
    54.7 -	unsigned int id, gsi, vec, dest, high32;
    54.8 +	unsigned int id, gsi, vec, xen_vec, dest, high32;
    54.9  	char rte_index;
   54.10  	struct iosapic *ios;
   54.11  	struct iosapic_intr_info *info;
   54.12 @@ -1185,13 +1185,17 @@ int iosapic_guest_write(unsigned long ph
   54.13  
   54.14  	/* Sanity check. Vector should be allocated before this update */
   54.15  	if ((rte_index > ios->num_rte) ||
   54.16 -	    test_bit(vec, ia64_xen_vector) ||
   54.17  	    ((vec > IA64_FIRST_DEVICE_VECTOR) &&
   54.18  	     (vec < IA64_LAST_DEVICE_VECTOR) &&
   54.19  	     (!test_bit(vec - IA64_FIRST_DEVICE_VECTOR, ia64_vector_mask))))
   54.20  	    return -EINVAL;
   54.21  
   54.22  	gsi = ios->gsi_base + rte_index;
   54.23 +	xen_vec = gsi_to_vector(gsi);
   54.24 +	if (xen_vec >= 0 && test_bit(xen_vec, ia64_xen_vector)) {
   54.25 +		printk("WARN: GSI %d in use by Xen.\n", gsi);
   54.26 +		return -EINVAL;
   54.27 +	}
   54.28  	info = &iosapic_intr_info[vec];
   54.29  	spin_lock_irqsave(&irq_descp(vec)->lock, flags);
   54.30  	spin_lock(&iosapic_lock);
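
The iosapic_guest_write() change above narrows the sanity check: instead of rejecting any write while some vector is owned by Xen, it maps the guest's GSI to Xen's vector and rejects only if that particular vector is claimed. Below is a hedged standalone sketch of that check; the bitmap, the gsi_to_vector() mapping, and the demo values are illustrative stand-ins, not the Xen implementation.

    /* Sketch of the check added to iosapic_guest_write(): translate the
     * guest's GSI to the hypervisor's vector and refuse the RTE update only
     * if that specific vector is claimed.  Bitmap and lookup are stand-ins. */
    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define NR_VECTORS 256

    static uint64_t xen_vector_bitmap[NR_VECTORS / 64];

    static int test_vector(int vec)
    {
        return (xen_vector_bitmap[vec / 64] >> (vec % 64)) & 1;
    }

    static void claim_vector(int vec)
    {
        xen_vector_bitmap[vec / 64] |= (uint64_t)1 << (vec % 64);
    }

    /* Hypothetical GSI-to-vector lookup; returns -1 when unmapped. */
    static int gsi_to_vector(int gsi)
    {
        return gsi == 9 ? 0x31 : -1;
    }

    static int guest_write_check(int gsi)
    {
        int xen_vec = gsi_to_vector(gsi);

        if (xen_vec >= 0 && test_vector(xen_vec)) {
            printf("WARN: GSI %d in use by Xen.\n", gsi);
            return -EINVAL;
        }
        return 0;
    }

    int main(void)
    {
        claim_vector(0x31);                       /* pretend Xen owns GSI 9 */
        printf("gsi 9  -> %d\n", guest_write_check(9));
        printf("gsi 12 -> %d\n", guest_write_check(12));
        return 0;
    }
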
    55.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    55.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Fri Jul 28 10:51:38 2006 +0100
    55.3 @@ -0,0 +1,1600 @@
    55.4 +/*
    55.5 + * File:	mca.c
    55.6 + * Purpose:	Generic MCA handling layer
    55.7 + *
    55.8 + * Updated for latest kernel
    55.9 + * Copyright (C) 2003 Hewlett-Packard Co
   55.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   55.11 + *
   55.12 + * Copyright (C) 2002 Dell Inc.
   55.13 + * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
   55.14 + *
   55.15 + * Copyright (C) 2002 Intel
   55.16 + * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
   55.17 + *
   55.18 + * Copyright (C) 2001 Intel
   55.19 + * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
   55.20 + *
   55.21 + * Copyright (C) 2000 Intel
   55.22 + * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
   55.23 + *
   55.24 + * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
   55.25 + * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
   55.26 + *
   55.27 + * 03/04/15 D. Mosberger Added INIT backtrace support.
   55.28 + * 02/03/25 M. Domsch	GUID cleanups
   55.29 + *
   55.30 + * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU
   55.31 + *			error flag, set SAL default return values, changed
   55.32 + *			error record structure to linked list, added init call
   55.33 + *			to sal_get_state_info_size().
   55.34 + *
   55.35 + * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
   55.36 + *                      platform errors, completed code for logging of
   55.37 + *                      corrected & uncorrected machine check errors, and
   55.38 + *                      updated for conformance with Nov. 2000 revision of the
   55.39 + *                      SAL 3.0 spec.
   55.40 + * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
   55.41 + *                           added min save state dump, added INIT handler.
   55.42 + *
   55.43 + * 2003-12-08 Keith Owens <kaos@sgi.com>
   55.44 + *            smp_call_function() must not be called from interrupt context (can
   55.45 + *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
   55.46 + *
   55.47 + * 2004-02-01 Keith Owens <kaos@sgi.com>
   55.48 + *            Avoid deadlock when using printk() for MCA and INIT records.
   55.49 + *            Delete all record printing code, moved to salinfo_decode in user space.
   55.50 + *            Mark variables and functions static where possible.
   55.51 + *            Delete dead variables and functions.
   55.52 + *            Reorder to remove the need for forward declarations and to consolidate
   55.53 + *            related code.
   55.54 + */
   55.55 +#include <linux/config.h>
   55.56 +#include <linux/types.h>
   55.57 +#include <linux/init.h>
   55.58 +#include <linux/sched.h>
   55.59 +#include <linux/interrupt.h>
   55.60 +#include <linux/irq.h>
   55.61 +#include <linux/kallsyms.h>
   55.62 +#include <linux/smp_lock.h>
   55.63 +#include <linux/bootmem.h>
   55.64 +#include <linux/acpi.h>
   55.65 +#include <linux/timer.h>
   55.66 +#include <linux/module.h>
   55.67 +#include <linux/kernel.h>
   55.68 +#include <linux/smp.h>
   55.69 +#include <linux/workqueue.h>
   55.70 +
   55.71 +#include <asm/delay.h>
   55.72 +#include <asm/machvec.h>
   55.73 +#include <asm/meminit.h>
   55.74 +#include <asm/page.h>
   55.75 +#include <asm/ptrace.h>
   55.76 +#include <asm/system.h>
   55.77 +#include <asm/sal.h>
   55.78 +#include <asm/mca.h>
   55.79 +
   55.80 +#include <asm/irq.h>
   55.81 +#include <asm/hw_irq.h>
   55.82 +
   55.83 +#ifdef XEN
   55.84 +#include <xen/symbols.h>
   55.85 +#endif
   55.86 +
   55.87 +#if defined(IA64_MCA_DEBUG_INFO)
   55.88 +# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
   55.89 +#else
   55.90 +# define IA64_MCA_DEBUG(fmt...)
   55.91 +#endif
   55.92 +
   55.93 +/* Used by mca_asm.S */
   55.94 +#ifndef XEN
   55.95 +ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
   55.96 +#else
   55.97 +ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state[NR_CPUS];
   55.98 +DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
   55.99 +#endif
  55.100 +ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
  55.101 +u64				ia64_mca_serialize;
  55.102 +DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
  55.103 +DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
  55.104 +DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
  55.105 +DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
  55.106 +
  55.107 +unsigned long __per_cpu_mca[NR_CPUS];
  55.108 +
  55.109 +/* In mca_asm.S */
  55.110 +extern void			ia64_monarch_init_handler (void);
  55.111 +extern void			ia64_slave_init_handler (void);
  55.112 +
  55.113 +static ia64_mc_info_t		ia64_mc_info;
  55.114 +
  55.115 +#ifndef XEN
  55.116 +#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
  55.117 +#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
  55.118 +#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
  55.119 +#define CPE_HISTORY_LENGTH    5
  55.120 +#define CMC_HISTORY_LENGTH    5
  55.121 +
  55.122 +static struct timer_list cpe_poll_timer;
  55.123 +static struct timer_list cmc_poll_timer;
  55.124 +/*
  55.125 + * This variable tells whether we are currently in polling mode.
  55.126 + * Start with this in the wrong state so we won't play w/ timers
  55.127 + * before the system is ready.
  55.128 + */
  55.129 +static int cmc_polling_enabled = 1;
  55.130 +
  55.131 +/*
  55.132 + * Clearing this variable prevents CPE polling from getting activated
  55.133 + * in mca_late_init.  Use it if your system doesn't provide a CPEI,
  55.134 + * but encounters problems retrieving CPE logs.  This should only be
  55.135 + * necessary for debugging.
  55.136 + */
  55.137 +static int cpe_poll_enabled = 1;
  55.138 +
  55.139 +extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
  55.140 +#endif /* !XEN */
  55.141 +
  55.142 +static int mca_init;
  55.143 +
  55.144 +#ifndef XEN
  55.145 +/*
  55.146 + * IA64_MCA log support
  55.147 + */
  55.148 +#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
  55.149 +#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */
  55.150 +
  55.151 +typedef struct ia64_state_log_s
  55.152 +{
  55.153 +	spinlock_t	isl_lock;
  55.154 +	int		isl_index;
  55.155 +	unsigned long	isl_count;
  55.156 +	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
  55.157 +} ia64_state_log_t;
  55.158 +
  55.159 +static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
  55.160 +
  55.161 +#define IA64_LOG_ALLOCATE(it, size) \
  55.162 +	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
  55.163 +		(ia64_err_rec_t *)alloc_bootmem(size); \
  55.164 +	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
  55.165 +		(ia64_err_rec_t *)alloc_bootmem(size);}
  55.166 +#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
  55.167 +#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
  55.168 +#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
  55.169 +#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
  55.170 +#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
  55.171 +#define IA64_LOG_INDEX_INC(it) \
  55.172 +    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
  55.173 +    ia64_state_log[it].isl_count++;}
  55.174 +#define IA64_LOG_INDEX_DEC(it) \
  55.175 +    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
  55.176 +#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
  55.177 +#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
  55.178 +#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
  55.179 +
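
The IA64_LOG_* macros above implement a two-slot double buffer per record type: isl_index names the slot that will receive the next record and 1 - isl_index the current one, so a nested MCA can be captured while the previous record is still being consumed. A small sketch of just that index flip, with plain arrays in place of the bootmem buffers and no locking (names are illustrative).

    /* Minimal sketch of the two-slot log double-buffering used by the
     * IA64_LOG_* macros (simplified; no locking, hypothetical names). */
    #include <stdio.h>

    struct state_log {
        int  index;            /* slot that will receive the NEXT record   */
        long count;            /* how many records have been captured      */
        char slot[2][64];      /* stand-in for the two bootmem buffers     */
    };

    #define LOG_NEXT(l)  ((l)->slot[(l)->index])
    #define LOG_CURR(l)  ((l)->slot[1 - (l)->index])
    #define LOG_INDEX_INC(l) do { (l)->index = 1 - (l)->index; (l)->count++; } while (0)

    int main(void)
    {
        struct state_log log = { 0, 0, { "", "" } };

        snprintf(LOG_NEXT(&log), sizeof log.slot[0], "record A");
        LOG_INDEX_INC(&log);          /* "record A" is now the current slot */
        snprintf(LOG_NEXT(&log), sizeof log.slot[0], "record B");
        LOG_INDEX_INC(&log);

        printf("count=%ld current=%s\n", log.count, LOG_CURR(&log));
        return 0;
    }
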
  55.180 +/*
  55.181 + * ia64_log_init
  55.182 + *	Reset the OS ia64 log buffer
  55.183 + * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
  55.184 + * Outputs	:	None
  55.185 + */
  55.186 +static void
  55.187 +ia64_log_init(int sal_info_type)
  55.188 +{
  55.189 +	u64	max_size = 0;
  55.190 +
  55.191 +	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
  55.192 +	IA64_LOG_LOCK_INIT(sal_info_type);
  55.193 +
  55.194 +	// SAL will tell us the maximum size of any error record of this type
  55.195 +	max_size = ia64_sal_get_state_info_size(sal_info_type);
  55.196 +	if (!max_size)
  55.197 +		/* alloc_bootmem() doesn't like zero-sized allocations! */
  55.198 +		return;
  55.199 +
  55.200 +	// set up OS data structures to hold error info
  55.201 +	IA64_LOG_ALLOCATE(sal_info_type, max_size);
  55.202 +	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
  55.203 +	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
  55.204 +}
  55.205 +
  55.206 +/*
  55.207 + * ia64_log_get
  55.208 + *
  55.209 + *	Get the current MCA log from SAL and copy it into the OS log buffer.
  55.210 + *
  55.211 + *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
  55.212 + *              irq_safe    whether you can use printk at this point
  55.213 + *  Outputs :   size        (total record length)
  55.214 + *              *buffer     (ptr to error record)
  55.215 + *
  55.216 + */
  55.217 +static u64
  55.218 +ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
  55.219 +{
  55.220 +	sal_log_record_header_t     *log_buffer;
  55.221 +	u64                         total_len = 0;
  55.222 +	int                         s;
  55.223 +
  55.224 +	IA64_LOG_LOCK(sal_info_type);
  55.225 +
  55.226 +	/* Get the process state information */
  55.227 +	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
  55.228 +
  55.229 +	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
  55.230 +
  55.231 +	if (total_len) {
  55.232 +		IA64_LOG_INDEX_INC(sal_info_type);
  55.233 +		IA64_LOG_UNLOCK(sal_info_type);
  55.234 +		if (irq_safe) {
  55.235 +			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
  55.236 +				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
  55.237 +		}
  55.238 +		*buffer = (u8 *) log_buffer;
  55.239 +		return total_len;
  55.240 +	} else {
  55.241 +		IA64_LOG_UNLOCK(sal_info_type);
  55.242 +		return 0;
  55.243 +	}
  55.244 +}
  55.245 +
  55.246 +/*
  55.247 + *  ia64_mca_log_sal_error_record
  55.248 + *
  55.249 + *  This function retrieves a specified error record type from SAL
  55.250 + *  and wakes up any processes waiting for error records.
  55.251 + *
  55.252 + *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
  55.253 + */
  55.254 +static void
  55.255 +ia64_mca_log_sal_error_record(int sal_info_type)
  55.256 +{
  55.257 +	u8 *buffer;
  55.258 +	sal_log_record_header_t *rh;
  55.259 +	u64 size;
  55.260 +	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
  55.261 +#ifdef IA64_MCA_DEBUG_INFO
  55.262 +	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
  55.263 +#endif
  55.264 +
  55.265 +	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
  55.266 +	if (!size)
  55.267 +		return;
  55.268 +
  55.269 +	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
  55.270 +
  55.271 +	if (irq_safe)
  55.272 +		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
  55.273 +			smp_processor_id(),
  55.274 +			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
  55.275 +
  55.276 +	/* Clear logs from corrected errors in case there's no user-level logger */
  55.277 +	rh = (sal_log_record_header_t *)buffer;
  55.278 +	if (rh->severity == sal_log_severity_corrected)
  55.279 +		ia64_sal_clear_state_info(sal_info_type);
  55.280 +}
  55.281 +
  55.282 +/*
  55.283 + * platform dependent error handling
  55.284 + */
  55.285 +#endif /* !XEN */
  55.286 +#ifndef PLATFORM_MCA_HANDLERS
  55.287 +#ifndef XEN
  55.288 +
  55.289 +#ifdef CONFIG_ACPI
  55.290 +
  55.291 +int cpe_vector = -1;
  55.292 +
  55.293 +static irqreturn_t
  55.294 +ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
  55.295 +{
  55.296 +	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
  55.297 +	static int		index;
  55.298 +	static DEFINE_SPINLOCK(cpe_history_lock);
  55.299 +
  55.300 +	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
  55.301 +		       __FUNCTION__, cpe_irq, smp_processor_id());
  55.302 +
  55.303 +	/* SAL spec states this should run w/ interrupts enabled */
  55.304 +	local_irq_enable();
  55.305 +
  55.306 +	/* Get the CPE error record and log it */
  55.307 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
  55.308 +
  55.309 +	spin_lock(&cpe_history_lock);
  55.310 +	if (!cpe_poll_enabled && cpe_vector >= 0) {
  55.311 +
  55.312 +		int i, count = 1; /* we know 1 happened now */
  55.313 +		unsigned long now = jiffies;
  55.314 +
  55.315 +		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
  55.316 +			if (now - cpe_history[i] <= HZ)
  55.317 +				count++;
  55.318 +		}
  55.319 +
  55.320 +		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
  55.321 +		if (count >= CPE_HISTORY_LENGTH) {
  55.322 +
  55.323 +			cpe_poll_enabled = 1;
  55.324 +			spin_unlock(&cpe_history_lock);
  55.325 +			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
  55.326 +
  55.327 +			/*
  55.328 +			 * Corrected errors will still be corrected, but
  55.329 +			 * make sure there's a log somewhere that indicates
  55.330 +			 * something is generating more than we can handle.
  55.331 +			 */
  55.332 +			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
  55.333 +
  55.334 +			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
  55.335 +
  55.336 +			/* lock already released, get out now */
  55.337 +			return IRQ_HANDLED;
  55.338 +		} else {
  55.339 +			cpe_history[index++] = now;
  55.340 +			if (index == CPE_HISTORY_LENGTH)
  55.341 +				index = 0;
  55.342 +		}
  55.343 +	}
  55.344 +	spin_unlock(&cpe_history_lock);
  55.345 +	return IRQ_HANDLED;
  55.346 +}
  55.347 +
  55.348 +#endif /* CONFIG_ACPI */
  55.349 +#endif /* !XEN */
  55.350 +
  55.351 +static void
  55.352 +show_min_state (pal_min_state_area_t *minstate)
  55.353 +{
  55.354 +	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
  55.355 +	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
  55.356 +
  55.357 +	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
  55.358 +	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
  55.359 +	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
  55.360 +	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
  55.361 +	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
  55.362 +	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
  55.363 +	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
  55.364 +	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
  55.365 +	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
  55.366 +	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
  55.367 +	printk("b1\t\t%016lx ", minstate->pmsa_br1);
  55.368 +	print_symbol("%s\n", minstate->pmsa_br1);
  55.369 +
  55.370 +	printk("\nstatic registers r0-r15:\n");
  55.371 +	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
  55.372 +	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
  55.373 +	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
  55.374 +	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
  55.375 +	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
  55.376 +	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
  55.377 +	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
  55.378 +	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
  55.379 +	printk("r12-15 %016lx %016lx %016lx %016lx\n",
  55.380 +	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
  55.381 +	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
  55.382 +
  55.383 +	printk("\nbank 0:\n");
  55.384 +	printk("r16-19 %016lx %016lx %016lx %016lx\n",
  55.385 +	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
  55.386 +	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
  55.387 +	printk("r20-23 %016lx %016lx %016lx %016lx\n",
  55.388 +	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
  55.389 +	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
  55.390 +	printk("r24-27 %016lx %016lx %016lx %016lx\n",
  55.391 +	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
  55.392 +	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
  55.393 +	printk("r28-31 %016lx %016lx %016lx %016lx\n",
  55.394 +	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
  55.395 +	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
  55.396 +
  55.397 +	printk("\nbank 1:\n");
  55.398 +	printk("r16-19 %016lx %016lx %016lx %016lx\n",
  55.399 +	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
  55.400 +	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
  55.401 +	printk("r20-23 %016lx %016lx %016lx %016lx\n",
  55.402 +	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
  55.403 +	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
  55.404 +	printk("r24-27 %016lx %016lx %016lx %016lx\n",
  55.405 +	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
  55.406 +	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
  55.407 +	printk("r28-31 %016lx %016lx %016lx %016lx\n",
  55.408 +	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
  55.409 +	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
  55.410 +}
  55.411 +
  55.412 +static void
  55.413 +fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
  55.414 +{
  55.415 +	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
  55.416 +	int i;
  55.417 +
  55.418 +	/*
  55.419 +	 * First, update the pt-regs and switch-stack structures with the contents stored
  55.420 +	 * in the min-state area:
  55.421 +	 */
  55.422 +	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
  55.423 +		pt->cr_ipsr = ms->pmsa_xpsr;
  55.424 +		pt->cr_iip = ms->pmsa_xip;
  55.425 +		pt->cr_ifs = ms->pmsa_xfs;
  55.426 +	} else {
  55.427 +		pt->cr_ipsr = ms->pmsa_ipsr;
  55.428 +		pt->cr_iip = ms->pmsa_iip;
  55.429 +		pt->cr_ifs = ms->pmsa_ifs;
  55.430 +	}
  55.431 +	pt->ar_rsc = ms->pmsa_rsc;
  55.432 +	pt->pr = ms->pmsa_pr;
  55.433 +	pt->r1 = ms->pmsa_gr[0];
  55.434 +	pt->r2 = ms->pmsa_gr[1];
  55.435 +	pt->r3 = ms->pmsa_gr[2];
  55.436 +	sw->r4 = ms->pmsa_gr[3];
  55.437 +	sw->r5 = ms->pmsa_gr[4];
  55.438 +	sw->r6 = ms->pmsa_gr[5];
  55.439 +	sw->r7 = ms->pmsa_gr[6];
  55.440 +	pt->r8 = ms->pmsa_gr[7];
  55.441 +	pt->r9 = ms->pmsa_gr[8];
  55.442 +	pt->r10 = ms->pmsa_gr[9];
  55.443 +	pt->r11 = ms->pmsa_gr[10];
  55.444 +	pt->r12 = ms->pmsa_gr[11];
  55.445 +	pt->r13 = ms->pmsa_gr[12];
  55.446 +	pt->r14 = ms->pmsa_gr[13];
  55.447 +	pt->r15 = ms->pmsa_gr[14];
  55.448 +	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
  55.449 +	src_banked = ms->pmsa_bank1_gr;
  55.450 +	for (i = 0; i < 16; ++i)
  55.451 +		dst_banked[i] = src_banked[i];
  55.452 +	pt->b0 = ms->pmsa_br0;
  55.453 +	sw->b1 = ms->pmsa_br1;
  55.454 +
  55.455 +	/* construct the NaT bits for the pt-regs structure: */
  55.456 +#	define PUT_NAT_BIT(dst, addr)					\
  55.457 +	do {								\
  55.458 +		bit = nat_bits & 1; nat_bits >>= 1;			\
  55.459 +		shift = ((unsigned long) addr >> 3) & 0x3f;		\
  55.460 +		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
  55.461 +	} while (0)
  55.462 +
  55.463 +	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
  55.464 +	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
  55.465 +	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
  55.466 +
  55.467 +	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
  55.468 +	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
  55.469 +	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
  55.470 +	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
  55.471 +	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
  55.472 +	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
  55.473 +	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
  55.474 +	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
  55.475 +	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
  55.476 +	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
  55.477 +	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
  55.478 +	nat_bits >>= 16;	/* skip over bank0 NaT bits */
  55.479 +	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
  55.480 +	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
  55.481 +	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
  55.482 +	PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
  55.483 +	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
  55.484 +	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
  55.485 +	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
  55.486 +	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
  55.487 +}
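
fetch_min_state() above first rotates pmsa_nat_bits so that bit 0 lines up with the first saved general register, then PUT_NAT_BIT scatters each bit into a UNAT-style word at position (address >> 3) & 0x3f of its destination slot. The following is a self-contained sketch of those two steps under simplified assumptions (one local array stands in for both the save area and the destination registers):

    /* Hedged sketch of the NaT-bit handling: rotate the saved NaT word so
     * bit 0 matches the first saved GR, then scatter bits into a UNAT-style
     * word keyed by (address >> 3) & 0x3f. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rotr64(uint64_t x, unsigned shift)
    {
        return shift ? (x >> shift) | (x << (64 - shift)) : x;
    }

    /* Place the low bit of *nat_bits at the UNAT position derived from the
     * destination address, then consume it; mirrors PUT_NAT_BIT above. */
    static void put_nat_bit(uint64_t *unat, const void *addr, uint64_t *nat_bits)
    {
        uint64_t bit   = *nat_bits & 1;
        unsigned shift = ((uintptr_t)addr >> 3) & 0x3f;

        *nat_bits >>= 1;
        *unat = (*unat & ~((uint64_t)1 << shift)) | (bit << shift);
    }

    int main(void)
    {
        uint64_t saved_regs[4] = { 0, 0, 0, 0 };   /* stand-ins for pt_regs slots */
        unsigned shift = ((uintptr_t)&saved_regs[0] >> 3) & 0x3f;

        /* Pretend the NaT bits were stored keyed by the save-area address:
         * saved_regs[0] and saved_regs[3] are NaT. */
        uint64_t raw = ((uint64_t)1 << shift) | ((uint64_t)1 << ((shift + 3) & 0x3f));
        uint64_t nat_bits = rotr64(raw, shift);    /* bit 0 now == saved_regs[0] */
        uint64_t unat = 0;

        for (int i = 0; i < 4; i++)
            put_nat_bit(&unat, &saved_regs[i], &nat_bits);

        printf("unat = %#llx\n", (unsigned long long)unat);
        return 0;
    }
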
  55.488 +
  55.489 +#ifdef XEN
  55.490 +static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
  55.491 +static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
  55.492 +
  55.493 +static void
  55.494 +save_ksp (struct unw_frame_info *info, void *arg)
  55.495 +{
  55.496 +	current->arch._thread.ksp = (__u64)(info->sw) - 16;
  55.497 +	wmb();
  55.498 +}
  55.499 +
  55.500 +/* FIXME */
  55.501 +int try_crashdump(struct pt_regs *a) { return 0; }
  55.502 +
  55.503 +#define CPU_FLUSH_RETRY_MAX 5
  55.504 +static void
  55.505 +init_cache_flush (void)
  55.506 +{
  55.507 +	unsigned long flags;
  55.508 +	int i;
  55.509 +	s64 rval = 0;
  55.510 +	u64 vector, progress = 0;
  55.511 +
  55.512 +	for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
  55.513 +		local_irq_save(flags);
  55.514 +		rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
  55.515 +		                            0, &progress, &vector);
  55.516 +		local_irq_restore(flags);
  55.517 +		if (rval == 0){
  55.518 +			printk("\nPAL cache flush success\n");
  55.519 +			return;
  55.520 +		}
  55.521 +	}
  55.522 +	printk("\nPAL cache flush failed. status=%ld\n",rval);
  55.523 +}
  55.524 +#endif /* XEN */
  55.525 +
  55.526 +static void
  55.527 +init_handler_platform (pal_min_state_area_t *ms,
  55.528 +		       struct pt_regs *pt, struct switch_stack *sw)
  55.529 +{
  55.530 +	struct unw_frame_info info;
  55.531 +
   55.532 +	/* If a kernel debugger is available, call it here; otherwise just dump the registers. */
  55.533 +
  55.534 +	/*
   55.535 +	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
  55.536 +	 * generated via the BMC's command-line interface, but since the console is on the
  55.537 +	 * same serial line, the user will need some time to switch out of the BMC before
  55.538 +	 * the dump begins.
  55.539 +	 */
  55.540 +	printk("Delaying for 5 seconds...\n");
  55.541 +	udelay(5*1000000);
  55.542 +#ifdef XEN
  55.543 +	fetch_min_state(ms, pt, sw);
  55.544 +	spin_lock(&show_stack_lock);
  55.545 +#endif
  55.546 +	show_min_state(ms);
  55.547 +
  55.548 +#ifdef XEN
  55.549 +	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
  55.550 +#else
  55.551 +	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
  55.552 +	fetch_min_state(ms, pt, sw);
  55.553 +#endif
  55.554 +	unw_init_from_interruption(&info, current, pt, sw);
  55.555 +	ia64_do_show_stack(&info, NULL);
  55.556 +#ifdef XEN
  55.557 +	unw_init_running(save_ksp, NULL);
  55.558 +	spin_unlock(&show_stack_lock);
  55.559 +	wmb();
  55.560 +	init_cache_flush();
  55.561 +
  55.562 +	if (spin_trylock(&init_dump_lock)) {
  55.563 +#ifdef CONFIG_SMP
  55.564 +		udelay(5*1000000);
  55.565 +#endif
  55.566 +		if (try_crashdump(pt) == 0)
  55.567 +			printk("\nINIT dump complete.  Please reboot now.\n");
  55.568 +	}
  55.569 +	printk("%s: CPU%d init handler done\n",
  55.570 +	       __FUNCTION__, smp_processor_id());
  55.571 +#else /* XEN */
  55.572 +#ifdef CONFIG_SMP
  55.573 +	/* read_trylock() would be handy... */
  55.574 +	if (!tasklist_lock.write_lock)
  55.575 +		read_lock(&tasklist_lock);
  55.576 +#endif
  55.577 +	{
  55.578 +		struct task_struct *g, *t;
  55.579 +		do_each_thread (g, t) {
  55.580 +			if (t == current)
  55.581 +				continue;
  55.582 +
  55.583 +			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
  55.584 +			show_stack(t, NULL);
  55.585 +		} while_each_thread (g, t);
  55.586 +	}
  55.587 +#ifdef CONFIG_SMP
  55.588 +	if (!tasklist_lock.write_lock)
  55.589 +		read_unlock(&tasklist_lock);
  55.590 +#endif
  55.591 +
  55.592 +	printk("\nINIT dump complete.  Please reboot now.\n");
  55.593 +#endif /* XEN */
  55.594 +	while (1);			/* hang city if no debugger */
  55.595 +}
  55.596 +
  55.597 +#ifndef XEN
  55.598 +#ifdef CONFIG_ACPI
  55.599 +/*
  55.600 + * ia64_mca_register_cpev
  55.601 + *
  55.602 + *  Register the corrected platform error vector with SAL.
  55.603 + *
  55.604 + *  Inputs
  55.605 + *      cpev        Corrected Platform Error Vector number
  55.606 + *
  55.607 + *  Outputs
  55.608 + *      None
  55.609 + */
  55.610 +static void
  55.611 +ia64_mca_register_cpev (int cpev)
  55.612 +{
  55.613 +	/* Register the CPE interrupt vector with SAL */
  55.614 +	struct ia64_sal_retval isrv;
  55.615 +
  55.616 +	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
  55.617 +	if (isrv.status) {
  55.618 +		printk(KERN_ERR "Failed to register Corrected Platform "
  55.619 +		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
  55.620 +		return;
  55.621 +	}
  55.622 +
  55.623 +	IA64_MCA_DEBUG("%s: corrected platform error "
  55.624 +		       "vector %#x registered\n", __FUNCTION__, cpev);
  55.625 +}
  55.626 +#endif /* CONFIG_ACPI */
  55.627 +
  55.628 +#endif /* !XEN */
  55.629 +#endif /* PLATFORM_MCA_HANDLERS */
  55.630 +#ifndef XEN
  55.631 +
  55.632 +/*
  55.633 + * ia64_mca_cmc_vector_setup
  55.634 + *
   55.635 + *  Set up the corrected machine check vector register in the processor.
   55.636 + *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
  55.637 + *  This function is invoked on a per-processor basis.
  55.638 + *
  55.639 + * Inputs
  55.640 + *      None
  55.641 + *
  55.642 + * Outputs
  55.643 + *	None
  55.644 + */
  55.645 +void
  55.646 +ia64_mca_cmc_vector_setup (void)
  55.647 +{
  55.648 +	cmcv_reg_t	cmcv;
  55.649 +
  55.650 +	cmcv.cmcv_regval	= 0;
  55.651 +	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
  55.652 +	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
  55.653 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
  55.654 +
  55.655 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
  55.656 +		       "machine check vector %#x registered.\n",
  55.657 +		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
  55.658 +
  55.659 +	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
  55.660 +		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
  55.661 +}
  55.662 +
  55.663 +/*
  55.664 + * ia64_mca_cmc_vector_disable
  55.665 + *
  55.666 + *  Mask the corrected machine check vector register in the processor.
  55.667 + *  This function is invoked on a per-processor basis.
  55.668 + *
  55.669 + * Inputs
  55.670 + *      dummy(unused)
  55.671 + *
  55.672 + * Outputs
  55.673 + *	None
  55.674 + */
  55.675 +static void
  55.676 +ia64_mca_cmc_vector_disable (void *dummy)
  55.677 +{
  55.678 +	cmcv_reg_t	cmcv;
  55.679 +
  55.680 +	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
  55.681 +
  55.682 +	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
  55.683 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
  55.684 +
  55.685 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
  55.686 +		       "machine check vector %#x disabled.\n",
  55.687 +		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
  55.688 +}
  55.689 +
  55.690 +/*
  55.691 + * ia64_mca_cmc_vector_enable
  55.692 + *
  55.693 + *  Unmask the corrected machine check vector register in the processor.
  55.694 + *  This function is invoked on a per-processor basis.
  55.695 + *
  55.696 + * Inputs
  55.697 + *      dummy(unused)
  55.698 + *
  55.699 + * Outputs
  55.700 + *	None
  55.701 + */
  55.702 +static void
  55.703 +ia64_mca_cmc_vector_enable (void *dummy)
  55.704 +{
  55.705 +	cmcv_reg_t	cmcv;
  55.706 +
  55.707 +	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
  55.708 +
  55.709 +	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
  55.710 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
  55.711 +
  55.712 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
  55.713 +		       "machine check vector %#x enabled.\n",
  55.714 +		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
  55.715 +}
  55.716 +
  55.717 +/*
  55.718 + * ia64_mca_cmc_vector_disable_keventd
  55.719 + *
  55.720 + * Called via keventd (smp_call_function() is not safe in interrupt context) to
  55.721 + * disable the cmc interrupt vector.
  55.722 + */
  55.723 +static void
  55.724 +ia64_mca_cmc_vector_disable_keventd(void *unused)
  55.725 +{
  55.726 +	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
  55.727 +}
  55.728 +
  55.729 +/*
  55.730 + * ia64_mca_cmc_vector_enable_keventd
  55.731 + *
  55.732 + * Called via keventd (smp_call_function() is not safe in interrupt context) to
  55.733 + * enable the cmc interrupt vector.
  55.734 + */
  55.735 +static void
  55.736 +ia64_mca_cmc_vector_enable_keventd(void *unused)
  55.737 +{
  55.738 +	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
  55.739 +}
  55.740 +
  55.741 +/*
  55.742 + * ia64_mca_wakeup_ipi_wait
  55.743 + *
  55.744 + *	Wait for the inter-cpu interrupt to be sent by the
  55.745 + *	monarch processor once it is done with handling the
  55.746 + *	MCA.
  55.747 + *
  55.748 + *  Inputs  :   None
  55.749 + *  Outputs :   None
  55.750 + */
  55.751 +static void
  55.752 +ia64_mca_wakeup_ipi_wait(void)
  55.753 +{
  55.754 +	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
  55.755 +	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
  55.756 +	u64	irr = 0;
  55.757 +
  55.758 +	do {
  55.759 +		switch(irr_num) {
  55.760 +		      case 0:
  55.761 +			irr = ia64_getreg(_IA64_REG_CR_IRR0);
  55.762 +			break;
  55.763 +		      case 1:
  55.764 +			irr = ia64_getreg(_IA64_REG_CR_IRR1);
  55.765 +			break;
  55.766 +		      case 2:
  55.767 +			irr = ia64_getreg(_IA64_REG_CR_IRR2);
  55.768 +			break;
  55.769 +		      case 3:
  55.770 +			irr = ia64_getreg(_IA64_REG_CR_IRR3);
  55.771 +			break;
  55.772 +		}
  55.773 +		cpu_relax();
  55.774 +	} while (!(irr & (1UL << irr_bit))) ;
  55.775 +}
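
ia64_mca_wakeup_ipi_wait() above splits the 8-bit wakeup vector into an IRR register index (vector >> 6) and a bit index (vector & 0x3f), then spins until that bit becomes set. A hedged sketch of the same split, with a plain array standing in for the four 64-bit cr.irr registers:

    /* Sketch of the vector -> (IRR register, bit) split; a plain array
     * stands in for cr.irr0-3. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_irr[4];   /* stand-in for the four 64-bit IRR registers */

    static void post_vector(unsigned vector)
    {
        fake_irr[vector >> 6] |= (uint64_t)1 << (vector & 0x3f);
    }

    static int vector_pending(unsigned vector)
    {
        return (fake_irr[vector >> 6] >> (vector & 0x3f)) & 1;
    }

    int main(void)
    {
        unsigned wakeup_vector = 0x3e;   /* hypothetical vector number */

        printf("pending before: %d\n", vector_pending(wakeup_vector));
        post_vector(wakeup_vector);
        printf("pending after:  %d\n", vector_pending(wakeup_vector));
        return 0;
    }
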
  55.776 +
  55.777 +/*
  55.778 + * ia64_mca_wakeup
  55.779 + *
   55.780 + *	Send an inter-cpu interrupt to wake up a particular cpu
   55.781 + *	and mark that cpu as out of rendezvous.
  55.782 + *
  55.783 + *  Inputs  :   cpuid
  55.784 + *  Outputs :   None
  55.785 + */
  55.786 +static void
  55.787 +ia64_mca_wakeup(int cpu)
  55.788 +{
  55.789 +	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
  55.790 +	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
  55.791 +
  55.792 +}
  55.793 +
  55.794 +/*
  55.795 + * ia64_mca_wakeup_all
  55.796 + *
   55.797 + *	Wake up all the cpus which have previously rendezvoused.
  55.798 + *
  55.799 + *  Inputs  :   None
  55.800 + *  Outputs :   None
  55.801 + */
  55.802 +static void
  55.803 +ia64_mca_wakeup_all(void)
  55.804 +{
  55.805 +	int cpu;
  55.806 +
  55.807 +	/* Clear the Rendez checkin flag for all cpus */
  55.808 +	for(cpu = 0; cpu < NR_CPUS; cpu++) {
  55.809 +		if (!cpu_online(cpu))
  55.810 +			continue;
  55.811 +		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
  55.812 +			ia64_mca_wakeup(cpu);
  55.813 +	}
  55.814 +
  55.815 +}
  55.816 +
  55.817 +/*
  55.818 + * ia64_mca_rendez_interrupt_handler
  55.819 + *
   55.820 + *	This is the handler used to put slave processors into a spinloop
   55.821 + *	while the monarch processor does the MCA handling; the monarch
   55.822 + *	later wakes each slave up once it is done.
  55.823 + *
  55.824 + *  Inputs  :   None
  55.825 + *  Outputs :   None
  55.826 + */
  55.827 +static irqreturn_t
  55.828 +ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
  55.829 +{
  55.830 +	unsigned long flags;
  55.831 +	int cpu = smp_processor_id();
  55.832 +
  55.833 +	/* Mask all interrupts */
  55.834 +	local_irq_save(flags);
  55.835 +
  55.836 +	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
  55.837 +	/* Register with the SAL monarch that the slave has
  55.838 +	 * reached SAL
  55.839 +	 */
  55.840 +	ia64_sal_mc_rendez();
  55.841 +
  55.842 +	/* Wait for the wakeup IPI from the monarch
  55.843 +	 * This waiting is done by polling on the wakeup-interrupt
  55.844 +	 * vector bit in the processor's IRRs
  55.845 +	 */
  55.846 +	ia64_mca_wakeup_ipi_wait();
  55.847 +
  55.848 +	/* Enable all interrupts */
  55.849 +	local_irq_restore(flags);
  55.850 +	return IRQ_HANDLED;
  55.851 +}
  55.852 +
  55.853 +/*
  55.854 + * ia64_mca_wakeup_int_handler
  55.855 + *
  55.856 + *	The interrupt handler for processing the inter-cpu interrupt to the
  55.857 + *	slave cpu which was spinning in the rendez loop.
  55.858 + *	Since this spinning is done by turning off the interrupts and
  55.859 + *	polling on the wakeup-interrupt bit in the IRR, there is
  55.860 + *	nothing useful to be done in the handler.
  55.861 + *
  55.862 + *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
  55.863 + *	arg		(Interrupt handler specific argument)
  55.864 + *	ptregs		(Exception frame at the time of the interrupt)
  55.865 + *  Outputs :   None
  55.866 + *
  55.867 + */
  55.868 +static irqreturn_t
  55.869 +ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
  55.870 +{
  55.871 +	return IRQ_HANDLED;
  55.872 +}
  55.873 +
  55.874 +/*
  55.875 + * ia64_return_to_sal_check
  55.876 + *
   55.877 + *	This is the function called before going back from the OS_MCA handler
   55.878 + *	to the OS_MCA dispatch code, which finally hands control back
   55.879 + *	to the SAL.
   55.880 + *	The main purpose of this routine is to set up the OS_MCA to SAL
   55.881 + *	return state, which is used by the OS_MCA dispatch code
   55.882 + *	just before going back to SAL.
  55.883 + *
  55.884 + *  Inputs  :   None
  55.885 + *  Outputs :   None
  55.886 + */
  55.887 +
  55.888 +static void
  55.889 +ia64_return_to_sal_check(int recover)
  55.890 +{
  55.891 +
  55.892 +	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
  55.893 +	 * so that it can be used at the time of os_mca_to_sal_handoff
  55.894 +	 */
  55.895 +	ia64_os_to_sal_handoff_state.imots_sal_gp =
  55.896 +		ia64_sal_to_os_handoff_state.imsto_sal_gp;
  55.897 +
  55.898 +	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
  55.899 +		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
  55.900 +
  55.901 +	if (recover)
  55.902 +		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
  55.903 +	else
  55.904 +		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
  55.905 +
  55.906 +	/* Default = tell SAL to return to same context */
  55.907 +	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
  55.908 +
  55.909 +	ia64_os_to_sal_handoff_state.imots_new_min_state =
  55.910 +		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
  55.911 +
  55.912 +}
  55.913 +
  55.914 +/* Function pointer for extra MCA recovery */
  55.915 +int (*ia64_mca_ucmc_extension)
  55.916 +	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
  55.917 +	= NULL;
  55.918 +
  55.919 +int
  55.920 +ia64_reg_MCA_extension(void *fn)
  55.921 +{
  55.922 +	if (ia64_mca_ucmc_extension)
  55.923 +		return 1;
  55.924 +
  55.925 +	ia64_mca_ucmc_extension = fn;
  55.926 +	return 0;
  55.927 +}
  55.928 +
  55.929 +void
  55.930 +ia64_unreg_MCA_extension(void)
  55.931 +{
  55.932 +	if (ia64_mca_ucmc_extension)
  55.933 +		ia64_mca_ucmc_extension = NULL;
  55.934 +}
  55.935 +
  55.936 +EXPORT_SYMBOL(ia64_reg_MCA_extension);
  55.937 +EXPORT_SYMBOL(ia64_unreg_MCA_extension);
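
ia64_reg_MCA_extension()/ia64_unreg_MCA_extension() above manage a single-slot recovery hook: registration fails (returns 1) if a handler is already installed, and the MCA path simply calls the hook when one is present. A standalone sketch of that pattern with illustrative names:

    /* Sketch of the single-slot recovery-extension hook: registration fails
     * if a handler is already installed.  Names are illustrative. */
    #include <stdio.h>

    typedef int (*recovery_fn)(void *record);

    static recovery_fn mca_extension;

    static int reg_extension(recovery_fn fn)
    {
        if (mca_extension)
            return 1;          /* somebody else already owns the slot */
        mca_extension = fn;
        return 0;
    }

    static void unreg_extension(void)
    {
        mca_extension = NULL;
    }

    static int try_recover(void *record)
    {
        return mca_extension ? mca_extension(record) : 0;
    }

    static int demo_handler(void *record)
    {
        (void)record;
        return 1;              /* pretend the error was recovered */
    }

    int main(void)
    {
        printf("register: %d\n", reg_extension(demo_handler));
        printf("register again: %d\n", reg_extension(demo_handler));
        printf("recovered: %d\n", try_recover(NULL));
        unreg_extension();
        printf("recovered after unreg: %d\n", try_recover(NULL));
        return 0;
    }
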
  55.938 +
  55.939 +/*
  55.940 + * ia64_mca_ucmc_handler
  55.941 + *
 55.942 + *	This is the uncorrectable machine check handler called from OS_MCA
  55.943 + *	dispatch code which is in turn called from SAL_CHECK().
  55.944 + *	This is the place where the core of OS MCA handling is done.
  55.945 + *	Right now the logs are extracted and displayed in a well-defined
  55.946 + *	format. This handler code is supposed to be run only on the
 55.947 + *	monarch processor. Once the monarch is done with MCA handling,
 55.948 + *	further MCA logging is enabled by clearing the logs.
 55.949 + *	The monarch also has the duty of sending wakeup IPIs to pull the
 55.950 + *	slave processors out of the rendezvous spinloop.
  55.951 + *
  55.952 + *  Inputs  :   None
  55.953 + *  Outputs :   None
  55.954 + */
  55.955 +void
  55.956 +ia64_mca_ucmc_handler(void)
  55.957 +{
  55.958 +	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
  55.959 +		&ia64_sal_to_os_handoff_state.proc_state_param;
  55.960 +	int recover; 
  55.961 +
  55.962 +	/* Get the MCA error record and log it */
  55.963 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
  55.964 +
 55.965 +	/* Recover if a TLB error is the only error in this SAL error record */
  55.966 +	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
  55.967 +	/* other error recovery */
  55.968 +	   || (ia64_mca_ucmc_extension 
  55.969 +		&& ia64_mca_ucmc_extension(
  55.970 +			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
  55.971 +			&ia64_sal_to_os_handoff_state,
  55.972 +			&ia64_os_to_sal_handoff_state)); 
  55.973 +
  55.974 +	if (recover) {
  55.975 +		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
  55.976 +		rh->severity = sal_log_severity_corrected;
  55.977 +		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
  55.978 +	}
  55.979 +	/*
  55.980 +	 *  Wakeup all the processors which are spinning in the rendezvous
  55.981 +	 *  loop.
  55.982 +	 */
  55.983 +	ia64_mca_wakeup_all();
  55.984 +
  55.985 +	/* Return to SAL */
  55.986 +	ia64_return_to_sal_check(recover);
  55.987 +}
  55.988 +
  55.989 +static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
  55.990 +static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
  55.991 +
  55.992 +/*
  55.993 + * ia64_mca_cmc_int_handler
  55.994 + *
 55.995 + *  This is the corrected machine check interrupt handler.
  55.996 + *	Right now the logs are extracted and displayed in a well-defined
  55.997 + *	format.
  55.998 + *
  55.999 + * Inputs
 55.1000 + *      interrupt number
 55.1001 + *      client data arg ptr
 55.1002 + *      saved registers ptr
 55.1003 + *
 55.1004 + * Outputs
 55.1005 + *	None
 55.1006 + */
 55.1007 +static irqreturn_t
 55.1008 +ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 55.1009 +{
 55.1010 +	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
 55.1011 +	static int		index;
 55.1012 +	static DEFINE_SPINLOCK(cmc_history_lock);
 55.1013 +
 55.1014 +	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
 55.1015 +		       __FUNCTION__, cmc_irq, smp_processor_id());
 55.1016 +
 55.1017 +	/* SAL spec states this should run w/ interrupts enabled */
 55.1018 +	local_irq_enable();
 55.1019 +
 55.1020 +	/* Get the CMC error record and log it */
 55.1021 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
 55.1022 +
 55.1023 +	spin_lock(&cmc_history_lock);
 55.1024 +	if (!cmc_polling_enabled) {
 55.1025 +		int i, count = 1; /* we know 1 happened now */
 55.1026 +		unsigned long now = jiffies;
 55.1027 +
 55.1028 +		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
 55.1029 +			if (now - cmc_history[i] <= HZ)
 55.1030 +				count++;
 55.1031 +		}
 55.1032 +
 55.1033 +		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
 55.1034 +		if (count >= CMC_HISTORY_LENGTH) {
 55.1035 +
 55.1036 +			cmc_polling_enabled = 1;
 55.1037 +			spin_unlock(&cmc_history_lock);
 55.1038 +			schedule_work(&cmc_disable_work);
 55.1039 +
 55.1040 +			/*
 55.1041 +			 * Corrected errors will still be corrected, but
 55.1042 +			 * make sure there's a log somewhere that indicates
 55.1043 +			 * something is generating more than we can handle.
 55.1044 +			 */
 55.1045 +			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
 55.1046 +
 55.1047 +			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 55.1048 +
 55.1049 +			/* lock already released, get out now */
 55.1050 +			return IRQ_HANDLED;
 55.1051 +		} else {
 55.1052 +			cmc_history[index++] = now;
 55.1053 +			if (index == CMC_HISTORY_LENGTH)
 55.1054 +				index = 0;
 55.1055 +		}
 55.1056 +	}
 55.1057 +	spin_unlock(&cmc_history_lock);
 55.1058 +	return IRQ_HANDLED;
 55.1059 +}
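
The storm detection above keeps the jiffies timestamps of the last CMC_HISTORY_LENGTH interrupts and switches to polled handling once that many corrected errors land within one second. A simplified sketch of the same rate threshold, using plain seconds instead of jiffies and no locking:

    /* Rate-threshold sketch matching the CMC/CPE history logic: remember
     * the last N event times and trip once N events fall within one second. */
    #include <stdio.h>

    #define HISTORY_LENGTH 5

    static double history[HISTORY_LENGTH];
    static int    slot;

    /* Returns 1 when the storm threshold is reached and the caller should
     * switch to polling, 0 otherwise. */
    static int record_event(double now)
    {
        int i, count = 1;              /* the event we are recording now */

        for (i = 0; i < HISTORY_LENGTH; i++)
            if (now - history[i] <= 1.0)
                count++;

        if (count >= HISTORY_LENGTH)
            return 1;

        history[slot++] = now;
        if (slot == HISTORY_LENGTH)
            slot = 0;
        return 0;
    }

    int main(void)
    {
        double t[] = { 10.0, 10.1, 10.2, 10.3, 10.35, 10.4 };
        for (int i = 0; i < 6; i++)
            printf("t=%.2f storm=%d\n", t[i], record_event(t[i]));
        return 0;
    }

The fifth closely spaced event trips the threshold, which is the point at which the real handler masks the CMC vector and falls back to the poll timer.
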
 55.1060 +
 55.1061 +/*
 55.1062 + *  ia64_mca_cmc_int_caller
 55.1063 + *
 55.1064 + * 	Triggered by sw interrupt from CMC polling routine.  Calls
 55.1065 + * 	real interrupt handler and either triggers a sw interrupt
 55.1066 + * 	on the next cpu or does cleanup at the end.
 55.1067 + *
 55.1068 + * Inputs
 55.1069 + *	interrupt number
 55.1070 + *	client data arg ptr
 55.1071 + *	saved registers ptr
 55.1072 + * Outputs
 55.1073 + * 	handled
 55.1074 + */
 55.1075 +static irqreturn_t
 55.1076 +ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
 55.1077 +{
 55.1078 +	static int start_count = -1;
 55.1079 +	unsigned int cpuid;
 55.1080 +
 55.1081 +	cpuid = smp_processor_id();
 55.1082 +
 55.1083 +	/* If first cpu, update count */
 55.1084 +	if (start_count == -1)
 55.1085 +		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
 55.1086 +
 55.1087 +	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
 55.1088 +
 55.1089 +	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 55.1090 +
 55.1091 +	if (cpuid < NR_CPUS) {
 55.1092 +		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 55.1093 +	} else {
 55.1094 +		/* If no log record, switch out of polling mode */
 55.1095 +		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
 55.1096 +
 55.1097 +			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
 55.1098 +			schedule_work(&cmc_enable_work);
 55.1099 +			cmc_polling_enabled = 0;
 55.1100 +
 55.1101 +		} else {
 55.1102 +
 55.1103 +			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 55.1104 +		}
 55.1105 +
 55.1106 +		start_count = -1;
 55.1107 +	}
 55.1108 +
 55.1109 +	return IRQ_HANDLED;
 55.1110 +}
 55.1111 +
 55.1112 +/*
 55.1113 + *  ia64_mca_cmc_poll
 55.1114 + *
 55.1115 + *	Poll for Corrected Machine Checks (CMCs)
 55.1116 + *
 55.1117 + * Inputs   :   dummy(unused)
 55.1118 + * Outputs  :   None
 55.1119 + *
 55.1120 + */
 55.1121 +static void
 55.1122 +ia64_mca_cmc_poll (unsigned long dummy)
 55.1123 +{
 55.1124 +	/* Trigger a CMC interrupt cascade  */
 55.1125 +	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 55.1126 +}
 55.1127 +
 55.1128 +/*
 55.1129 + *  ia64_mca_cpe_int_caller
 55.1130 + *
 55.1131 + * 	Triggered by sw interrupt from CPE polling routine.  Calls
 55.1132 + * 	real interrupt handler and either triggers a sw interrupt
 55.1133 + * 	on the next cpu or does cleanup at the end.
 55.1134 + *
 55.1135 + * Inputs
 55.1136 + *	interrupt number
 55.1137 + *	client data arg ptr
 55.1138 + *	saved registers ptr
 55.1139 + * Outputs
 55.1140 + * 	handled
 55.1141 + */
 55.1142 +#ifdef CONFIG_ACPI
 55.1143 +
 55.1144 +static irqreturn_t
 55.1145 +ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 55.1146 +{
 55.1147 +	static int start_count = -1;
 55.1148 +	static int poll_time = MIN_CPE_POLL_INTERVAL;
 55.1149 +	unsigned int cpuid;
 55.1150 +
 55.1151 +	cpuid = smp_processor_id();
 55.1152 +
 55.1153 +	/* If first cpu, update count */
 55.1154 +	if (start_count == -1)
 55.1155 +		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
 55.1156 +
 55.1157 +	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
 55.1158 +
 55.1159 +	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 55.1160 +
 55.1161 +	if (cpuid < NR_CPUS) {
 55.1162 +		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 55.1163 +	} else {
 55.1164 +		/*
 55.1165 +		 * If a log was recorded, increase our polling frequency,
 55.1166 +		 * otherwise, backoff or return to interrupt mode.
 55.1167 +		 */
 55.1168 +		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
 55.1169 +			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
 55.1170 +		} else if (cpe_vector < 0) {
 55.1171 +			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
 55.1172 +		} else {
 55.1173 +			poll_time = MIN_CPE_POLL_INTERVAL;
 55.1174 +
 55.1175 +			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
 55.1176 +			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
 55.1177 +			cpe_poll_enabled = 0;
 55.1178 +		}
 55.1179 +
 55.1180 +		if (cpe_poll_enabled)
 55.1181 +			mod_timer(&cpe_poll_timer, jiffies + poll_time);
 55.1182 +		start_count = -1;
 55.1183 +	}
 55.1184 +
 55.1185 +	return IRQ_HANDLED;
 55.1186 +}
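
The CPE poll interval above adapts: it is halved (down to MIN_CPE_POLL_INTERVAL) while polls keep finding records and doubled (up to MAX_CPE_POLL_INTERVAL) while they come back empty and no CPEI is available. A sketch of just that backoff, in seconds rather than jiffies:

    /* Sketch of the adaptive CPE poll backoff: halve the interval when a
     * poll found a record, double it (bounded) when it did not. */
    #include <stdio.h>

    #define MIN_POLL  (2 * 60)       /* 2 minutes  */
    #define MAX_POLL  (15 * 60)      /* 15 minutes */

    static int next_interval(int current, int found_record)
    {
        if (found_record) {
            int halved = current / 2;
            return halved > MIN_POLL ? halved : MIN_POLL;
        }
        int doubled = current * 2;
        return doubled < MAX_POLL ? doubled : MAX_POLL;
    }

    int main(void)
    {
        int interval = MIN_POLL;
        int activity[] = { 0, 0, 0, 1, 1, 0 };   /* hypothetical poll results */

        for (int i = 0; i < 6; i++) {
            interval = next_interval(interval, activity[i]);
            printf("poll %d: found=%d next interval=%ds\n",
                   i, activity[i], interval);
        }
        return 0;
    }
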
 55.1187 +
 55.1188 +/*
 55.1189 + *  ia64_mca_cpe_poll
 55.1190 + *
  55.1191 + *	Poll for Corrected Platform Errors (CPEs); trigger an interrupt
  55.1192 + *	on the first cpu, from where it will trickle through all the cpus.
 55.1193 + *
 55.1194 + * Inputs   :   dummy(unused)
 55.1195 + * Outputs  :   None
 55.1196 + *
 55.1197 + */
 55.1198 +static void
 55.1199 +ia64_mca_cpe_poll (unsigned long dummy)
 55.1200 +{
 55.1201 +	/* Trigger a CPE interrupt cascade  */
 55.1202 +	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 55.1203 +}
 55.1204 +
 55.1205 +#endif /* CONFIG_ACPI */
 55.1206 +#endif /* !XEN */
 55.1207 +
 55.1208 +/*
 55.1209 + * C portion of the OS INIT handler
 55.1210 + *
 55.1211 + * Called from ia64_monarch_init_handler
 55.1212 + *
 55.1213 + * Inputs: pointer to pt_regs where processor info was saved.
 55.1214 + *
  55.1215 + * Returns: nothing.  The handler is declared void; it ends in
  55.1216 + *   init_handler_platform(), which does not return.  (The old 0 = warm boot,
  55.1217 + *   1 = return via PAL_MC_RESUME convention no longer applies.)
 55.1218 + *
 55.1219 + */
 55.1220 +void
 55.1221 +ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
 55.1222 +{
 55.1223 +	pal_min_state_area_t *ms;
 55.1224 +#ifdef XEN
 55.1225 +	int cpu = smp_processor_id();
 55.1226 +
 55.1227 +	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
 55.1228 +	       ia64_sal_to_os_handoff_state[cpu].proc_state_param);
 55.1229 +#endif
 55.1230 +
 55.1231 +#ifndef XEN
 55.1232 +	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
 55.1233 +	console_loglevel = 15;	/* make sure printks make it to console */
 55.1234 +
 55.1235 +	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
 55.1236 +		ia64_sal_to_os_handoff_state.proc_state_param);
 55.1237 +
 55.1238 +	/*
 55.1239 +	 * Address of minstate area provided by PAL is physical,
 55.1240 +	 * uncacheable (bit 63 set). Convert to Linux virtual
 55.1241 +	 * address in region 6.
 55.1242 +	 */
 55.1243 +	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
 55.1244 +#else
 55.1245 +	/* Xen virtual address in region 7. */
 55.1246 +	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
 55.1247 +#endif
 55.1248 +
 55.1249 +	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
 55.1250 +}
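
The #ifdef above differs only in which identity-mapped view of the PAL min-state area is used: Linux forms a region-6 (uncacheable) address by OR-ing 6 into bits 63:61, while Xen's __va() yields the cached region-7 view. A simplified sketch of that region encoding; the physical address below is hypothetical and the real __va() may additionally mask bits first.

    /* Sketch of ia64 region encoding: the top three virtual-address bits
     * select a region, so an identity-mapped view is formed by OR-ing the
     * region number into bits 63:61 of the physical address. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t region_addr(uint64_t phys, unsigned region)
    {
        return phys | ((uint64_t)region << 61);
    }

    int main(void)
    {
        uint64_t pal_min_state = 0x4000000;    /* hypothetical physical address */

        printf("region 6 (uncached) view: %#llx\n",
               (unsigned long long)region_addr(pal_min_state, 6));
        printf("region 7 (cached) view:   %#llx\n",
               (unsigned long long)region_addr(pal_min_state, 7));
        return 0;
    }
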
 55.1251 +
 55.1252 +#ifndef XEN
 55.1253 +static int __init
 55.1254 +ia64_mca_disable_cpe_polling(char *str)
 55.1255 +{
 55.1256 +	cpe_poll_enabled = 0;
 55.1257 +	return 1;
 55.1258 +}
 55.1259 +
 55.1260 +__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 55.1261 +
 55.1262 +static struct irqaction cmci_irqaction = {
 55.1263 +	.handler =	ia64_mca_cmc_int_handler,
 55.1264 +	.flags =	SA_INTERRUPT,
 55.1265 +	.name =		"cmc_hndlr"
 55.1266 +};
 55.1267 +
 55.1268 +static struct irqaction cmcp_irqaction = {
 55.1269 +	.handler =	ia64_mca_cmc_int_caller,
 55.1270 +	.flags =	SA_INTERRUPT,
 55.1271 +	.name =		"cmc_poll"
 55.1272 +};
 55.1273 +
 55.1274 +static struct irqaction mca_rdzv_irqaction = {
 55.1275 +	.handler =	ia64_mca_rendez_int_handler,
 55.1276 +	.flags =	SA_INTERRUPT,
 55.1277 +	.name =		"mca_rdzv"
 55.1278 +};
 55.1279 +
 55.1280 +static struct irqaction mca_wkup_irqaction = {
 55.1281 +	.handler =	ia64_mca_wakeup_int_handler,
 55.1282 +	.flags =	SA_INTERRUPT,
 55.1283 +	.name =		"mca_wkup"
 55.1284 +};
 55.1285 +
 55.1286 +#ifdef CONFIG_ACPI
 55.1287 +static struct irqaction mca_cpe_irqaction = {
 55.1288 +	.handler =	ia64_mca_cpe_int_handler,
 55.1289 +	.flags =	SA_INTERRUPT,
 55.1290 +	.name =		"cpe_hndlr"
 55.1291 +};
 55.1292 +
 55.1293 +static struct irqaction mca_cpep_irqaction = {
 55.1294 +	.handler =	ia64_mca_cpe_int_caller,
 55.1295 +	.flags =	SA_INTERRUPT,
 55.1296 +	.name =		"cpe_poll"
 55.1297 +};
 55.1298 +#endif /* CONFIG_ACPI */
 55.1299 +#endif /* !XEN */
 55.1300 +
 55.1301 +/* Do per-CPU MCA-related initialization.  */
 55.1302 +
 55.1303 +void __devinit
 55.1304 +ia64_mca_cpu_init(void *cpu_data)
 55.1305 +{
 55.1306 +	void *pal_vaddr;
 55.1307 +
 55.1308 +	if (smp_processor_id() == 0) {
 55.1309 +		void *mca_data;
 55.1310 +		int cpu;
 55.1311 +
 55.1312 +#ifdef XEN
 55.1313 +		unsigned int pageorder;
 55.1314 +		pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
 55.1315 +#else
 55.1316 +		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
 55.1317 +					 * NR_CPUS);
 55.1318 +#endif
 55.1319 +		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 55.1320 +#ifdef XEN
 55.1321 +			mca_data = alloc_xenheap_pages(pageorder);
 55.1322 +			__per_cpu_mca[cpu] = __pa(mca_data);
 55.1323 +			IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
 55.1324 +			               "(mca_data[%d]=%lx)\n",
 55.1325 +				       __FUNCTION__, cpu, __per_cpu_mca[cpu],
 55.1326 +				       cpu, (u64)mca_data);
 55.1327 +#else
 55.1328 +			__per_cpu_mca[cpu] = __pa(mca_data);
 55.1329 +			mca_data += sizeof(struct ia64_mca_cpu);
 55.1330 +#endif
 55.1331 +		}
 55.1332 +	}
 55.1333 +
 55.1334 +        /*
 55.1335 +         * The MCA info structure was allocated earlier and its
 55.1336 +         * physical address saved in __per_cpu_mca[cpu].  Copy that
  55.1337 +         * address to ia64_mca_data so we can access it as a per-CPU
 55.1338 +         * variable.
 55.1339 +         */
 55.1340 +	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
 55.1341 +#ifdef XEN
 55.1342 +	IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
 55.1343 +	               smp_processor_id(), __get_cpu_var(ia64_mca_data));
 55.1344 +
 55.1345 +	/* sal_to_os_handoff for smp support */
 55.1346 +	__get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
 55.1347 +	              __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
 55.1348 +	IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
 55.1349 +	               smp_processor_id(),
 55.1350 +		       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
 55.1351 +#endif
 55.1352 +
 55.1353 +	/*
 55.1354 +	 * Stash away a copy of the PTE needed to map the per-CPU page.
 55.1355 +	 * We may need it during MCA recovery.
 55.1356 +	 */
 55.1357 +	__get_cpu_var(ia64_mca_per_cpu_pte) =
 55.1358 +		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
 55.1359 +
 55.1360 +        /*
 55.1361 +         * Also, stash away a copy of the PAL address and the PTE
 55.1362 +         * needed to map it.
 55.1363 +         */
 55.1364 +        pal_vaddr = efi_get_pal_addr();
 55.1365 +	if (!pal_vaddr)
 55.1366 +		return;
 55.1367 +	__get_cpu_var(ia64_mca_pal_base) =
 55.1368 +		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
 55.1369 +	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
 55.1370 +							      PAGE_KERNEL));
 55.1371 +}
 55.1372 +
 55.1373 +/*
 55.1374 + * ia64_mca_init
 55.1375 + *
 55.1376 + *  Do all the system level mca specific initialization.
 55.1377 + *
 55.1378 + *	1. Register spinloop and wakeup request interrupt vectors
 55.1379 + *
 55.1380 + *	2. Register OS_MCA handler entry point
 55.1381 + *
 55.1382 + *	3. Register OS_INIT handler entry point
 55.1383 + *
 55.1384 + *  4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 55.1385 + *
 55.1386 + *  Note that this initialization is done very early before some kernel
 55.1387 + *  services are available.
 55.1388 + *
 55.1389 + *  Inputs  :   None
 55.1390 + *
 55.1391 + *  Outputs :   None
 55.1392 + */
 55.1393 +void __init
 55.1394 +ia64_mca_init(void)
 55.1395 +{
 55.1396 +	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
 55.1397 +	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
 55.1398 +	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
 55.1399 +#ifdef XEN
 55.1400 +	s64 rc;
 55.1401 +
 55.1402 +	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
 55.1403 +
 55.1404 +	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
 55.1405 +#else
 55.1406 +	int i;
 55.1407 +	s64 rc;
 55.1408 +	struct ia64_sal_retval isrv;
 55.1409 +	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
 55.1410 +
 55.1411 +	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
 55.1412 +
 55.1413 +	/* Clear the Rendez checkin flag for all cpus */
 55.1414 +	for(i = 0 ; i < NR_CPUS; i++)
 55.1415 +		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 55.1416 +
 55.1417 +	/*
 55.1418 +	 * Register the rendezvous spinloop and wakeup mechanism with SAL
 55.1419 +	 */
 55.1420 +
 55.1421 +	/* Register the rendezvous interrupt vector with SAL */
 55.1422 +	while (1) {
 55.1423 +		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
 55.1424 +					      SAL_MC_PARAM_MECHANISM_INT,
 55.1425 +					      IA64_MCA_RENDEZ_VECTOR,
 55.1426 +					      timeout,
 55.1427 +					      SAL_MC_PARAM_RZ_ALWAYS);
 55.1428 +		rc = isrv.status;
 55.1429 +		if (rc == 0)
 55.1430 +			break;
 55.1431 +		if (rc == -2) {
 55.1432 +			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 55.1433 +				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 55.1434 +			timeout = isrv.v0;
 55.1435 +			continue;
 55.1436 +		}
 55.1437 +		printk(KERN_ERR "Failed to register rendezvous interrupt "
 55.1438 +		       "with SAL (status %ld)\n", rc);
 55.1439 +		return;
 55.1440 +	}
 55.1441 +
 55.1442 +	/* Register the wakeup interrupt vector with SAL */
 55.1443 +	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
 55.1444 +				      SAL_MC_PARAM_MECHANISM_INT,
 55.1445 +				      IA64_MCA_WAKEUP_VECTOR,
 55.1446 +				      0, 0);
 55.1447 +	rc = isrv.status;
 55.1448 +	if (rc) {
 55.1449 +		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
 55.1450 +		       "(status %ld)\n", rc);
 55.1451 +		return;
 55.1452 +	}
 55.1453 +
 55.1454 +	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
 55.1455 +#endif /* !XEN */
 55.1456 +
 55.1457 +	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
 55.1458 +	/*
 55.1459 +	 * XXX - disable SAL checksum by setting size to 0; should be
 55.1460 +	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
 55.1461 +	 */
 55.1462 +	ia64_mc_info.imi_mca_handler_size	= 0;
 55.1463 +
 55.1464 +	/* Register the os mca handler with SAL */
 55.1465 +	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
 55.1466 +				       ia64_mc_info.imi_mca_handler,
 55.1467 +				       ia64_tpa(mca_hldlr_ptr->gp),
 55.1468 +				       ia64_mc_info.imi_mca_handler_size,
 55.1469 +				       0, 0, 0)))
 55.1470 +	{
 55.1471 +		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
 55.1472 +		       "(status %ld)\n", rc);
 55.1473 +		return;
 55.1474 +	}
 55.1475 +
 55.1476 +	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
 55.1477 +		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
 55.1478 +
 55.1479 +	/*
 55.1480 +	 * XXX - disable SAL checksum by setting size to 0, should be
 55.1481 +	 * size of the actual init handler in mca_asm.S.
 55.1482 +	 */
 55.1483 +	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
 55.1484 +	ia64_mc_info.imi_monarch_init_handler_size	= 0;
 55.1485 +	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
 55.1486 +	ia64_mc_info.imi_slave_init_handler_size	= 0;
 55.1487 +
 55.1488 +	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
 55.1489 +		       ia64_mc_info.imi_monarch_init_handler);
 55.1490 +
 55.1491 +	/* Register the os init handler with SAL */
 55.1492 +	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
 55.1493 +				       ia64_mc_info.imi_monarch_init_handler,
 55.1494 +				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 55.1495 +				       ia64_mc_info.imi_monarch_init_handler_size,
 55.1496 +				       ia64_mc_info.imi_slave_init_handler,
 55.1497 +				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 55.1498 +				       ia64_mc_info.imi_slave_init_handler_size)))
 55.1499 +	{
 55.1500 +		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
 55.1501 +		       "(status %ld)\n", rc);
 55.1502 +		return;
 55.1503 +	}
 55.1504 +
 55.1505 +	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
 55.1506 +
 55.1507 +#ifndef XEN
 55.1508 +	/*
 55.1509 +	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
 55.1510 +	 *  per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
 55.1511 +	 */
 55.1512 +	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
 55.1513 +	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
 55.1514 +	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
 55.1515 +
 55.1516 +	/* Setup the MCA rendezvous interrupt vector */
 55.1517 +	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
 55.1518 +
 55.1519 +	/* Setup the MCA wakeup interrupt vector */
 55.1520 +	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
 55.1521 +
 55.1522 +#ifdef CONFIG_ACPI
 55.1523 +	/* Setup the CPEI/P handler */
 55.1524 +	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 55.1525 +#endif
 55.1526 +
 55.1527 +	/* Initialize the areas set aside by the OS to buffer the
 55.1528 +	 * platform/processor error states for MCA/INIT/CMC
 55.1529 +	 * handling.
 55.1530 +	 */
 55.1531 +	ia64_log_init(SAL_INFO_TYPE_MCA);
 55.1532 +	ia64_log_init(SAL_INFO_TYPE_INIT);
 55.1533 +	ia64_log_init(SAL_INFO_TYPE_CMC);
 55.1534 +	ia64_log_init(SAL_INFO_TYPE_CPE);
 55.1535 +#endif /* !XEN */
 55.1536 +
 55.1537 +	mca_init = 1;
 55.1538 +	printk(KERN_INFO "MCA related initialization done\n");
 55.1539 +}
 55.1540 +
 55.1541 +#ifndef XEN
 55.1542 +/*
 55.1543 + * ia64_mca_late_init
 55.1544 + *
 55.1545 + *	Opportunity to set up things that require initialization later
 55.1546 + *	than ia64_mca_init.  Set up a timer to poll for CPEs if the
 55.1547 + *	platform doesn't support an interrupt-driven mechanism.
 55.1548 + *
 55.1549 + *  Inputs  :   None
 55.1550 + *  Outputs :   Status
 55.1551 + */
 55.1552 +static int __init
 55.1553 +ia64_mca_late_init(void)
 55.1554 +{
 55.1555 +	if (!mca_init)
 55.1556 +		return 0;
 55.1557 +
 55.1558 +	/* Setup the CMCI/P vector and handler */
 55.1559 +	init_timer(&cmc_poll_timer);
 55.1560 +	cmc_poll_timer.function = ia64_mca_cmc_poll;
 55.1561 +
 55.1562 +	/* Unmask/enable the vector */
 55.1563 +	cmc_polling_enabled = 0;
 55.1564 +	schedule_work(&cmc_enable_work);
 55.1565 +
 55.1566 +	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 55.1567 +
 55.1568 +#ifdef CONFIG_ACPI
 55.1569 +	/* Setup the CPEI/P vector and handler */
 55.1570 +	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
 55.1571 +	init_timer(&cpe_poll_timer);
 55.1572 +	cpe_poll_timer.function = ia64_mca_cpe_poll;
 55.1573 +
 55.1574 +	{
 55.1575 +		irq_desc_t *desc;
 55.1576 +		unsigned int irq;
 55.1577 +
 55.1578 +		if (cpe_vector >= 0) {
 55.1579 +			/* If platform supports CPEI, enable the irq. */
 55.1580 +			cpe_poll_enabled = 0;
 55.1581 +			for (irq = 0; irq < NR_IRQS; ++irq)
 55.1582 +				if (irq_to_vector(irq) == cpe_vector) {
 55.1583 +					desc = irq_descp(irq);
 55.1584 +					desc->status |= IRQ_PER_CPU;
 55.1585 +					setup_irq(irq, &mca_cpe_irqaction);
 55.1586 +				}
 55.1587 +			ia64_mca_register_cpev(cpe_vector);
 55.1588 +			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
 55.1589 +		} else {
 55.1590 +			/* If platform doesn't support CPEI, get the timer going. */
 55.1591 +			if (cpe_poll_enabled) {
 55.1592 +				ia64_mca_cpe_poll(0UL);
 55.1593 +				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
 55.1594 +			}
 55.1595 +		}
 55.1596 +	}
 55.1597 +#endif
 55.1598 +
 55.1599 +	return 0;
 55.1600 +}
 55.1601 +
 55.1602 +device_initcall(ia64_mca_late_init);
 55.1603 +#endif /* !XEN */
    56.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    56.2 +++ b/xen/arch/ia64/linux-xen/mca_asm.S	Fri Jul 28 10:51:38 2006 +0100
    56.3 @@ -0,0 +1,970 @@
    56.4 +//
    56.5 +// assembly portion of the IA64 MCA handling
    56.6 +//
    56.7 +// Mods by cfleck to integrate into kernel build
    56.8 +// 00/03/15 davidm Added various stop bits to get a clean compile
    56.9 +//
   56.10 +// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
   56.11 +//		   kstack, switch modes, jump to C INIT handler
   56.12 +//
   56.13 +// 02/01/04 J.Hall <jenna.s.hall@intel.com>
   56.14 +//		   Before entering virtual mode code:
   56.15 +//		   1. Check for TLB CPU error
   56.16 +//		   2. Restore current thread pointer to kr6
   56.17 +//		   3. Move stack ptr 16 bytes to conform to C calling convention
   56.18 +//
   56.19 +// 04/11/12 Russ Anderson <rja@sgi.com>
   56.20 +//		   Added per cpu MCA/INIT stack save areas.
   56.21 +//
   56.22 +#include <linux/config.h>
   56.23 +#include <linux/threads.h>
   56.24 +
   56.25 +#include <asm/asmmacro.h>
   56.26 +#include <asm/pgtable.h>
   56.27 +#include <asm/processor.h>
   56.28 +#include <asm/mca_asm.h>
   56.29 +#include <asm/mca.h>
   56.30 +
   56.31 +/*
   56.32 + * When we get a machine check, the kernel stack pointer is no longer
   56.33 + * valid, so we need to set a new stack pointer.
   56.34 + */
   56.35 +#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */
   56.36 +
   56.37 +/*
   56.38 + * Needed for return context to SAL
   56.39 + */
   56.40 +#define IA64_MCA_SAME_CONTEXT	0
   56.41 +#define IA64_MCA_COLD_BOOT	-2
   56.42 +
   56.43 +#include "minstate.h"
   56.44 +
   56.45 +/*
   56.46 + * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
   56.47 + *		1. GR1 = OS GP
   56.48 + *		2. GR8 = PAL_PROC physical address
   56.49 + *		3. GR9 = SAL_PROC physical address
   56.50 + *		4. GR10 = SAL GP (physical)
   56.51 + *		5. GR11 = Rendez state
   56.52 + *		6. GR12 = Return address to location within SAL_CHECK
   56.53 + */
   56.54 +#ifdef XEN
   56.55 +#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
   56.56 +	movl	_tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);;	\
   56.57 +	tpa	_tmp=_tmp;;				\
   56.58 +	ld8	_tmp=[_tmp];;				\
   56.59 +	st8	[_tmp]=r1,0x08;;			\
   56.60 +	st8	[_tmp]=r8,0x08;;			\
   56.61 +	st8	[_tmp]=r9,0x08;;			\
   56.62 +	st8	[_tmp]=r10,0x08;;			\
   56.63 +	st8	[_tmp]=r11,0x08;;			\
   56.64 +	st8	[_tmp]=r12,0x08;;			\
   56.65 +	st8	[_tmp]=r17,0x08;;			\
   56.66 +	st8	[_tmp]=r18,0x08
   56.67 +#else
   56.68 +#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
   56.69 +	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
   56.70 +	st8	[_tmp]=r1,0x08;;			\
   56.71 +	st8	[_tmp]=r8,0x08;;			\
   56.72 +	st8	[_tmp]=r9,0x08;;			\
   56.73 +	st8	[_tmp]=r10,0x08;;			\
   56.74 +	st8	[_tmp]=r11,0x08;;			\
   56.75 +	st8	[_tmp]=r12,0x08;;			\
   56.76 +	st8	[_tmp]=r17,0x08;;			\
   56.77 +	st8	[_tmp]=r18,0x08
   56.78 +
   56.79 +/*
   56.80 + * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
   56.81 + * (p6) is executed if we never entered virtual mode (TLB error)
   56.82 + * (p7) is executed if we entered virtual mode as expected (normal case)
   56.83 + *	1. GR8 = OS_MCA return status
   56.84 + *	2. GR9 = SAL GP (physical)
   56.85 + *	3. GR10 = 0/1 returning same/new context
   56.86 + *	4. GR22 = New min state save area pointer
   56.87 + *	returns ptr to SAL rtn save loc in _tmp
   56.88 + */
   56.89 +#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
   56.90 +	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
   56.91 +	DATA_VA_TO_PA(_tmp);;				\
   56.92 +	ld8	r8=[_tmp],0x08;;			\
   56.93 +	ld8	r9=[_tmp],0x08;;			\
   56.94 +	ld8	r10=[_tmp],0x08;;			\
   56.95 +	ld8	r22=[_tmp],0x08;;
   56.96 +	// now _tmp is pointing to SAL rtn save location
   56.97 +
   56.98 +/*
   56.99 + * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
  56.100 + *	imots_os_status=IA64_MCA_COLD_BOOT
  56.101 + *	imots_sal_gp=SAL GP
  56.102 + *	imots_context=IA64_MCA_SAME_CONTEXT
  56.103 + *	imots_new_min_state=Min state save area pointer
  56.104 + *	imots_sal_check_ra=Return address to location within SAL_CHECK
  56.105 + *
  56.106 + */
  56.107 +#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
  56.108 +	movl	tmp=IA64_MCA_COLD_BOOT;					\
  56.109 +	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
  56.110 +	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
  56.111 +	st8	[os_to_sal_handoff]=tmp,8;;				\
  56.112 +	ld8	tmp=[sal_to_os_handoff],48;;				\
  56.113 +	st8	[os_to_sal_handoff]=tmp,8;;				\
  56.114 +	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
  56.115 +	st8	[os_to_sal_handoff]=tmp,8;;				\
  56.116 +	ld8	tmp=[sal_to_os_handoff],-8;;				\
  56.117 +	st8     [os_to_sal_handoff]=tmp,8;;				\
  56.118 +	ld8	tmp=[sal_to_os_handoff];;				\
  56.119 +	st8     [os_to_sal_handoff]=tmp;;
  56.120 +
  56.121 +#define GET_IA64_MCA_DATA(reg)						\
  56.122 +	GET_THIS_PADDR(reg, ia64_mca_data)				\
  56.123 +	;;								\
  56.124 +	ld8 reg=[reg]
  56.125 +
  56.126 +#endif /* XEN */
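// The store order used by SAL_TO_OS_MCA_HANDOFF_STATE_SAVE above mirrors the
// handoff structure declared in include/asm/mca.h (see the NOTE in
// ia64_os_mca_dispatch below).  A rough sketch of the assumed C layout -- the
// field names are illustrative, only the register-to-slot order is taken from
// the macros and the register lists above:
//
//	struct sal_to_os_handoff_sketch {
//		u64 os_gp;		/* r1:  OS GP */
//		u64 pal_proc;		/* r8:  PAL_PROC physical address */
//		u64 sal_proc;		/* r9:  SAL_PROC physical address */
//		u64 sal_gp;		/* r10: SAL GP (physical) */
//		u64 rendez_state;	/* r11: rendezvous state */
//		u64 sal_check_ra;	/* r12: return address into SAL_CHECK */
//		u64 min_state_area;	/* r17: min-state save area pointer */
//		u64 proc_state_param;	/* r18: processor state parameter */
//	};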
  56.127 +	.global ia64_os_mca_dispatch
  56.128 +	.global ia64_os_mca_dispatch_end
  56.129 +#ifndef XEN
  56.130 +	.global ia64_sal_to_os_handoff_state
  56.131 +	.global	ia64_os_to_sal_handoff_state
  56.132 +	.global ia64_do_tlb_purge
  56.133 +#endif
  56.134 +
  56.135 +	.text
  56.136 +	.align 16
  56.137 +
  56.138 +#ifndef XEN
  56.139 +/*
  56.140 + * Just the TLB purge part is moved to a separate function
  56.141 + * so we can re-use it for the cpu hotplug code as well.
  56.142 + * The caller should set up b1, so we can branch once the
  56.143 + * tlb flush is complete.
  56.144 + */
  56.145 +
  56.146 +ia64_do_tlb_purge:
  56.147 +#define O(member)	IA64_CPUINFO_##member##_OFFSET
  56.148 +
  56.149 +	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
  56.150 +	;;
  56.151 +	addl r17=O(PTCE_STRIDE),r2
  56.152 +	addl r2=O(PTCE_BASE),r2
  56.153 +	;;
  56.154 +	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
  56.155 +	ld4 r19=[r2],4					// r19=ptce_count[0]
  56.156 +	ld4 r21=[r17],4					// r21=ptce_stride[0]
  56.157 +	;;
  56.158 +	ld4 r20=[r2]					// r20=ptce_count[1]
  56.159 +	ld4 r22=[r17]					// r22=ptce_stride[1]
  56.160 +	mov r24=0
  56.161 +	;;
  56.162 +	adds r20=-1,r20
  56.163 +	;;
  56.164 +#undef O
  56.165 +
  56.166 +2:
  56.167 +	cmp.ltu p6,p7=r24,r19
  56.168 +(p7)	br.cond.dpnt.few 4f
  56.169 +	mov ar.lc=r20
  56.170 +3:
  56.171 +	ptc.e r18
  56.172 +	;;
  56.173 +	add r18=r22,r18
  56.174 +	br.cloop.sptk.few 3b
  56.175 +	;;
  56.176 +	add r18=r21,r18
  56.177 +	add r24=1,r24
  56.178 +	;;
  56.179 +	br.sptk.few 2b
  56.180 +4:
  56.181 +	srlz.i 			// srlz.i implies srlz.d
  56.182 +	;;
  56.183 +
  56.184 +        // Now purge addresses formerly mapped by TR registers
  56.185 +	// 1. Purge ITR&DTR for kernel.
  56.186 +	movl r16=KERNEL_START
  56.187 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
  56.188 +	;;
  56.189 +	ptr.i r16, r18
  56.190 +	ptr.d r16, r18
  56.191 +	;;
  56.192 +	srlz.i
  56.193 +	;;
  56.194 +	srlz.d
  56.195 +	;;
  56.196 +	// 2. Purge DTR for PERCPU data.
  56.197 +	movl r16=PERCPU_ADDR
  56.198 +	mov r18=PERCPU_PAGE_SHIFT<<2
  56.199 +	;;
  56.200 +	ptr.d r16,r18
  56.201 +	;;
  56.202 +	srlz.d
  56.203 +	;;
  56.204 +	// 3. Purge ITR for PAL code.
  56.205 +	GET_THIS_PADDR(r2, ia64_mca_pal_base)
  56.206 +	;;
  56.207 +	ld8 r16=[r2]
  56.208 +	mov r18=IA64_GRANULE_SHIFT<<2
  56.209 +	;;
  56.210 +	ptr.i r16,r18
  56.211 +	;;
  56.212 +	srlz.i
  56.213 +	;;
  56.214 +	// 4. Purge DTR for stack.
  56.215 +	mov r16=IA64_KR(CURRENT_STACK)
  56.216 +	;;
  56.217 +	shl r16=r16,IA64_GRANULE_SHIFT
  56.218 +	movl r19=PAGE_OFFSET
  56.219 +	;;
  56.220 +	add r16=r19,r16
  56.221 +	mov r18=IA64_GRANULE_SHIFT<<2
  56.222 +	;;
  56.223 +	ptr.d r16,r18
  56.224 +	;;
  56.225 +	srlz.i
  56.226 +	;;
  56.227 +	// Now branch away to caller.
  56.228 +	br.sptk.many b1
  56.229 +	;;
  56.230 +
  56.231 +ia64_os_mca_dispatch:
  56.232 +
  56.233 +	// Serialize all MCA processing
  56.234 +	mov	r3=1;;
  56.235 +	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
  56.236 +ia64_os_mca_spin:
  56.237 +	xchg8	r4=[r2],r3;;
  56.238 +	cmp.ne	p6,p0=r4,r0
  56.239 +(p6)	br ia64_os_mca_spin
  56.240 +
  56.241 +	// Save the SAL to OS MCA handoff state as defined
  56.242 +	// by SAL SPEC 3.0
  56.243 +	// NOTE : The order in which the state gets saved
  56.244 +	//	  is dependent on the way the C-structure
  56.245 +	//	  for ia64_mca_sal_to_os_state_t has been
  56.246 +	//	  defined in include/asm/mca.h
  56.247 +	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
  56.248 +	;;
  56.249 +
  56.250 +	// LOG PROCESSOR STATE INFO FROM HERE ON..
  56.251 +begin_os_mca_dump:
  56.252 +	br	ia64_os_mca_proc_state_dump;;
  56.253 +
  56.254 +ia64_os_mca_done_dump:
  56.255 +
  56.256 +	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
  56.257 +	;;
  56.258 +	ld8 r18=[r16]		// Get processor state parameter on existing PALE_CHECK.
  56.259 +	;;
  56.260 +	tbit.nz p6,p7=r18,60
  56.261 +(p7)	br.spnt done_tlb_purge_and_reload
  56.262 +
  56.263 +	// The following code purges TC and TR entries. Then reload all TC entries.
  56.264 +	// Purge percpu data TC entries.
  56.265 +begin_tlb_purge_and_reload:
  56.266 +	movl r18=ia64_reload_tr;;
  56.267 +	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
  56.268 +	mov b1=r18;;
  56.269 +	br.sptk.many ia64_do_tlb_purge;;
  56.270 +
  56.271 +ia64_reload_tr:
  56.272 +	// Finally reload the TR registers.
  56.273 +	// 1. Reload DTR/ITR registers for kernel.
  56.274 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
  56.275 +	movl r17=KERNEL_START
  56.276 +	;;
  56.277 +	mov cr.itir=r18
  56.278 +	mov cr.ifa=r17
  56.279 +        mov r16=IA64_TR_KERNEL
  56.280 +	mov r19=ip
  56.281 +	movl r18=PAGE_KERNEL
  56.282 +	;;
  56.283 +        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
  56.284 +	;;
  56.285 +	or r18=r17,r18
  56.286 +	;;
  56.287 +        itr.i itr[r16]=r18
  56.288 +	;;
  56.289 +        itr.d dtr[r16]=r18
  56.290 +        ;;
  56.291 +	srlz.i
  56.292 +	srlz.d
  56.293 +	;;
  56.294 +	// 2. Reload DTR register for PERCPU data.
  56.295 +	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
  56.296 +	;;
  56.297 +	movl r16=PERCPU_ADDR		// vaddr
  56.298 +	movl r18=PERCPU_PAGE_SHIFT<<2
  56.299 +	;;
  56.300 +	mov cr.itir=r18
  56.301 +	mov cr.ifa=r16
  56.302 +	;;
  56.303 +	ld8 r18=[r2]			// load per-CPU PTE
  56.304 +	mov r16=IA64_TR_PERCPU_DATA;
  56.305 +	;;
  56.306 +	itr.d dtr[r16]=r18
  56.307 +	;;
  56.308 +	srlz.d
  56.309 +	;;
  56.310 +	// 3. Reload ITR for PAL code.
  56.311 +	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
  56.312 +	;;
  56.313 +	ld8 r18=[r2]			// load PAL PTE
  56.314 +	;;
  56.315 +	GET_THIS_PADDR(r2, ia64_mca_pal_base)
  56.316 +	;;
  56.317 +	ld8 r16=[r2]			// load PAL vaddr
  56.318 +	mov r19=IA64_GRANULE_SHIFT<<2
  56.319 +	;;
  56.320 +	mov cr.itir=r19
  56.321 +	mov cr.ifa=r16
  56.322 +	mov r20=IA64_TR_PALCODE
  56.323 +	;;
  56.324 +	itr.i itr[r20]=r18
  56.325 +	;;
  56.326 +	srlz.i
  56.327 +	;;
  56.328 +	// 4. Reload DTR for stack.
  56.329 +	mov r16=IA64_KR(CURRENT_STACK)
  56.330 +	;;
  56.331 +	shl r16=r16,IA64_GRANULE_SHIFT
  56.332 +	movl r19=PAGE_OFFSET
  56.333 +	;;
  56.334 +	add r18=r19,r16
  56.335 +	movl r20=PAGE_KERNEL
  56.336 +	;;
  56.337 +	add r16=r20,r16
  56.338 +	mov r19=IA64_GRANULE_SHIFT<<2
  56.339 +	;;
  56.340 +	mov cr.itir=r19
  56.341 +	mov cr.ifa=r18
  56.342 +	mov r20=IA64_TR_CURRENT_STACK
  56.343 +	;;
  56.344 +	itr.d dtr[r20]=r16
  56.345 +	;;
  56.346 +	srlz.d
  56.347 +	;;
  56.348 +	br.sptk.many done_tlb_purge_and_reload
  56.349 +err:
  56.350 +	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
  56.351 +	br.sptk.many ia64_os_mca_done_restore
  56.352 +
  56.353 +done_tlb_purge_and_reload:
  56.354 +
  56.355 +	// Setup new stack frame for OS_MCA handling
  56.356 +	GET_IA64_MCA_DATA(r2)
  56.357 +	;;
  56.358 +	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
  56.359 +	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
  56.360 +	;;
  56.361 +	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
  56.362 +
  56.363 +	GET_IA64_MCA_DATA(r2)
  56.364 +	;;
  56.365 +	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
  56.366 +	;;
  56.367 +	mov r12=r2		// establish new stack-pointer
  56.368 +
  56.369 +        // Enter virtual mode from physical mode
  56.370 +	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
  56.371 +ia64_os_mca_virtual_begin:
  56.372 +
  56.373 +	// Call virtual mode handler
  56.374 +	movl		r2=ia64_mca_ucmc_handler;;
  56.375 +	mov		b6=r2;;
  56.376 +	br.call.sptk.many    b0=b6;;
  56.377 +.ret0:
  56.378 +	// Revert back to physical mode before going back to SAL
  56.379 +	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
  56.380 +ia64_os_mca_virtual_end:
  56.381 +
  56.382 +	// restore the original stack frame here
  56.383 +	GET_IA64_MCA_DATA(r2)
  56.384 +	;;
  56.385 +	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
  56.386 +	;;
  56.387 +	movl    r4=IA64_PSR_MC
  56.388 +	;;
  56.389 +	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
  56.390 +
  56.391 +	// let us restore all the registers from our PSI structure
  56.392 +	mov	r8=gp
  56.393 +	;;
  56.394 +begin_os_mca_restore:
  56.395 +	br	ia64_os_mca_proc_state_restore;;
  56.396 +
  56.397 +ia64_os_mca_done_restore:
  56.398 +	OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
  56.399 +	// branch back to SALE_CHECK
  56.400 +	ld8		r3=[r2];;
  56.401 +	mov		b0=r3;;		// SAL_CHECK return address
  56.402 +
  56.403 +	// release lock
  56.404 +	movl		r3=ia64_mca_serialize;;
  56.405 +	DATA_VA_TO_PA(r3);;
  56.406 +	st8.rel		[r3]=r0
  56.407 +
  56.408 +	br		b0
  56.409 +	;;
  56.410 +ia64_os_mca_dispatch_end:
  56.411 +//EndMain//////////////////////////////////////////////////////////////////////
  56.412 +
  56.413 +
  56.414 +//++
  56.415 +// Name:
  56.416 +//      ia64_os_mca_proc_state_dump()
  56.417 +//
  56.418 +// Stub Description:
  56.419 +//
  56.420 +//       This stub dumps the processor state during MCHK to a data area
  56.421 +//
  56.422 +//--
  56.423 +
  56.424 +ia64_os_mca_proc_state_dump:
  56.425 +// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
  56.426 +//  to virtual addressing mode.
  56.427 +	GET_IA64_MCA_DATA(r2)
  56.428 +	;;
  56.429 +	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
  56.430 +	;;
  56.431 +// save ar.NaT
  56.432 +	mov		r5=ar.unat                  // ar.unat
  56.433 +
  56.434 +// save banked GRs 16-31 along with NaT bits
  56.435 +	bsw.1;;
  56.436 +	st8.spill	[r2]=r16,8;;
  56.437 +	st8.spill	[r2]=r17,8;;
  56.438 +	st8.spill	[r2]=r18,8;;
  56.439 +	st8.spill	[r2]=r19,8;;
  56.440 +	st8.spill	[r2]=r20,8;;
  56.441 +	st8.spill	[r2]=r21,8;;
  56.442 +	st8.spill	[r2]=r22,8;;
  56.443 +	st8.spill	[r2]=r23,8;;
  56.444 +	st8.spill	[r2]=r24,8;;
  56.445 +	st8.spill	[r2]=r25,8;;
  56.446 +	st8.spill	[r2]=r26,8;;
  56.447 +	st8.spill	[r2]=r27,8;;
  56.448 +	st8.spill	[r2]=r28,8;;
  56.449 +	st8.spill	[r2]=r29,8;;
  56.450 +	st8.spill	[r2]=r30,8;;
  56.451 +	st8.spill	[r2]=r31,8;;
  56.452 +
  56.453 +	mov		r4=ar.unat;;
  56.454 +	st8		[r2]=r4,8                // save User NaT bits for r16-r31
  56.455 +	mov		ar.unat=r5                  // restore original unat
  56.456 +	bsw.0;;
  56.457 +
  56.458 +//save BRs
  56.459 +	add		r4=8,r2                  // duplicate r2 in r4
  56.460 +	add		r6=2*8,r2                // duplicate r2 in r6
  56.461 +
  56.462 +	mov		r3=b0
  56.463 +	mov		r5=b1
  56.464 +	mov		r7=b2;;
  56.465 +	st8		[r2]=r3,3*8
  56.466 +	st8		[r4]=r5,3*8
  56.467 +	st8		[r6]=r7,3*8;;
  56.468 +
  56.469 +	mov		r3=b3
  56.470 +	mov		r5=b4
  56.471 +	mov		r7=b5;;
  56.472 +	st8		[r2]=r3,3*8
  56.473 +	st8		[r4]=r5,3*8
  56.474 +	st8		[r6]=r7,3*8;;
  56.475 +
  56.476 +	mov		r3=b6
  56.477 +	mov		r5=b7;;
  56.478 +	st8		[r2]=r3,2*8
  56.479 +	st8		[r4]=r5,2*8;;
  56.480 +
  56.481 +cSaveCRs:
  56.482 +// save CRs
  56.483 +	add		r4=8,r2                  // duplicate r2 in r4
  56.484 +	add		r6=2*8,r2                // duplicate r2 in r6
  56.485 +
  56.486 +	mov		r3=cr.dcr
  56.487 +	mov		r5=cr.itm
  56.488 +	mov		r7=cr.iva;;
  56.489 +
  56.490 +	st8		[r2]=r3,8*8
  56.491 +	st8		[r4]=r5,3*8
  56.492 +	st8		[r6]=r7,3*8;;            // 48 byte increments
  56.493 +
  56.494 +	mov		r3=cr.pta;;
  56.495 +	st8		[r2]=r3,8*8;;            // 64 byte increments
  56.496 +
  56.497 +// if PSR.ic=1, reading interruption registers causes an illegal operation fault
  56.498 +	mov		r3=psr;;
  56.499 +	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
  56.500 +(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes
  56.501 +begin_skip_intr_regs:
  56.502 +(p6)	br		SkipIntrRegs;;
  56.503 +
  56.504 +	add		r4=8,r2                  // duplicate r2 in r4
  56.505 +	add		r6=2*8,r2                // duplicate r2 in r6
  56.506 +
  56.507 +	mov		r3=cr.ipsr
  56.508 +	mov		r5=cr.isr
  56.509 +	mov		r7=r0;;
  56.510 +	st8		[r2]=r3,3*8
  56.511 +	st8		[r4]=r5,3*8
  56.512 +	st8		[r6]=r7,3*8;;
  56.513 +
  56.514 +	mov		r3=cr.iip
  56.515 +	mov		r5=cr.ifa
  56.516 +	mov		r7=cr.itir;;
  56.517 +	st8		[r2]=r3,3*8
  56.518 +	st8		[r4]=r5,3*8
  56.519 +	st8		[r6]=r7,3*8;;
  56.520 +
  56.521 +	mov		r3=cr.iipa
  56.522 +	mov		r5=cr.ifs
  56.523 +	mov		r7=cr.iim;;
  56.524 +	st8		[r2]=r3,3*8
  56.525 +	st8		[r4]=r5,3*8
  56.526 +	st8		[r6]=r7,3*8;;
  56.527 +
  56.528 +	mov		r3=cr25;;                   // cr.iha
  56.529 +	st8		[r2]=r3,160;;               // 160 byte increment
  56.530 +
  56.531 +SkipIntrRegs:
  56.532 +	st8		[r2]=r0,152;;               // another 152 byte increment
  56.533 +
  56.534 +	add		r4=8,r2                     // duplicate r2 in r4
  56.535 +	add		r6=2*8,r2                   // duplicate r2 in r6
  56.536 +
  56.537 +	mov		r3=cr.lid
  56.538 +//	mov		r5=cr.ivr                     // cr.ivr, don't read it
  56.539 +	mov		r7=cr.tpr;;
  56.540 +	st8		[r2]=r3,3*8
  56.541 +	st8		[r4]=r5,3*8
  56.542 +	st8		[r6]=r7,3*8;;
  56.543 +
  56.544 +	mov		r3=r0                       // cr.eoi => cr67
  56.545 +	mov		r5=r0                       // cr.irr0 => cr68
  56.546 +	mov		r7=r0;;                     // cr.irr1 => cr69
  56.547 +	st8		[r2]=r3,3*8
  56.548 +	st8		[r4]=r5,3*8
  56.549 +	st8		[r6]=r7,3*8;;
  56.550 +
  56.551 +	mov		r3=r0                       // cr.irr2 => cr70
  56.552 +	mov		r5=r0                       // cr.irr3 => cr71
  56.553 +	mov		r7=cr.itv;;
  56.554 +	st8		[r2]=r3,3*8
  56.555 +	st8		[r4]=r5,3*8
  56.556 +	st8		[r6]=r7,3*8;;
  56.557 +
  56.558 +	mov		r3=cr.pmv
  56.559 +	mov		r5=cr.cmcv;;
  56.560 +	st8		[r2]=r3,7*8
  56.561 +	st8		[r4]=r5,7*8;;
  56.562 +
  56.563 +	mov		r3=r0                       // cr.lrr0 => cr80
  56.564 +	mov		r5=r0;;                     // cr.lrr1 => cr81
  56.565 +	st8		[r2]=r3,23*8
  56.566 +	st8		[r4]=r5,23*8;;
  56.567 +
  56.568 +	adds		r2=25*8,r2;;
  56.569 +
  56.570 +cSaveARs:
  56.571 +// save ARs
  56.572 +	add		r4=8,r2                  // duplicate r2 in r4
  56.573 +	add		r6=2*8,r2                // duplicate r2 in r6
  56.574 +
  56.575 +	mov		r3=ar.k0
  56.576 +	mov		r5=ar.k1
  56.577 +	mov		r7=ar.k2;;
  56.578 +	st8		[r2]=r3,3*8
  56.579 +	st8		[r4]=r5,3*8
  56.580 +	st8		[r6]=r7,3*8;;
  56.581 +
  56.582 +	mov		r3=ar.k3
  56.583 +	mov		r5=ar.k4
  56.584 +	mov		r7=ar.k5;;
  56.585 +	st8		[r2]=r3,3*8
  56.586 +	st8		[r4]=r5,3*8
  56.587 +	st8		[r6]=r7,3*8;;
  56.588 +
  56.589 +	mov		r3=ar.k6
  56.590 +	mov		r5=ar.k7
  56.591 +	mov		r7=r0;;                     // ar.kr8
  56.592 +	st8		[r2]=r3,10*8
  56.593 +	st8		[r4]=r5,10*8
  56.594 +	st8		[r6]=r7,10*8;;           // increment by 10*8 bytes
  56.595 +
  56.596 +	mov		r3=ar.rsc
  56.597 +	mov		ar.rsc=r0			    // put RSE in enforced lazy mode
  56.598 +	mov		r5=ar.bsp
  56.599 +	;;
  56.600 +	mov		r7=ar.bspstore;;
  56.601 +	st8		[r2]=r3,3*8
  56.602 +	st8		[r4]=r5,3*8
  56.603 +	st8		[r6]=r7,3*8;;
  56.604 +
  56.605 +	mov		r3=ar.rnat;;
  56.606 +	st8		[r2]=r3,8*13             // increment by 13x8 bytes
  56.607 +
  56.608 +	mov		r3=ar.ccv;;
  56.609 +	st8		[r2]=r3,8*4
  56.610 +
  56.611 +	mov		r3=ar.unat;;
  56.612 +	st8		[r2]=r3,8*4
  56.613 +
  56.614 +	mov		r3=ar.fpsr;;
  56.615 +	st8		[r2]=r3,8*4
  56.616 +
  56.617 +	mov		r3=ar.itc;;
  56.618 +	st8		[r2]=r3,160                 // 160
  56.619 +
  56.620 +	mov		r3=ar.pfs;;
  56.621 +	st8		[r2]=r3,8
  56.622 +
  56.623 +	mov		r3=ar.lc;;
  56.624 +	st8		[r2]=r3,8
  56.625 +
  56.626 +	mov		r3=ar.ec;;
  56.627 +	st8		[r2]=r3
  56.628 +	add		r2=8*62,r2               //padding
  56.629 +
  56.630 +// save RRs
  56.631 +	mov		ar.lc=0x08-1
  56.632 +	movl		r4=0x00;;
  56.633 +
  56.634 +cStRR:
  56.635 +	dep.z		r5=r4,61,3;;
  56.636 +	mov		r3=rr[r5];;
  56.637 +	st8		[r2]=r3,8
  56.638 +	add		r4=1,r4
  56.639 +	br.cloop.sptk.few	cStRR
  56.640 +	;;
  56.641 +end_os_mca_dump:
  56.642 +	br	ia64_os_mca_done_dump;;
  56.643 +
  56.644 +//EndStub//////////////////////////////////////////////////////////////////////
  56.645 +
  56.646 +
  56.647 +//++
  56.648 +// Name:
  56.649 +//       ia64_os_mca_proc_state_restore()
  56.650 +//
  56.651 +// Stub Description:
  56.652 +//
  56.653 +//       This is a stub to restore the saved processor state during MCHK
  56.654 +//
  56.655 +//--
  56.656 +
  56.657 +ia64_os_mca_proc_state_restore:
  56.658 +
  56.659 +// Restore bank1 GR16-31
  56.660 +	GET_IA64_MCA_DATA(r2)
  56.661 +	;;
  56.662 +	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
  56.663 +
  56.664 +restore_GRs:                                    // restore bank-1 GRs 16-31
  56.665 +	bsw.1;;
  56.666 +	add		r3=16*8,r2;;                // to get to NaT of GR 16-31
  56.667 +	ld8		r3=[r3];;
  56.668 +	mov		ar.unat=r3;;                // first restore NaT
  56.669 +
  56.670 +	ld8.fill	r16=[r2],8;;
  56.671 +	ld8.fill	r17=[r2],8;;
  56.672 +	ld8.fill	r18=[r2],8;;
  56.673 +	ld8.fill	r19=[r2],8;;
  56.674 +	ld8.fill	r20=[r2],8;;
  56.675 +	ld8.fill	r21=[r2],8;;
  56.676 +	ld8.fill	r22=[r2],8;;
  56.677 +	ld8.fill	r23=[r2],8;;
  56.678 +	ld8.fill	r24=[r2],8;;
  56.679 +	ld8.fill	r25=[r2],8;;
  56.680 +	ld8.fill	r26=[r2],8;;
  56.681 +	ld8.fill	r27=[r2],8;;
  56.682 +	ld8.fill	r28=[r2],8;;
  56.683 +	ld8.fill	r29=[r2],8;;
  56.684 +	ld8.fill	r30=[r2],8;;
  56.685 +	ld8.fill	r31=[r2],8;;
  56.686 +
  56.687 +	ld8		r3=[r2],8;;              // increment to skip NaT
  56.688 +	bsw.0;;
  56.689 +
  56.690 +restore_BRs:
  56.691 +	add		r4=8,r2                  // duplicate r2 in r4
  56.692 +	add		r6=2*8,r2;;              // duplicate r2 in r6
  56.693 +
  56.694 +	ld8		r3=[r2],3*8
  56.695 +	ld8		r5=[r4],3*8
  56.696 +	ld8		r7=[r6],3*8;;
  56.697 +	mov		b0=r3
  56.698 +	mov		b1=r5
  56.699 +	mov		b2=r7;;
  56.700 +
  56.701 +	ld8		r3=[r2],3*8
  56.702 +	ld8		r5=[r4],3*8
  56.703 +	ld8		r7=[r6],3*8;;
  56.704 +	mov		b3=r3
  56.705 +	mov		b4=r5
  56.706 +	mov		b5=r7;;
  56.707 +
  56.708 +	ld8		r3=[r2],2*8
  56.709 +	ld8		r5=[r4],2*8;;
  56.710 +	mov		b6=r3
  56.711 +	mov		b7=r5;;
  56.712 +
  56.713 +restore_CRs:
  56.714 +	add		r4=8,r2                  // duplicate r2 in r4
  56.715 +	add		r6=2*8,r2;;              // duplicate r2 in r6
  56.716 +
  56.717 +	ld8		r3=[r2],8*8
  56.718 +	ld8		r5=[r4],3*8
  56.719 +	ld8		r7=[r6],3*8;;            // 48 byte increments
  56.720 +	mov		cr.dcr=r3
  56.721 +	mov		cr.itm=r5
  56.722 +	mov		cr.iva=r7;;
  56.723 +
  56.724 +	ld8		r3=[r2],8*8;;            // 64 byte increments
  56.725 +//      mov		cr.pta=r3
  56.726 +
  56.727 +
  56.728 +// if PSR.ic=1, writing interruption registers causes an illegal operation fault
  56.729 +	mov		r3=psr;;
  56.730 +	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
  56.731 +(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes
  56.732 +
  56.733 +begin_rskip_intr_regs:
  56.734 +(p6)	br		rSkipIntrRegs;;
  56.735 +
  56.736 +	add		r4=8,r2                  // duplicate r2 in r4
  56.737 +	add		r6=2*8,r2;;              // duplicate r2 in r6
  56.738 +
  56.739 +	ld8		r3=[r2],3*8
  56.740 +	ld8		r5=[r4],3*8
  56.741 +	ld8		r7=[r6],3*8;;
  56.742 +	mov		cr.ipsr=r3
  56.743 +//	mov		cr.isr=r5                   // cr.isr is read only
  56.744 +
  56.745 +	ld8		r3=[r2],3*8
  56.746 +	ld8		r5=[r4],3*8
  56.747 +	ld8		r7=[r6],3*8;;
  56.748 +	mov		cr.iip=r3
  56.749 +	mov		cr.ifa=r5
  56.750 +	mov		cr.itir=r7;;
  56.751 +
  56.752 +	ld8		r3=[r2],3*8
  56.753 +	ld8		r5=[r4],3*8
  56.754 +	ld8		r7=[r6],3*8;;
  56.755 +	mov		cr.iipa=r3
  56.756 +	mov		cr.ifs=r5
  56.757 +	mov		cr.iim=r7
  56.758 +
  56.759 +	ld8		r3=[r2],160;;               // 160 byte increment
  56.760 +	mov		cr.iha=r3
  56.761 +
  56.762 +rSkipIntrRegs:
  56.763 +	ld8		r3=[r2],152;;               // another 152 byte inc.
  56.764 +
  56.765 +	add		r4=8,r2                     // duplicate r2 in r4
  56.766 +	add		r6=2*8,r2;;                 // duplicate r2 in r6
  56.767 +
  56.768 +	ld8		r3=[r2],8*3
  56.769 +	ld8		r5=[r4],8*3
  56.770 +	ld8		r7=[r6],8*3;;
  56.771 +	mov		cr.lid=r3
  56.772 +//	mov		cr.ivr=r5                   // cr.ivr is read only
  56.773 +	mov		cr.tpr=r7;;
  56.774 +
  56.775 +	ld8		r3=[r2],8*3
  56.776 +	ld8		r5=[r4],8*3
  56.777 +	ld8		r7=[r6],8*3;;
  56.778 +//	mov		cr.eoi=r3
  56.779 +//	mov		cr.irr0=r5                  // cr.irr0 is read only
  56.780 +//	mov		cr.irr1=r7;;                // cr.irr1 is read only
  56.781 +
  56.782 +	ld8		r3=[r2],8*3
  56.783 +	ld8		r5=[r4],8*3
  56.784 +	ld8		r7=[r6],8*3;;
  56.785 +//	mov		cr.irr2=r3                  // cr.irr2 is read only
  56.786 +//	mov		cr.irr3=r5                  // cr.irr3 is read only
  56.787 +	mov		cr.itv=r7;;
  56.788 +
  56.789 +	ld8		r3=[r2],8*7
  56.790 +	ld8		r5=[r4],8*7;;
  56.791 +	mov		cr.pmv=r3
  56.792 +	mov		cr.cmcv=r5;;
  56.793 +
  56.794 +	ld8		r3=[r2],8*23
  56.795 +	ld8		r5=[r4],8*23;;
  56.796 +	adds		r2=8*23,r2
  56.797 +	adds		r4=8*23,r4;;
  56.798 +//	mov		cr.lrr0=r3
  56.799 +//	mov		cr.lrr1=r5
  56.800 +
  56.801 +	adds		r2=8*2,r2;;
  56.802 +
  56.803 +restore_ARs:
  56.804 +	add		r4=8,r2                  // duplicate r2 in r4
  56.805 +	add		r6=2*8,r2;;              // duplicate r2 in r6
  56.806 +
  56.807 +	ld8		r3=[r2],3*8
  56.808 +	ld8		r5=[r4],3*8
  56.809 +	ld8		r7=[r6],3*8;;
  56.810 +	mov		ar.k0=r3
  56.811 +	mov		ar.k1=r5
  56.812 +	mov		ar.k2=r7;;
  56.813 +
  56.814 +	ld8		r3=[r2],3*8
  56.815 +	ld8		r5=[r4],3*8
  56.816 +	ld8		r7=[r6],3*8;;
  56.817 +	mov		ar.k3=r3
  56.818 +	mov		ar.k4=r5
  56.819 +	mov		ar.k5=r7;;
  56.820 +
  56.821 +	ld8		r3=[r2],10*8
  56.822 +	ld8		r5=[r4],10*8
  56.823 +	ld8		r7=[r6],10*8;;
  56.824 +	mov		ar.k6=r3
  56.825 +	mov		ar.k7=r5
  56.826 +	;;
  56.827 +
  56.828 +	ld8		r3=[r2],3*8
  56.829 +	ld8		r5=[r4],3*8
  56.830 +	ld8		r7=[r6],3*8;;
  56.831 +//	mov		ar.rsc=r3
  56.832 +//	mov		ar.bsp=r5                   // ar.bsp is read only
  56.833 +	mov		ar.rsc=r0			    // make sure that RSE is in enforced lazy mode
  56.834 +	;;
  56.835 +	mov		ar.bspstore=r7;;
  56.836 +
  56.837 +	ld8		r9=[r2],8*13;;
  56.838 +	mov		ar.rnat=r9
  56.839 +
  56.840 +	mov		ar.rsc=r3
  56.841 +	ld8		r3=[r2],8*4;;
  56.842 +	mov		ar.ccv=r3
  56.843 +
  56.844 +	ld8		r3=[r2],8*4;;
  56.845 +	mov		ar.unat=r3
  56.846 +
  56.847 +	ld8		r3=[r2],8*4;;
  56.848 +	mov		ar.fpsr=r3
  56.849 +
  56.850 +	ld8		r3=[r2],160;;               // 160
  56.851 +//      mov		ar.itc=r3
  56.852 +
  56.853 +	ld8		r3=[r2],8;;
  56.854 +	mov		ar.pfs=r3
  56.855 +
  56.856 +	ld8		r3=[r2],8;;
  56.857 +	mov		ar.lc=r3
  56.858 +
  56.859 +	ld8		r3=[r2];;
  56.860 +	mov		ar.ec=r3
  56.861 +	add		r2=8*62,r2;;             // padding
  56.862 +
  56.863 +restore_RRs:
  56.864 +	mov		r5=ar.lc
  56.865 +	mov		ar.lc=0x08-1
  56.866 +	movl		r4=0x00;;
  56.867 +cStRRr:
  56.868 +	dep.z		r7=r4,61,3
  56.869 +	ld8		r3=[r2],8;;
  56.870 +	mov		rr[r7]=r3                   // what are its access privileges?
  56.871 +	add		r4=1,r4
  56.872 +	br.cloop.sptk.few	cStRRr
  56.873 +	;;
  56.874 +	mov		ar.lc=r5
  56.875 +	;;
  56.876 +end_os_mca_restore:
  56.877 +	br	ia64_os_mca_done_restore;;
  56.878 +
  56.879 +//EndStub//////////////////////////////////////////////////////////////////////
  56.880 +#else
  56.881 +ia64_os_mca_dispatch:
  56.882 +1:
  56.883 +	br.sptk 1b
  56.884 +ia64_os_mca_dispatch_end:
  56.885 +#endif /* !XEN */
  56.886 +
  56.887 +
  56.888 +// ok, the issue here is that we need to save state information so
  56.889 +// it can be used by the kernel debugger and show-regs routines.
  56.890 +// In order to do this, our best bet is to save the current state (plus
  56.891 +// the state information obtained from the MIN_STATE_AREA) into a pt_regs
  56.892 +// format.  This way we can pass it on in a usable format.
  56.893 +//
  56.894 +
  56.895 +//
  56.896 +// SAL to OS entry point for INIT on the monarch processor
  56.897 +// This has been defined for registration purposes with SAL
  56.898 +// as a part of ia64_mca_init.
  56.899 +//
  56.900 +// When we get here, the following registers have been
  56.901 +// set by the SAL for our use
  56.902 +//
  56.903 +//		1. GR1 = OS INIT GP
  56.904 +//		2. GR8 = PAL_PROC physical address
  56.905 +//		3. GR9 = SAL_PROC physical address
  56.906 +//		4. GR10 = SAL GP (physical)
  56.907 +//		5. GR11 = Init Reason
  56.908 +//			0 = Received INIT for event other than crash dump switch
  56.909 +//			1 = Received wakeup at the end of an OS_MCA corrected machine check
  56.910 +//			2 = Received INIT due to CrashDump switch assertion
  56.911 +//
  56.912 +//		6. GR12 = Return address to location within SAL_INIT procedure
  56.913 +
  56.914 +
  56.915 +GLOBAL_ENTRY(ia64_monarch_init_handler)
  56.916 +	.prologue
  56.917 +	// stash the information the SAL passed to os
  56.918 +	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
  56.919 +	;;
  56.920 +	SAVE_MIN_WITH_COVER
  56.921 +	;;
  56.922 +	mov r8=cr.ifa
  56.923 +	mov r9=cr.isr
  56.924 +	adds r3=8,r2				// set up second base pointer
  56.925 +	;;
  56.926 +	SAVE_REST
  56.927 +
  56.928 +// ok, enough should be saved at this point to be dangerous, and supply
  56.929 +// information for a dump
  56.930 +// We need to switch to Virtual mode before hitting the C functions.
  56.931 +
  56.932 +	movl	r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
  56.933 +	mov	r3=psr	// get the current psr, minimum enabled at this point
  56.934 +	;;
  56.935 +	or	r2=r2,r3
  56.936 +	;;
  56.937 +	movl	r3=IVirtual_Switch
  56.938 +	;;
  56.939 +	mov	cr.iip=r3	// short return to set the appropriate bits
  56.940 +	mov	cr.ipsr=r2	// need to do an rfi to set appropriate bits
  56.941 +	;;
  56.942 +	rfi
  56.943 +	;;
  56.944 +IVirtual_Switch:
  56.945 +	//
  56.946 +	// We should now be running virtual
  56.947 +	//
  56.948 +	// Let's call the C handler to get the rest of the state info
  56.949 +	//
  56.950 +	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
  56.951 +	;;
  56.952 +	adds out0=16,sp				// out0 = pointer to pt_regs
  56.953 +	;;
  56.954 +	DO_SAVE_SWITCH_STACK
  56.955 +	.body
  56.956 +	adds out1=16,sp				// out1 = pointer to switch_stack
  56.957 +
  56.958 +	br.call.sptk.many rp=ia64_init_handler
  56.959 +.ret1:
  56.960 +
  56.961 +return_from_init:
  56.962 +	br.sptk return_from_init
  56.963 +END(ia64_monarch_init_handler)
  56.964 +
  56.965 +//
  56.966 +// SAL to OS entry point for INIT on the slave processor
  56.967 +// This has been defined for registration purposes with SAL
  56.968 +// as a part of ia64_mca_init.
  56.969 +//
  56.970 +
  56.971 +GLOBAL_ENTRY(ia64_slave_init_handler)
  56.972 +1:	br.sptk 1b
  56.973 +END(ia64_slave_init_handler)
    57.1 --- a/xen/arch/ia64/linux-xen/minstate.h	Thu Jul 27 17:44:14 2006 -0500
    57.2 +++ b/xen/arch/ia64/linux-xen/minstate.h	Fri Jul 28 10:51:38 2006 +0100
    57.3 @@ -36,7 +36,31 @@
    57.4   * For mca_asm.S we want to access the stack physically since the state is saved before we
    57.5   * go virtual and don't want to destroy the iip or ipsr.
    57.6   */
    57.7 -#define MINSTATE_START_SAVE_MIN_PHYS								\
    57.8 +#ifdef XEN
    57.9 +# define MINSTATE_START_SAVE_MIN_PHYS								\
   57.10 +(pKStk)	movl r3=THIS_CPU(ia64_mca_data);;							\
   57.11 +(pKStk)	tpa r3 = r3;;										\
   57.12 +(pKStk)	ld8 r3 = [r3];;										\
   57.13 +(pKStk)	addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
   57.14 +(pKStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
   57.15 +(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
   57.16 +(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
   57.17 +	;;											\
   57.18 +(pUStk)	mov r24=ar.rnat;									\
   57.19 +(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */	\
   57.20 +(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
   57.21 +(pUStk)	dep r22=-1,r22,60,4;			/* compute Xen virtual addr of RBS */	\
   57.22 +	;;											\
   57.23 +(pUStk)	mov ar.bspstore=r22;			/* switch to Xen RBS */			\
   57.24 +	;;											\
   57.25 +(pUStk)	mov r18=ar.bsp;										\
   57.26 +(pUStk)	mov ar.rsc=0x3;	 /* set eager mode, pl 0, little-endian, loadrs=0 */			\
   57.27 +
   57.28 +# define MINSTATE_END_SAVE_MIN_PHYS								\
   57.29 +	dep r12=-1,r12,60,4;	    /* make sp a Xen virtual address */			\
   57.30 +	;;
   57.31 +#else
   57.32 +# define MINSTATE_START_SAVE_MIN_PHYS								\
   57.33  (pKStk) mov r3=IA64_KR(PER_CPU_DATA);;								\
   57.34  (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
   57.35  (pKStk) ld8 r3 = [r3];;										\
   57.36 @@ -55,15 +79,17 @@
   57.37  (pUStk)	mov r18=ar.bsp;										\
   57.38  (pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
   57.39  
   57.40 -#define MINSTATE_END_SAVE_MIN_PHYS								\
   57.41 +# define MINSTATE_END_SAVE_MIN_PHYS								\
   57.42  	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */			\
   57.43  	;;
   57.44 +#endif /* XEN */
   57.45  
   57.46  #ifdef MINSTATE_VIRT
   57.47  #ifdef XEN
   57.48  # define MINSTATE_GET_CURRENT(reg)					\
   57.49                 movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;	\
   57.50                 ld8 reg=[reg]
   57.51 +# define MINSTATE_GET_CURRENT_VIRT(reg)	MINSTATE_GET_CURRENT(reg)
   57.52  #else
   57.53  # define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
   57.54  #endif
   57.55 @@ -72,7 +98,19 @@
   57.56  #endif
   57.57  
   57.58  #ifdef MINSTATE_PHYS
   57.59 +# ifdef XEN
   57.60 +# define MINSTATE_GET_CURRENT(reg)					\
   57.61 +	movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;		\
   57.62 +	tpa reg=reg;;							\
   57.63 +	ld8 reg=[reg];;							\
   57.64 +	tpa reg=reg;;
   57.65 +# define MINSTATE_GET_CURRENT_VIRT(reg)					\
   57.66 +	movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;		\
   57.67 +	tpa reg=reg;;							\
   57.68 +	ld8 reg=[reg];;
   57.69 +#else
   57.70  # define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
   57.71 +#endif /* XEN */
   57.72  # define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
   57.73  # define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
   57.74  #endif
   57.75 @@ -175,8 +213,8 @@
   57.76  	;;											\
   57.77  .mem.offset 0,0; st8.spill [r16]=r13,16;							\
   57.78  .mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
   57.79 -	/* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */				\
   57.80 -	MINSTATE_GET_CURRENT(r13);		/* XEN establish `current' */				\
   57.81 +	/* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */			\
   57.82 +	MINSTATE_GET_CURRENT_VIRT(r13);		/* XEN establish `current' */			\
   57.83  	;;											\
   57.84  .mem.offset 0,0; st8.spill [r16]=r15,16;							\
   57.85  .mem.offset 8,0; st8.spill [r17]=r14,16;							\
    58.1 --- a/xen/arch/ia64/linux-xen/tlb.c	Thu Jul 27 17:44:14 2006 -0500
    58.2 +++ b/xen/arch/ia64/linux-xen/tlb.c	Fri Jul 28 10:51:38 2006 +0100
    58.3 @@ -173,7 +173,11 @@ EXPORT_SYMBOL(flush_tlb_range);
    58.4  void __devinit
    58.5  ia64_tlb_init (void)
    58.6  {
    58.7 +#ifndef XEN
    58.8  	ia64_ptce_info_t ptce_info;
    58.9 +#else
   58.10 +	ia64_ptce_info_t ptce_info = { 0 };
   58.11 +#endif
   58.12  	unsigned long tr_pgbits;
   58.13  	long status;
   58.14  
    59.1 --- a/xen/arch/ia64/linux-xen/unwind.c	Thu Jul 27 17:44:14 2006 -0500
    59.2 +++ b/xen/arch/ia64/linux-xen/unwind.c	Fri Jul 28 10:51:38 2006 +0100
    59.3 @@ -2056,6 +2056,28 @@ init_frame_info (struct unw_frame_info *
    59.4  }
    59.5  
    59.6  void
    59.7 +unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
    59.8 +			    struct pt_regs *pt, struct switch_stack *sw)
    59.9 +{
   59.10 +	unsigned long sof;
   59.11 +
   59.12 +	init_frame_info(info, t, sw, pt->r12);
   59.13 +	info->cfm_loc = &pt->cr_ifs;
   59.14 +	info->unat_loc = &pt->ar_unat;
   59.15 +	info->pfs_loc = &pt->ar_pfs;
   59.16 +	sof = *info->cfm_loc & 0x7f;
   59.17 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
   59.18 +	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
   59.19 +	info->pt = (unsigned long) pt;
   59.20 +	UNW_DPRINT(3, "unwind.%s:\n"
   59.21 +		   "  bsp    0x%lx\n"
   59.22 +		   "  sof    0x%lx\n"
   59.23 +		   "  ip     0x%lx\n",
   59.24 +		   __FUNCTION__, info->bsp, sof, info->ip);
   59.25 +	find_save_locs(info);
   59.26 +}
   59.27 +
   59.28 +void
   59.29  unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
   59.30  {
   59.31  	unsigned long sol;
    60.1 --- a/xen/arch/ia64/tools/README.RunVT	Thu Jul 27 17:44:14 2006 -0500
    60.2 +++ b/xen/arch/ia64/tools/README.RunVT	Fri Jul 28 10:51:38 2006 +0100
    60.3 @@ -1,59 +1,46 @@
    60.4  INSTRUCTIONS FOR RUNNING IPF/Xen with a VT-enabled Tiger4 platform
    60.5  
    60.6 -Note: the Domain0 must be an unmodified Linux
    60.7 -
    60.8 -1) Perform operations in README.xenia64 to get a flattened Xen IPF source tree
    60.9 -
   60.10 -2) Build an unmodified Linux 2.6 kernel
   60.11 -	a) tar xvfz  linux-2.6.11.tar.gz
   60.12 -        b) cp arch/ia64/configs/tiger_defconfig .config
   60.13 -	c) Build linux.
   60.14 -   		1) yes "" | make oldconfig
   60.15 -   		2) make
   60.16 -
   60.17 -3) Build IPF VT-enabled Xen image
   60.18 -	edit xen/arch/ia64/Rules.mk for
   60.19 -		CONFIG_VTI	?= y 	to enable VT-enable build
   60.20 -4) Setup ELILO.CONF
   60.21 -	image=xen
   60.22 -        	label=xen
   60.23 -        	initrd=vmlinux2.6.11		// unmodified Linux kernel image
   60.24 -        	read-only
   60.25 -        	append="nomca root=/dev/sda3"
   60.26 -
   60.27 -STATUS as 4/28/05 - Features implemented for Domain0
   60.28 -
   60.29 -0. Runs unmodified Linux kernel as Domain0
   60.30 -    Validated with Linux 2.6.11 to run Xwindow and NIC on UP logical processor
   60.31 +1. Install a Linux disk, VT_Disk, to be used by VT
   60.32 +2. Set up the target VT_Disk
   60.33 +	1. Boot VT_Disk
   60.34 +	2. modify the following files on VT_Disk
   60.35 +		/boot/efi/efi/redhat/elilo.conf -
   60.36 +			modify the "append=" line to have "root=/dev/hda3"
   60.37 +			** note: /dev/hda3 must reflect the VT_Disk root (/) partition
   60.38  
   60.39 -1. Take advantage of VT-enabled processor
   60.40 -   a. Processor intercepts guest privileged instruction and deliver Opcode/Cause to Hypervisor
   60.41 -   b. One VPD (Virtual Processor Descriptor) per Virtual Processor
   60.42 -   c. Domains are in a different virtual address space from hypervisor. Domains have one less VA bit than hypervisor, where hypervisor runs in 0xF00000... address protected by the processor from Domains.
   60.43 -
   60.44 -2. vTLB and guest_VHPT
   60.45 -   a. vTLB extending machine TLB entries through hypervisor internal data structure
   60.46 -      vTLB caches Domains installed TR's and TC's, and then installs TC's for Domains instead.
   60.47 -      vTLB implements collision chains
   60.48 -   b. Processor walks hypervisor internal VHPT, not the domain VHPT.  On TLB miss, vTLB is consulted first to put hypervisor cached entry into VHPT without inject TLB miss to domain.
   60.49 -
   60.50 -3. Region ID fix-partitioning
   60.51 -   a. currently hard partition 24bits of RIDs into 16 partitions by using top 4bit.
   60.52 -   b. Hypervisor uses the very last partition RIDs, i.e., 0xFxxxxx RIDs
   60.53 -   c. Effectively supports Domain0 and 14 other DomainN
   60.54 -
   60.55 -4. HyperVisor is mapped with 2 sets of RIDs during runtime, its own RIDs and the active Domain RIDs
   60.56 -   a. Domain RIDs are used by processor to access guest_VHPT during Domain runtime
   60.57 -   b. Hypervisor RIDs are used when Hypervisor is running
   60.58 -   c. Implies there are some Region registers transition on entering/exiting hypervisor
   60.59 -
   60.60 -5. Linux styled pt_regs with minor modification for VT and instruction emulation
   60.61 -   a. Part of Domain registers are saved/restored from VPD
   60.62 -   b. Extended pt_regs to include r4~r7 and Domain's iipa & isr for possible instruction emulation, so no need to save a complete set of switch_stack on IVT entry
   60.63 -
   60.64 -6. Linux styled per virtual processor memory/RSE stacks, which is the same as non-VT domain0
   60.65 -
   60.66 -7. Handles splitted I/DCache design
   60.67 -   Newer IPF processors has split I/Dcaches.  The design takes this into consideration when Xen recopy Domain0 to target address for execution
   60.68 +		/etc/fstab -
   60.69 +			LABEL=/     /     ext3    defaults  1   1
   60.70 +		  to
   60.71 +			/dev/hda3   /     ext3    defaults  1   1
   60.72 +                  and other entries accordingly
   60.73 +3. Install Xen and boot XenLinux on your standard Linux disk
   60.74 +        1. modify /boot/efi/efi/redhat/elilo.conf -
   60.75 +			"append=" entry to have "root=/dev/sda3"
   60.76 +	2. modify /etc/fstab -
   60.77 +                        LABEL=/     /     ext3    defaults  1   1
   60.78 +                  to
   60.79 +                        /dev/sda3   /     ext3    defaults  1   1
   60.80 +                  and other entries accordingly
   60.81 +4. Reboot XenLinux with VT_Disk in the /dev/sdb slot
   60.82 +	1. copy Guest_Firmware.bin into /usr/lib/xen/boot/guest_firmware.bin
   60.83 +	2. modify /etc/xen/xmexample.vti (a sample sketch follows these steps) so that
   60.84 +		disk = [ 'phy:/dev/sdb,ioemu:hda,w' ]
   60.85 +	   and make sure
   60.86 +		kernel=/usr/lib/xen/boot/guest_firmware.bin
   60.87 +5. Make sure XenLinux has SDL installed by
   60.88 +	> rpm -q -a | grep SDL
   60.89 +		SDL-1.2.7-8 SDL-devel-1.2.7-8 
   60.90 +6. Start vncserver from XenLinux
   60.91 +	1. run ifconfig to get the XenLinux IP address
   60.92 +	2. vncserver
   60.93 +7. Start VT Domain
   60.94 +	1. From a remote system, connect to XenLinux through a vnc viewer
   60.95 +	2. In the vnc window
   60.96 +		> xend start
   60.97 +		> xm create /etc/xen/xmexample.vti
   60.98 +	   an EFI shell will pop up
   60.99 +		> fs0:
  60.100 +		fs0:> cd efi\redhat
  60.101 +		fs0:> elilo linux
  60.102  
  60.103  
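A minimal /etc/xen/xmexample.vti sketch consistent with step 4 above.  Only the
disk and kernel lines come from these instructions; the remaining fields and
their values are illustrative assumptions and should be adjusted to the local
setup:

	# name/sizing below are illustrative assumptions, not taken from this README
	name   = "vti-domain"
	memory = 256
	vcpus  = 1
	# these two lines are the ones step 4 asks you to check
	kernel = "/usr/lib/xen/boot/guest_firmware.bin"
	disk   = [ 'phy:/dev/sdb,ioemu:hda,w' ]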
    61.1 --- a/xen/arch/ia64/vmx/mmio.c	Thu Jul 27 17:44:14 2006 -0500
    61.2 +++ b/xen/arch/ia64/vmx/mmio.c	Fri Jul 28 10:51:38 2006 +0100
    61.3 @@ -27,7 +27,7 @@
    61.4  #include <asm/gcc_intrin.h>
    61.5  #include <linux/interrupt.h>
    61.6  #include <asm/vmx_vcpu.h>
    61.7 -#include <asm/privop.h>
    61.8 +#include <asm/bundle.h>
    61.9  #include <asm/types.h>
   61.10  #include <public/hvm/ioreq.h>
   61.11  #include <asm/mm.h>
   61.12 @@ -386,20 +386,16 @@ static void write_ipi (VCPU *vcpu, uint6
   61.13          struct pt_regs *targ_regs = vcpu_regs (targ);
   61.14          struct vcpu_guest_context c;
   61.15  
   61.16 -        printf ("arch_boot_vcpu: %p %p\n",
   61.17 -                (void *)d->arch.boot_rdv_ip,
   61.18 -                (void *)d->arch.boot_rdv_r1);
   61.19          memset (&c, 0, sizeof (c));
   61.20  
   61.21 -        c.flags = VGCF_VMX_GUEST;
   61.22          if (arch_set_info_guest (targ, &c) != 0) {
   61.23              printf ("arch_boot_vcpu: failure\n");
   61.24              return;
   61.25          }
   61.26          /* First or next rendez-vous: set registers.  */
   61.27          vcpu_init_regs (targ);
   61.28 -        targ_regs->cr_iip = d->arch.boot_rdv_ip;
   61.29 -        targ_regs->r1 = d->arch.boot_rdv_r1;
   61.30 +        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
   61.31 +        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
   61.32  
   61.33          if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
   61.34              vcpu_wake(targ);
   61.35 @@ -425,7 +421,6 @@ static void write_ipi (VCPU *vcpu, uint6
   61.36     dir 1: read 0:write
   61.37      inst_type 0:integer 1:floating point
   61.38   */
   61.39 -extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
   61.40  #define SL_INTEGER  0        // store/load interger
   61.41  #define SL_FLOATING    1       // store/load floating
   61.42  
    62.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Thu Jul 27 17:44:14 2006 -0500
    62.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Fri Jul 28 10:51:38 2006 +0100
    62.3 @@ -24,18 +24,39 @@
    62.4  #include <asm/dom_fw.h>
    62.5  #include <asm/tlb.h>
    62.6  #include <asm/vmx_mm_def.h>
    62.7 +#include <xen/hypercall.h>
    62.8 +#include <public/sched.h>
    62.9 +
   62.10 +/*
   62.11 + * Handy macros to make sure that the PAL return values start out
   62.12 + * as something meaningful.
   62.13 + */
   62.14 +#define INIT_PAL_STATUS_UNIMPLEMENTED(x)		\
   62.15 +	{						\
   62.16 +		x.status = PAL_STATUS_UNIMPLEMENTED;	\
   62.17 +		x.v0 = 0;				\
   62.18 +		x.v1 = 0;				\
   62.19 +		x.v2 = 0;				\
   62.20 +	}
   62.21 +
   62.22 +#define INIT_PAL_STATUS_SUCCESS(x)			\
   62.23 +	{						\
   62.24 +	       	x.status = PAL_STATUS_SUCCESS;		\
   62.25 +		x.v0 = 0;				\
   62.26 +		x.v1 = 0;				\
   62.27 +		x.v2 = 0;				\
   62.28 +	}
   62.29  
   62.30  static void
   62.31 -get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
   62.32 -			UINT64 *gr30, UINT64 *gr31) {
   62.33 +get_pal_parameters(VCPU *vcpu, UINT64 *gr29, UINT64 *gr30, UINT64 *gr31) {
   62.34  
   62.35 -  	vcpu_get_gr_nat(vcpu,29,gr29);
   62.36 -  	vcpu_get_gr_nat(vcpu,30,gr30); 
   62.37 -  	vcpu_get_gr_nat(vcpu,31,gr31);
   62.38 +	vcpu_get_gr_nat(vcpu,29,gr29);
   62.39 +	vcpu_get_gr_nat(vcpu,30,gr30); 
   62.40 +	vcpu_get_gr_nat(vcpu,31,gr31);
   62.41  }
   62.42  
   62.43  static void
   62.44 -set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
   62.45 +set_pal_result(VCPU *vcpu,struct ia64_pal_retval result) {
   62.46  
   62.47  	vcpu_set_gr(vcpu,8, result.status,0);
   62.48  	vcpu_set_gr(vcpu,9, result.v0,0);
   62.49 @@ -44,58 +65,60 @@ set_pal_result (VCPU *vcpu,struct ia64_p
   62.50  }
   62.51  
   62.52  static void
   62.53 -set_sal_result (VCPU *vcpu,struct sal_ret_values result) {
   62.54 +set_sal_result(VCPU *vcpu,struct sal_ret_values result) {
   62.55  
   62.56  	vcpu_set_gr(vcpu,8, result.r8,0);
   62.57  	vcpu_set_gr(vcpu,9, result.r9,0);
   62.58  	vcpu_set_gr(vcpu,10, result.r10,0);
   62.59  	vcpu_set_gr(vcpu,11, result.r11,0);
   62.60  }
   62.61 +
   62.62  static struct ia64_pal_retval
   62.63 -pal_cache_flush (VCPU *vcpu) {
   62.64 +pal_cache_flush(VCPU *vcpu) {
   62.65  	UINT64 gr28,gr29, gr30, gr31;
   62.66  	struct ia64_pal_retval result;
   62.67  
   62.68 -	get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
   62.69 -	vcpu_get_gr_nat(vcpu,28,&gr28);
   62.70 +	get_pal_parameters(vcpu, &gr29, &gr30, &gr31);
   62.71 +	vcpu_get_gr_nat(vcpu, 28, &gr28);
   62.72  
   62.73  	/* Always call Host Pal in int=1 */
   62.74 -	gr30 = gr30 &(~(0x2UL));
   62.75 +	gr30 = gr30 & ~0x2UL;
   62.76  
   62.77 -	/* call Host PAL cache flush */
   62.78 -	result=ia64_pal_call_static(gr28 ,gr29, gr30,gr31,1);  // Clear psr.ic when call PAL_CACHE_FLUSH
   62.79 +	/*
   62.80 +	 * Call Host PAL cache flush
    62.81 +	 * Clear psr.ic when calling PAL_CACHE_FLUSH
   62.82 +	 */
   62.83 +	result = ia64_pal_call_static(gr28 ,gr29, gr30, gr31, 1);
   62.84  
   62.85  	/* If host PAL call is interrupted, then loop to complete it */
   62.86 -//	while (result.status == 1) {
   62.87 -//		ia64_pal_call_static(gr28 ,gr29, gr30, 
   62.88 -//				result.v1,1LL);
   62.89 -//	}
   62.90 -	if(result.status != 0) {
   62.91 -        	panic_domain(vcpu_regs(vcpu),"PAL_CACHE_FLUSH ERROR, status %ld", result.status);
   62.92 -	}
   62.93 +//	while (result.status == 1)
   62.94 +//		ia64_pal_call_static(gr28 ,gr29, gr30, result.v1, 1LL);
   62.95 +//
   62.96 +	if (result.status != 0)
   62.97 +		panic_domain(vcpu_regs(vcpu), "PAL_CACHE_FLUSH ERROR, "
   62.98 +		             "status %ld", result.status);
   62.99  
  62.100  	return result;
  62.101  }
  62.102  
  62.103  static struct ia64_pal_retval
  62.104 -pal_vm_tr_read (VCPU *vcpu ) {
  62.105 +pal_vm_tr_read(VCPU *vcpu) {
  62.106  	struct ia64_pal_retval result;
  62.107  
  62.108 -	result.status= -1; //unimplemented
  62.109 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.110  
  62.111  	return result;
  62.112  }
  62.113  
  62.114 -
  62.115  static struct ia64_pal_retval
  62.116 -pal_prefetch_visibility (VCPU *vcpu)  {
  62.117 +pal_prefetch_visibility(VCPU *vcpu) {
  62.118  	/* Due to current MM virtualization algorithm,
  62.119  	 * We do not allow guest to change mapping attribute.
  62.120  	 * Thus we will not support PAL_PREFETCH_VISIBILITY
  62.121  	 */
  62.122  	struct ia64_pal_retval result;
  62.123  
  62.124 -	result.status= -1; //unimplemented
  62.125 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.126  
  62.127  	return result;
  62.128  }
  62.129 @@ -104,288 +127,315 @@ static struct ia64_pal_retval
  62.130  pal_platform_addr(VCPU *vcpu) {
  62.131  	struct ia64_pal_retval result;
  62.132  
  62.133 -	result.status= 0; //success
  62.134 -
  62.135 -	return result;
  62.136 -}
  62.137 -
  62.138 -static struct ia64_pal_retval
  62.139 -pal_halt (VCPU *vcpu) {
  62.140 -	//bugbug: to be implement. 
  62.141 -	struct ia64_pal_retval result;
  62.142 -
  62.143 -	result.status= -1; //unimplemented
  62.144 -
  62.145 -	return result;
  62.146 -}
  62.147 -
  62.148 -
  62.149 -static struct ia64_pal_retval
  62.150 -pal_halt_light (VCPU *vcpu) {
  62.151 -	struct ia64_pal_retval result;
  62.152 -
  62.153 -	result.status= -1; //unimplemented
  62.154 -
  62.155 -	return result;
  62.156 -}
  62.157 -
  62.158 -static struct ia64_pal_retval
  62.159 -pal_cache_read (VCPU *vcpu) {
  62.160 -	struct ia64_pal_retval result;
  62.161 -
  62.162 -	result.status= -1; //unimplemented
  62.163 -
  62.164 -	return result;
  62.165 -}
  62.166 -
  62.167 -static struct ia64_pal_retval
  62.168 -pal_cache_write (VCPU *vcpu) {
  62.169 -	struct ia64_pal_retval result;
  62.170 -
  62.171 -	result.status= -1; //unimplemented
  62.172 +	INIT_PAL_STATUS_SUCCESS(result);
  62.173  
  62.174  	return result;
  62.175  }
  62.176  
  62.177  static struct ia64_pal_retval
  62.178 -pal_bus_get_features(VCPU *vcpu){
  62.179 -	struct ia64_pal_retval result;
  62.180 -
  62.181 -	result.status= -1; //unimplemented
  62.182 -	return result;
  62.183 -}
  62.184 -
  62.185 -static struct ia64_pal_retval
  62.186 -pal_cache_summary(VCPU *vcpu){
  62.187 -	struct ia64_pal_retval result;
  62.188 -
  62.189 -	result.status= -1; //unimplemented
  62.190 -	return result;
  62.191 -}
  62.192 -
  62.193 -static struct ia64_pal_retval
  62.194 -pal_cache_init(VCPU *vcpu){
  62.195 -	struct ia64_pal_retval result;
  62.196 -	result.status=0;
  62.197 -	return result;
  62.198 -}
  62.199 -
  62.200 -static struct ia64_pal_retval
  62.201 -pal_cache_info(VCPU *vcpu){
  62.202 +pal_halt(VCPU *vcpu) {
   62.203 +	//bugbug: to be implemented.
  62.204  	struct ia64_pal_retval result;
  62.205  
  62.206 -	result.status= -1; //unimplemented
  62.207 -	return result;
  62.208 -}
  62.209 -
  62.210 -static struct ia64_pal_retval
  62.211 -pal_cache_prot_info(VCPU *vcpu){
  62.212 -	struct ia64_pal_retval result;
  62.213 -
  62.214 -	result.status= -1; //unimplemented
  62.215 -	return result;
  62.216 -}
  62.217 -
  62.218 -static struct ia64_pal_retval
  62.219 -pal_mem_attrib(VCPU *vcpu){
  62.220 -	struct ia64_pal_retval result;
  62.221 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.222  
  62.223 -	result.status= -1; //unimplemented
  62.224 -	return result;
  62.225 -}
  62.226 -
  62.227 -static struct ia64_pal_retval
  62.228 -pal_debug_info(VCPU *vcpu){
  62.229 -	struct ia64_pal_retval result;
  62.230 -
  62.231 -	result.status= -1; //unimplemented
  62.232 -	return result;
  62.233 -}
  62.234 -
  62.235 -static struct ia64_pal_retval
  62.236 -pal_fixed_addr(VCPU *vcpu){
  62.237 -	struct ia64_pal_retval result;
  62.238 -
  62.239 -	result.status= -1; //unimplemented
  62.240  	return result;
  62.241  }
  62.242  
  62.243  static struct ia64_pal_retval
  62.244 -pal_freq_base(VCPU *vcpu){
  62.245 -    struct ia64_pal_retval result;
  62.246 -    struct ia64_sal_retval isrv;
  62.247 -
  62.248 -    PAL_CALL(result,PAL_FREQ_BASE, 0, 0, 0);
  62.249 -    if(result.v0 == 0){ //PAL_FREQ_BASE may not be implemented in some platforms, call SAL instead.
  62.250 -        SAL_CALL(isrv, SAL_FREQ_BASE, 
  62.251 -                SAL_FREQ_BASE_PLATFORM, 0, 0, 0, 0, 0, 0);
  62.252 -        result.status = isrv.status;
  62.253 -        result.v0 = isrv.v0;
  62.254 -        result.v1 = result.v2 =0;
  62.255 -    }
  62.256 -    return result;
  62.257 -}
  62.258 +pal_halt_light(VCPU *vcpu) {
  62.259 +	struct ia64_pal_retval result;
  62.260 +	
  62.261 +	if (!is_unmasked_irq(vcpu))
  62.262 +		do_sched_op_compat(SCHEDOP_block, 0);
  62.263 +	    
  62.264 +	INIT_PAL_STATUS_SUCCESS(result);
  62.265  
  62.266 -static struct ia64_pal_retval
  62.267 -pal_freq_ratios(VCPU *vcpu){
  62.268 -    struct ia64_pal_retval result;
  62.269 -
  62.270 -    PAL_CALL(result,PAL_FREQ_RATIOS, 0, 0, 0);
  62.271 -    return result;
  62.272 -}
  62.273 -
  62.274 -static struct ia64_pal_retval
  62.275 -pal_halt_info(VCPU *vcpu){
  62.276 -	struct ia64_pal_retval result;
  62.277 -
  62.278 -	result.status= -1; //unimplemented
  62.279 -	return result;
  62.280 -}
  62.281 -
  62.282 -static struct ia64_pal_retval
  62.283 -pal_logical_to_physica(VCPU *vcpu){
  62.284 -	struct ia64_pal_retval result;
  62.285 -
  62.286 -	result.status= -1; //unimplemented
  62.287  	return result;
  62.288  }
  62.289  
  62.290  static struct ia64_pal_retval
  62.291 -pal_perf_mon_info(VCPU *vcpu){
  62.292 +pal_cache_read(VCPU *vcpu) {
  62.293  	struct ia64_pal_retval result;
  62.294  
  62.295 -	result.status= -1; //unimplemented
  62.296 -	return result;
  62.297 -}
  62.298 -
  62.299 -static struct ia64_pal_retval
  62.300 -pal_proc_get_features(VCPU *vcpu){
  62.301 -	struct ia64_pal_retval result;
  62.302 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.303  
  62.304 -	result.status= -1; //unimplemented
  62.305 -	return result;
  62.306 -}
  62.307 -
  62.308 -static struct ia64_pal_retval
  62.309 -pal_ptce_info(VCPU *vcpu){
  62.310 -	struct ia64_pal_retval result;
  62.311 -
  62.312 -	result.status= -1; //unimplemented
  62.313  	return result;
  62.314  }
  62.315  
  62.316  static struct ia64_pal_retval
  62.317 -pal_register_info(VCPU *vcpu){
  62.318 +pal_cache_write(VCPU *vcpu) {
  62.319  	struct ia64_pal_retval result;
  62.320  
  62.321 -	result.status= -1; //unimplemented
  62.322 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.323 +
  62.324  	return result;
  62.325  }
  62.326  
  62.327  static struct ia64_pal_retval
  62.328 -pal_rse_info(VCPU *vcpu){
  62.329 +pal_bus_get_features(VCPU *vcpu) {
  62.330  	struct ia64_pal_retval result;
  62.331  
  62.332 -	result.status= -1; //unimplemented
  62.333 -	return result;
  62.334 -}
  62.335 -static struct ia64_pal_retval
  62.336 -pal_test_info(VCPU *vcpu){
  62.337 -	struct ia64_pal_retval result;
  62.338 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.339  
  62.340 -	result.status= -1; //unimplemented
  62.341  	return result;
  62.342  }
  62.343  
  62.344  static struct ia64_pal_retval
  62.345 -pal_vm_summary(VCPU *vcpu){
  62.346 +pal_cache_summary(VCPU *vcpu) {
  62.347 +	struct ia64_pal_retval result;
  62.348 +
  62.349 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.350 +
  62.351 +	return result;
  62.352 +}
  62.353 +
  62.354 +static struct ia64_pal_retval
  62.355 +pal_cache_init(VCPU *vcpu) {
  62.356 +	struct ia64_pal_retval result;
  62.357 +
  62.358 +	INIT_PAL_STATUS_SUCCESS(result);
  62.359 +
  62.360 +	return result;
  62.361 +}
  62.362 +
  62.363 +static struct ia64_pal_retval
  62.364 +pal_cache_info(VCPU *vcpu) {
  62.365 +	struct ia64_pal_retval result;
  62.366 +
  62.367 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.368 +
  62.369 +	return result;
  62.370 +}
  62.371 +
  62.372 +static struct ia64_pal_retval
  62.373 +pal_cache_prot_info(VCPU *vcpu) {
  62.374 +	struct ia64_pal_retval result;
  62.375 +
  62.376 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.377 +
  62.378 +	return result;
  62.379 +}
  62.380 +
  62.381 +static struct ia64_pal_retval
  62.382 +pal_mem_attrib(VCPU *vcpu) {
  62.383 +	struct ia64_pal_retval result;
  62.384 +
  62.385 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.386 +
  62.387 +	return result;
  62.388 +}
  62.389 +
  62.390 +static struct ia64_pal_retval
  62.391 +pal_debug_info(VCPU *vcpu) {
  62.392 +	struct ia64_pal_retval result;
  62.393 +
  62.394 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.395 +
  62.396 +	return result;
  62.397 +}
  62.398 +
  62.399 +static struct ia64_pal_retval
  62.400 +pal_fixed_addr(VCPU *vcpu) {
  62.401 +	struct ia64_pal_retval result;
  62.402 +
  62.403 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.404 +
  62.405 +	return result;
  62.406 +}
  62.407 +
  62.408 +static struct ia64_pal_retval
  62.409 +pal_freq_base(VCPU *vcpu) {
  62.410 +	struct ia64_pal_retval result;
  62.411 +	struct ia64_sal_retval isrv;
  62.412 +
  62.413 +	PAL_CALL(result,PAL_FREQ_BASE, 0, 0, 0);
  62.414 +	/*
   62.415 +	 * PAL_FREQ_BASE may not be implemented on some platforms,
  62.416 +	 * call SAL instead.
  62.417 +	 */
  62.418 +	if (result.v0 == 0) {
  62.419 +		SAL_CALL(isrv, SAL_FREQ_BASE, 
  62.420 +		         SAL_FREQ_BASE_PLATFORM, 0, 0, 0, 0, 0, 0);
  62.421 +		result.status = isrv.status;
  62.422 +		result.v0 = isrv.v0;
  62.423 +		result.v1 = result.v2 = 0;
  62.424 +	}
  62.425 +	return result;
  62.426 +}
  62.427 +
  62.428 +static struct ia64_pal_retval
  62.429 +pal_freq_ratios(VCPU *vcpu) {
  62.430 +	struct ia64_pal_retval result;
  62.431 +
  62.432 +	PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
  62.433 +	return result;
  62.434 +}
  62.435 +
  62.436 +static struct ia64_pal_retval
  62.437 +pal_halt_info(VCPU *vcpu) {
  62.438 +	struct ia64_pal_retval result;
  62.439 +
  62.440 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.441 +
  62.442 +	return result;
  62.443 +}
  62.444 +
  62.445 +static struct ia64_pal_retval
  62.446 +pal_logical_to_physica(VCPU *vcpu) {
  62.447 +	struct ia64_pal_retval result;
  62.448 +
  62.449 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.450 +
  62.451 +	return result;
  62.452 +}
  62.453 +
  62.454 +static struct ia64_pal_retval
  62.455 +pal_perf_mon_info(VCPU *vcpu) {
  62.456 +	struct ia64_pal_retval result;
  62.457 +
  62.458 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.459 +
  62.460 +	return result;
  62.461 +}
  62.462 +
  62.463 +static struct ia64_pal_retval
  62.464 +pal_proc_get_features(VCPU *vcpu) {
  62.465 +	struct ia64_pal_retval result;
  62.466 +
  62.467 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.468 +
  62.469 +	return result;
  62.470 +}
  62.471 +
  62.472 +static struct ia64_pal_retval
  62.473 +pal_ptce_info(VCPU *vcpu) {
  62.474 +	struct ia64_pal_retval result;
  62.475 +
  62.476 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.477 +
  62.478 +	return result;
  62.479 +}
  62.480 +
  62.481 +static struct ia64_pal_retval
  62.482 +pal_register_info(VCPU *vcpu) {
  62.483 +	struct ia64_pal_retval result;
  62.484 +
  62.485 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.486 +
  62.487 +	return result;
  62.488 +}
  62.489 +
  62.490 +static struct ia64_pal_retval
  62.491 +pal_rse_info(VCPU *vcpu) {
  62.492 +	struct ia64_pal_retval result;
  62.493 +
  62.494 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.495 +
  62.496 +	return result;
  62.497 +}
  62.498 +
  62.499 +static struct ia64_pal_retval
  62.500 +pal_test_info(VCPU *vcpu) {
  62.501 +	struct ia64_pal_retval result;
  62.502 +
  62.503 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.504 +
  62.505 +	return result;
  62.506 +}
  62.507 +
  62.508 +static struct ia64_pal_retval
  62.509 +pal_vm_summary(VCPU *vcpu) {
  62.510  	pal_vm_info_1_u_t vminfo1;
  62.511  	pal_vm_info_2_u_t vminfo2;	
  62.512  	struct ia64_pal_retval result;
  62.513  	
  62.514 -	PAL_CALL(result,PAL_VM_SUMMARY,0,0,0);
  62.515 -	if(!result.status){
  62.516 +	PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
  62.517 +	if (!result.status) {
  62.518  		vminfo1.pvi1_val = result.v0;
  62.519  		vminfo1.pal_vm_info_1_s.max_itr_entry = NITRS -1;
  62.520  		vminfo1.pal_vm_info_1_s.max_dtr_entry = NDTRS -1;
  62.521  		result.v0 = vminfo1.pvi1_val;
  62.522  		vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
  62.523 -		vminfo2.pal_vm_info_2_s.rid_size = current->domain->arch.rid_bits;
  62.524 +		vminfo2.pal_vm_info_2_s.rid_size =
  62.525 +		                             current->domain->arch.rid_bits;
  62.526  		result.v1 = vminfo2.pvi2_val;
  62.527  	} 
  62.528  	return result;
  62.529  }
  62.530  
  62.531  static struct ia64_pal_retval
  62.532 -pal_vm_info(VCPU *vcpu){
  62.533 +pal_vm_info(VCPU *vcpu) {
  62.534  	struct ia64_pal_retval result;
  62.535  
  62.536 -	result.status= -1; //unimplemented
  62.537 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.538 +
  62.539  	return result;
  62.540  }
  62.541  
  62.542  static struct ia64_pal_retval
  62.543 -pal_vm_page_size(VCPU *vcpu){
  62.544 +pal_vm_page_size(VCPU *vcpu) {
  62.545  	struct ia64_pal_retval result;
  62.546  
  62.547 -	result.status= -1; //unimplemented
  62.548 +	INIT_PAL_STATUS_UNIMPLEMENTED(result);
  62.549 +
  62.550  	return result;
  62.551  }
  62.552 +
  62.553  void
  62.554 -pal_emul( VCPU *vcpu) {
  62.555 +pal_emul(VCPU *vcpu) {
  62.556  	UINT64 gr28;
  62.557  	struct ia64_pal_retval result;
  62.558  
  62.559 -
  62.560  	vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
  62.561  
  62.562  	switch (gr28) {
  62.563  		case PAL_CACHE_FLUSH:
  62.564 -			result = pal_cache_flush (vcpu);
  62.565 +			result = pal_cache_flush(vcpu);
  62.566  			break;
  62.567  
  62.568  		case PAL_PREFETCH_VISIBILITY:
  62.569 -			result = pal_prefetch_visibility (vcpu);
  62.570 +			result = pal_prefetch_visibility(vcpu);
  62.571  			break;
  62.572  
  62.573  		case PAL_VM_TR_READ:
  62.574 -			result = pal_vm_tr_read (vcpu);
  62.575 +			result = pal_vm_tr_read(vcpu);
  62.576  			break;
  62.577  
  62.578  		case PAL_HALT:
  62.579 -			result = pal_halt (vcpu);
  62.580 +			result = pal_halt(vcpu);
  62.581  			break;
  62.582  
  62.583  		case PAL_HALT_LIGHT:
  62.584 -			result = pal_halt_light (vcpu);
  62.585 +			result = pal_halt_light(vcpu);
  62.586  			break;
  62.587  
  62.588  		case PAL_CACHE_READ:
  62.589 -			result = pal_cache_read (vcpu);
  62.590 +			result = pal_cache_read(vcpu);
  62.591  			break;
  62.592  
  62.593  		case PAL_CACHE_WRITE:
  62.594 -			result = pal_cache_write (vcpu);
  62.595 +			result = pal_cache_write(vcpu);
  62.596  			break;
  62.597  
  62.598  		case PAL_PLATFORM_ADDR:
  62.599 -			result = pal_platform_addr (vcpu);
  62.600 +			result = pal_platform_addr(vcpu);
  62.601  			break;
  62.602  
  62.603  		case PAL_FREQ_RATIOS:
  62.604 -			result = pal_freq_ratios (vcpu);
  62.605 +			result = pal_freq_ratios(vcpu);
  62.606  			break;
  62.607  
  62.608  		case PAL_FREQ_BASE:
  62.609 -			result = pal_freq_base (vcpu);
  62.610 +			result = pal_freq_base(vcpu);
  62.611  			break;
  62.612  
  62.613  		case PAL_BUS_GET_FEATURES :
  62.614 -			result = pal_bus_get_features (vcpu);
  62.615 +			result = pal_bus_get_features(vcpu);
  62.616  			break;
  62.617  
  62.618  		case PAL_CACHE_SUMMARY :
  62.619 -			result = pal_cache_summary (vcpu);
  62.620 +			result = pal_cache_summary(vcpu);
  62.621  			break;
  62.622  
  62.623  		case PAL_CACHE_INIT :
  62.624 @@ -457,17 +507,18 @@ pal_emul( VCPU *vcpu) {
  62.625  			break;
  62.626  
  62.627  		default:
  62.628 -			panic_domain(vcpu_regs(vcpu),"pal_emul(): guest call unsupported pal" );
  62.629 -  }
  62.630 -		set_pal_result (vcpu, result);
  62.631 +			panic_domain(vcpu_regs(vcpu),"pal_emul(): guest "
  62.632 +			             "call unsupported pal" );
  62.633 +	}
  62.634 +	set_pal_result(vcpu, result);
  62.635  }
  62.636  
  62.637  void
  62.638  sal_emul(VCPU *v) {
  62.639  	struct sal_ret_values result;
  62.640 -	result = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
  62.641 -	                      vcpu_get_gr(v,34),vcpu_get_gr(v,35),
  62.642 -	                      vcpu_get_gr(v,36),vcpu_get_gr(v,37),
  62.643 -	                      vcpu_get_gr(v,38),vcpu_get_gr(v,39));
  62.644 +	result = sal_emulator(vcpu_get_gr(v, 32), vcpu_get_gr(v, 33),
  62.645 +	                      vcpu_get_gr(v, 34), vcpu_get_gr(v, 35),
  62.646 +	                      vcpu_get_gr(v, 36), vcpu_get_gr(v, 37),
  62.647 +	                      vcpu_get_gr(v, 38), vcpu_get_gr(v, 39));
  62.648  	set_sal_result(v, result);	
  62.649  }
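
For reference, a minimal standalone sketch of the pattern the new INIT_PAL_STATUS_* macros establish: every stub PAL handler now hands back a fully initialised return structure instead of one with only .status set. The field layout and the -1/0 status values come from the hunks above; the do/while wrapper and the ex_-prefixed names are illustrative additions, not code from the tree.

    /* Sketch only -- mirrors the macros introduced above. */
    #define EX_PAL_STATUS_SUCCESS        0L
    #define EX_PAL_STATUS_UNIMPLEMENTED  (-1L)

    struct ex_pal_retval { long status; long v0, v1, v2; };

    #define EX_INIT_PAL_STATUS_UNIMPLEMENTED(x)       \
        do {                                          \
            (x).status = EX_PAL_STATUS_UNIMPLEMENTED; \
            (x).v0 = (x).v1 = (x).v2 = 0;             \
        } while (0)

    static struct ex_pal_retval ex_pal_stub(void)
    {
        struct ex_pal_retval result;

        /* v0..v2 are no longer left as stack garbage */
        EX_INIT_PAL_STATUS_UNIMPLEMENTED(result);
        return result;
    }
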
    63.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Jul 27 17:44:14 2006 -0500
    63.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Fri Jul 28 10:51:38 2006 +0100
    63.3 @@ -103,6 +103,7 @@ static void vtm_timer_fn(void *data)
    63.4      vitv = VCPU(vcpu, itv);
    63.5      if ( !ITV_IRQ_MASK(vitv) ){
    63.6          vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
    63.7 +        vcpu_unblock(vcpu);
    63.8      }
    63.9      vtm=&(vcpu->arch.arch_vmx.vtm);
   63.10      cur_itc = now_itc(vtm);
   63.11 @@ -290,7 +291,7 @@ static void update_vhpi(VCPU *vcpu, int 
   63.12          vhpi = 16;
   63.13      }
   63.14      else {
   63.15 -        vhpi = vec / 16;
   63.16 +        vhpi = vec >> 4;
   63.17      }
   63.18  
   63.19      VCPU(vcpu,vhpi) = vhpi;
   63.20 @@ -437,7 +438,7 @@ static int highest_inservice_irq(VCPU *v
   63.21   */
   63.22  static int is_higher_irq(int pending, int inservice)
   63.23  {
   63.24 -    return ( (pending >> 4) > (inservice>>4) || 
   63.25 +    return ( (pending > inservice) || 
   63.26                  ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
   63.27  }
   63.28  
   63.29 @@ -461,7 +462,6 @@ static int
   63.30  _xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   63.31  {
   63.32      tpr_t    vtpr;
   63.33 -    uint64_t    mmi;
   63.34      
   63.35      vtpr.val = VCPU(vcpu, tpr);
   63.36  
   63.37 @@ -475,9 +475,9 @@ static int
   63.38      if ( h_inservice == ExtINT_VECTOR ) {
   63.39          return IRQ_MASKED_BY_INSVC;
   63.40      }
   63.41 -    mmi = vtpr.mmi;
   63.42 +
   63.43      if ( h_pending == ExtINT_VECTOR ) {
   63.44 -        if ( mmi ) {
   63.45 +        if ( vtpr.mmi ) {
   63.46              // mask all external IRQ
   63.47              return IRQ_MASKED_BY_VTPR;
   63.48          }
   63.49 @@ -487,7 +487,7 @@ static int
   63.50      }
   63.51  
   63.52      if ( is_higher_irq(h_pending, h_inservice) ) {
   63.53 -        if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) {
   63.54 +        if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
   63.55              return IRQ_NO_MASKED;
   63.56          }
   63.57          else {
   63.58 @@ -551,8 +551,7 @@ void vmx_vcpu_pend_batch_interrupt(VCPU 
   63.59   * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when 
   63.60   * the interrupt becomes unmasked, it gets injected.
   63.61   * RETURN:
   63.62 - *  TRUE:   Interrupt is injected.
   63.63 - *  FALSE:  Not injected but may be in VHPI when vac.a_int=1
   63.64 + *    the highest unmasked interrupt.
   63.65   *
   63.66   * Optimization: We defer setting the VHPI until the EOI time, if a higher 
   63.67   *               priority interrupt is in-service. The idea is to reduce the 
   63.68 @@ -562,23 +561,26 @@ int vmx_check_pending_irq(VCPU *vcpu)
   63.69  {
   63.70      uint64_t  spsr, mask;
   63.71      int     h_pending, h_inservice;
   63.72 -    int injected=0;
   63.73      uint64_t    isr;
   63.74      IA64_PSR    vpsr;
   63.75      REGS *regs=vcpu_regs(vcpu);
   63.76      local_irq_save(spsr);
   63.77      h_pending = highest_pending_irq(vcpu);
   63.78 -    if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
   63.79 +    if ( h_pending == NULL_VECTOR ) {
   63.80 +        h_pending = SPURIOUS_VECTOR;
   63.81 +        goto chk_irq_exit;
   63.82 +    }
   63.83      h_inservice = highest_inservice_irq(vcpu);
   63.84  
   63.85 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
   63.86 +    vpsr.val = VCPU(vcpu, vpsr);
   63.87      mask = irq_masked(vcpu, h_pending, h_inservice);
   63.88      if (  vpsr.i && IRQ_NO_MASKED == mask ) {
   63.89          isr = vpsr.val & IA64_PSR_RI;
   63.90          if ( !vpsr.ic )
   63.91              panic_domain(regs,"Interrupt when IC=0\n");
   63.92 +        if (VCPU(vcpu, vhpi))
   63.93 +            update_vhpi(vcpu, NULL_VECTOR);
   63.94          vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ
   63.95 -        injected = 1;
   63.96      }
   63.97      else if ( mask == IRQ_MASKED_BY_INSVC ) {
    63.98          // can't inject VHPI
   63.99 @@ -591,7 +593,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
  63.100  
  63.101  chk_irq_exit:
  63.102      local_irq_restore(spsr);
  63.103 -    return injected;
  63.104 +    return h_pending;
  63.105  }
  63.106  
  63.107  /*
  63.108 @@ -613,6 +615,20 @@ void guest_write_eoi(VCPU *vcpu)
  63.109  //    vmx_check_pending_irq(vcpu);
  63.110  }
  63.111  
  63.112 +int is_unmasked_irq(VCPU *vcpu)
  63.113 +{
  63.114 +    int h_pending, h_inservice;
  63.115 +
  63.116 +    h_pending = highest_pending_irq(vcpu);
  63.117 +    h_inservice = highest_inservice_irq(vcpu);
  63.118 +    if ( h_pending == NULL_VECTOR || 
  63.119 +        irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
  63.120 +        return 0;
  63.121 +    }
  63.122 +    else
  63.123 +        return 1;
  63.124 +}
  63.125 +
  63.126  uint64_t guest_read_vivr(VCPU *vcpu)
  63.127  {
  63.128      int vec, h_inservice;
  63.129 @@ -629,7 +645,8 @@ uint64_t guest_read_vivr(VCPU *vcpu)
  63.130   
  63.131      VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
  63.132      VCPU(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
  63.133 -    update_vhpi(vcpu, NULL_VECTOR);     // clear VHPI till EOI or IRR write
  63.134 +    if (VCPU(vcpu, vhpi))
  63.135 +        update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
  63.136      local_irq_restore(spsr);
  63.137      return (uint64_t)vec;
  63.138  }
  63.139 @@ -639,7 +656,7 @@ static void generate_exirq(VCPU *vcpu)
  63.140      IA64_PSR    vpsr;
  63.141      uint64_t    isr;
  63.142      REGS *regs=vcpu_regs(vcpu);
  63.143 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
  63.144 +    vpsr.val = VCPU(vcpu, vpsr);
  63.145      update_vhpi(vcpu, NULL_VECTOR);
  63.146      isr = vpsr.val & IA64_PSR_RI;
  63.147      if ( !vpsr.ic )
  63.148 @@ -653,7 +670,7 @@ void vhpi_detection(VCPU *vcpu)
  63.149      tpr_t       vtpr;
  63.150      IA64_PSR    vpsr;
  63.151      
  63.152 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
  63.153 +    vpsr.val = VCPU(vcpu, vpsr);
  63.154      vtpr.val = VCPU(vcpu, tpr);
  63.155  
  63.156      threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    64.1 --- a/xen/arch/ia64/vmx/vmmu.c	Thu Jul 27 17:44:14 2006 -0500
    64.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Fri Jul 28 10:51:38 2006 +0100
    64.3 @@ -268,7 +268,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
    64.4      PTA   vpta;
    64.5      IA64_PSR  vpsr; 
    64.6  
    64.7 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    64.8 +    vpsr.val = VCPU(vcpu, vpsr);
    64.9      vcpu_get_rr(vcpu, vadr, &vrr.rrval);
   64.10      vmx_vcpu_get_pta(vcpu,&vpta.val);
   64.11  
   64.12 @@ -290,6 +290,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
   64.13  
   64.14  int unimplemented_gva(VCPU *vcpu,u64 vadr)
   64.15  {
   64.16 +#if 0
   64.17      int bit=vcpu->domain->arch.imp_va_msb;
   64.18      u64 ladr =(vadr<<3)>>(3+bit);
   64.19      if(!ladr||ladr==(1U<<(61-bit))-1){
   64.20 @@ -297,6 +298,9 @@ int unimplemented_gva(VCPU *vcpu,u64 vad
   64.21      }else{
   64.22          return 1;
   64.23      }
   64.24 +#else
   64.25 +    return 0;
   64.26 +#endif
   64.27  }
   64.28  
   64.29  
   64.30 @@ -618,7 +622,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
   64.31      visr.val=0;
   64.32      visr.ei=pt_isr.ei;
   64.33      visr.ir=pt_isr.ir;
   64.34 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
   64.35 +    vpsr.val = VCPU(vcpu, vpsr);
   64.36      if(vpsr.ic==0){
   64.37          visr.ni=1;
   64.38      }
    65.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Jul 27 17:44:14 2006 -0500
    65.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Fri Jul 28 10:51:38 2006 +0100
    65.3 @@ -163,24 +163,39 @@ END(ia64_leave_nested)
    65.4  
    65.5  
    65.6  
    65.7 -GLOBAL_ENTRY(ia64_leave_hypervisor)
    65.8 +GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
    65.9      PT_REGS_UNWIND_INFO(0)
   65.10      /*
   65.11       * work.need_resched etc. mustn't get changed by this CPU before it returns to
   65.12      ;;
   65.13       * user- or fsys-mode, hence we disable interrupts early on:
   65.14       */
   65.15 +    adds r2 = PT(R4)+16,r12
   65.16 +    adds r3 = PT(R5)+16,r12
   65.17 +    adds r8 = PT(EML_UNAT)+16,r12
   65.18 +    ;;
   65.19 +    ld8 r8 = [r8]
   65.20 +    ;;
   65.21 +    mov ar.unat=r8
   65.22 +    ;;
   65.23 +    ld8.fill r4=[r2],16    //load r4
   65.24 +    ld8.fill r5=[r3],16    //load r5
   65.25 +    ;;
   65.26 +    ld8.fill r6=[r2]    //load r6
   65.27 +    ld8.fill r7=[r3]    //load r7
   65.28 +    ;;
   65.29 +END(ia64_leave_hypervisor_prepare)
   65.30 +//fall through
   65.31 +GLOBAL_ENTRY(ia64_leave_hypervisor)
   65.32 +    PT_REGS_UNWIND_INFO(0)
   65.33      rsm psr.i
   65.34      ;;
   65.35      alloc loc0=ar.pfs,0,1,1,0
   65.36 +    ;;
   65.37      adds out0=16,r12
   65.38 -    adds r7 = PT(EML_UNAT)+16,r12
   65.39 -    ;;
   65.40 -    ld8 r7 = [r7]
   65.41      br.call.sptk.many b0=leave_hypervisor_tail
   65.42      ;;
   65.43      mov ar.pfs=loc0
   65.44 -    mov ar.unat=r7
   65.45      adds r20=PT(PR)+16,r12
   65.46      ;;
   65.47      lfetch [r20],PT(CR_IPSR)-PT(PR)
   65.48 @@ -245,12 +260,6 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
   65.49      ldf.fill f10=[r2],32
   65.50      ldf.fill f11=[r3],24
   65.51      ;;
   65.52 -    ld8.fill r4=[r2],16    //load r4
   65.53 -    ld8.fill r5=[r3],16    //load r5
   65.54 -    ;;
   65.55 -    ld8.fill r6=[r2]    //load r6
   65.56 -    ld8.fill r7=[r3]    //load r7
   65.57 -    ;;
   65.58      srlz.i          // ensure interruption collection is off
   65.59      ;;
   65.60      bsw.0
   65.61 @@ -283,8 +292,8 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
   65.62      ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
   65.63      ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    //load r2
   65.64      ;;
   65.65 -    ld8.fill r3=[r16]    //load r3
   65.66 -    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV)           //load ar_ccv
   65.67 +    ld8.fill r3=[r16]	//load r3
   65.68 +    ld8 r18=[r17]	//load ar_ccv
   65.69      ;;
   65.70      mov ar.fpsr=r19
   65.71      mov ar.ccv=r18
   65.72 @@ -348,7 +357,6 @@ vmx_rse_clear_invalid:
   65.73      ;;
   65.74      mov ar.bspstore=r24
   65.75      ;;
   65.76 -    ld8 r24=[r17]       //load rfi_pfs
   65.77      mov ar.unat=r28
   65.78      mov ar.rnat=r25
   65.79      mov ar.rsc=r26
   65.80 @@ -356,10 +364,6 @@ vmx_rse_clear_invalid:
   65.81      mov cr.ipsr=r31
   65.82      mov cr.iip=r30
   65.83      mov cr.ifs=r29
   65.84 -    cmp.ne p6,p0=r24,r0
   65.85 -(p6)br.sptk vmx_dorfirfi
   65.86 -    ;;
   65.87 -vmx_dorfirfi_back:
   65.88      mov ar.pfs=r27
   65.89      adds r18=IA64_VPD_BASE_OFFSET,r21
   65.90      ;;
   65.91 @@ -370,20 +374,19 @@ vmx_dorfirfi_back:
   65.92      adds r19=VPD(VPSR),r18
   65.93      ;;
   65.94      ld8 r19=[r19]        //vpsr
   65.95 -//vsa_sync_write_start
   65.96      movl r20=__vsa_base
   65.97      ;;
   65.98 +//vsa_sync_write_start
   65.99      ld8 r20=[r20]       // read entry point
  65.100      mov r25=r18
  65.101      ;;
  65.102 +    movl r24=ia64_vmm_entry  // calculate return address
  65.103      add r16=PAL_VPS_SYNC_WRITE,r20
  65.104 -    movl r24=switch_rr7  // calculate return address
  65.105      ;;
  65.106      mov b0=r16
  65.107      br.cond.sptk b0         // call the service
  65.108      ;;
  65.109  END(ia64_leave_hypervisor)
  65.110 -switch_rr7:
  65.111  // fall through
  65.112  GLOBAL_ENTRY(ia64_vmm_entry)
  65.113  /*
  65.114 @@ -416,23 +419,6 @@ ia64_vmm_entry_out:
  65.115      br.cond.sptk b0             // call pal service
  65.116  END(ia64_vmm_entry)
  65.117  
  65.118 -//r24 rfi_pfs
  65.119 -//r17 address of rfi_pfs
  65.120 -GLOBAL_ENTRY(vmx_dorfirfi)
  65.121 -    mov r16=ar.ec
  65.122 -    movl r20 = vmx_dorfirfi_back
  65.123 -	;;
  65.124 -// clean rfi_pfs
  65.125 -    st8 [r17]=r0
  65.126 -    mov b0=r20
  65.127 -// pfs.pec=ar.ec
  65.128 -    dep r24 = r16, r24, 52, 6
  65.129 -    ;;
  65.130 -    mov ar.pfs=r24
  65.131 -	;;
  65.132 -    br.ret.sptk b0
  65.133 -	;;
  65.134 -END(vmx_dorfirfi)
  65.135  
  65.136  #ifdef XEN_DBL_MAPPING  /* will be removed */
  65.137  
    66.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Jul 27 17:44:14 2006 -0500
    66.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Fri Jul 28 10:51:38 2006 +0100
    66.3 @@ -156,6 +156,7 @@ static vpd_t *alloc_vpd(void)
    66.4  	int i;
    66.5  	cpuid3_t cpuid3;
    66.6  	vpd_t *vpd;
    66.7 +	mapped_regs_t *mregs;
    66.8  
    66.9  	vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
   66.10  	if (!vpd) {
   66.11 @@ -165,23 +166,26 @@ static vpd_t *alloc_vpd(void)
   66.12  
   66.13  	printk("vpd base: 0x%p, vpd size:%ld\n", vpd, sizeof(vpd_t));
   66.14  	memset(vpd, 0, VPD_SIZE);
   66.15 +	mregs = &vpd->vpd_low;
   66.16 +
   66.17  	/* CPUID init */
   66.18  	for (i = 0; i < 5; i++)
   66.19 -		vpd->vcpuid[i] = ia64_get_cpuid(i);
   66.20 +		mregs->vcpuid[i] = ia64_get_cpuid(i);
   66.21  
   66.22  	/* Limit the CPUID number to 5 */
   66.23 -	cpuid3.value = vpd->vcpuid[3];
   66.24 +	cpuid3.value = mregs->vcpuid[3];
   66.25  	cpuid3.number = 4;	/* 5 - 1 */
   66.26 -	vpd->vcpuid[3] = cpuid3.value;
   66.27 +	mregs->vcpuid[3] = cpuid3.value;
   66.28  
   66.29 -    vpd->vac.a_from_int_cr = 1;
   66.30 -    vpd->vac.a_to_int_cr = 1;
   66.31 -    vpd->vac.a_from_psr = 1;
   66.32 -    vpd->vac.a_from_cpuid = 1;
   66.33 -    vpd->vac.a_cover = 1;
   66.34 -    vpd->vac.a_bsw = 1;
   66.35 -
   66.36 -	vpd->vdc.d_vmsw = 1;
   66.37 +	mregs->vac.a_from_int_cr = 1;
   66.38 +	mregs->vac.a_to_int_cr = 1;
   66.39 +	mregs->vac.a_from_psr = 1;
   66.40 +	mregs->vac.a_from_cpuid = 1;
   66.41 +	mregs->vac.a_cover = 1;
   66.42 +	mregs->vac.a_bsw = 1;
   66.43 +	mregs->vac.a_int = 1;
   66.44 +       
   66.45 +	mregs->vdc.d_vmsw = 1;
   66.46  
   66.47  	return vpd;
   66.48  }
   66.49 @@ -201,7 +205,7 @@ static void
   66.50  vmx_create_vp(struct vcpu *v)
   66.51  {
   66.52  	u64 ret;
   66.53 -	vpd_t *vpd = v->arch.privregs;
   66.54 +	vpd_t *vpd = (vpd_t *)v->arch.privregs;
   66.55  	u64 ivt_base;
   66.56      extern char vmx_ia64_ivt;
    66.57  	/* ia64_ivt is function pointer, so need this translation */
   66.58 @@ -271,13 +275,11 @@ vmx_final_setup_guest(struct vcpu *v)
   66.59  {
   66.60  	vpd_t *vpd;
   66.61  
   66.62 -	free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
   66.63 -
   66.64  	vpd = alloc_vpd();
   66.65  	ASSERT(vpd);
   66.66  
   66.67 -	v->arch.privregs = vpd;
   66.68 -	vpd->virt_env_vaddr = vm_buffer;
   66.69 +	v->arch.privregs = (mapped_regs_t *)vpd;
   66.70 +	vpd->vpd_low.virt_env_vaddr = vm_buffer;
   66.71  
   66.72  	/* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
   66.73  	 * to this solution. Maybe it can be deferred until we know created
   66.74 @@ -298,6 +300,8 @@ vmx_final_setup_guest(struct vcpu *v)
   66.75  
   66.76  	/* One more step to enable interrupt assist */
   66.77  	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
    66.78 +	/* Set up guest's indicator for VTi domain */
   66.79 +	set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
   66.80  }
   66.81  
   66.82  void
   66.83 @@ -317,7 +321,7 @@ typedef struct io_range {
   66.84  	unsigned long type;
   66.85  } io_range_t;
   66.86  
   66.87 -io_range_t io_ranges[] = {
   66.88 +static const io_range_t io_ranges[] = {
   66.89  	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
   66.90  	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
   66.91  	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
   66.92 @@ -325,24 +329,22 @@ io_range_t io_ranges[] = {
   66.93  	{PIB_START, PIB_SIZE, GPFN_PIB},
   66.94  };
   66.95  
    66.96 +/* Reserve 1 page for shared I/O and 1 page for xenstore. */
   66.97  #define VMX_SYS_PAGES	(2 + (GFW_SIZE >> PAGE_SHIFT))
   66.98  #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
   66.99  
  66.100 -int vmx_build_physmap_table(struct domain *d)
  66.101 +static void vmx_build_physmap_table(struct domain *d)
  66.102  {
  66.103  	unsigned long i, j, start, tmp, end, mfn;
  66.104 -	struct vcpu *v = d->vcpu[0];
  66.105  	struct list_head *list_ent = d->page_list.next;
  66.106  
  66.107 -	ASSERT(!d->arch.physmap_built);
  66.108 -	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
  66.109  	ASSERT(d->max_pages == d->tot_pages);
  66.110  
  66.111  	/* Mark I/O ranges */
  66.112  	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
  66.113  	    for (j = io_ranges[i].start;
  66.114 -		 j < io_ranges[i].start + io_ranges[i].size;
  66.115 -		 j += PAGE_SIZE)
  66.116 +		j < io_ranges[i].start + io_ranges[i].size;
  66.117 +		j += PAGE_SIZE)
  66.118  		__assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable);
  66.119  	}
  66.120  
  66.121 @@ -362,21 +364,19 @@ int vmx_build_physmap_table(struct domai
  66.122  	if (unlikely(end > MMIO_START)) {
  66.123  	    start = 4 * MEM_G;
  66.124  	    end = start + (end - 3 * MEM_G);
  66.125 -	    for (i = start; (i < end) &&
  66.126 -		 (list_ent != &d->page_list); i += PAGE_SIZE) {
  66.127 -		mfn = page_to_mfn(list_entry(
  66.128 -		    list_ent, struct page_info, list));
  66.129 +	    for (i = start;
  66.130 +	         (i < end) && (list_ent != &d->page_list); i += PAGE_SIZE) {
  66.131 +		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
  66.132  		assign_domain_page(d, i, mfn << PAGE_SHIFT);
  66.133  		list_ent = mfn_to_page(mfn)->list.next;
  66.134  	    }
  66.135  	    ASSERT(list_ent != &d->page_list);
  66.136 -        }
  66.137 +	}
  66.138  	 
  66.139  	/* Map guest firmware */
  66.140  	for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
  66.141  		(list_ent != &d->page_list); i += PAGE_SIZE) {
  66.142 -	    mfn = page_to_mfn(list_entry(
  66.143 -		list_ent, struct page_info, list));
  66.144 +	    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
  66.145  	    assign_domain_page(d, i, mfn << PAGE_SHIFT);
  66.146  	    list_ent = mfn_to_page(mfn)->list.next;
  66.147  	}
  66.148 @@ -393,24 +393,21 @@ int vmx_build_physmap_table(struct domai
  66.149  	list_ent = mfn_to_page(mfn)->list.next;
  66.150  	ASSERT(list_ent == &d->page_list);
  66.151  
  66.152 -	d->arch.max_pfn = end >> PAGE_SHIFT;
  66.153 -	d->arch.physmap_built = 1;
  66.154 -	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
  66.155 -	return 0;
  66.156  }
  66.157  
  66.158 -void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
  66.159 +void vmx_setup_platform(struct domain *d)
  66.160  {
  66.161  	ASSERT(d != dom0); /* only for non-privileged vti domain */
  66.162  
  66.163 -	if (!d->arch.physmap_built)
  66.164 -	    vmx_build_physmap_table(d);
  66.165 +	vmx_build_physmap_table(d);
  66.166  
  66.167  	d->arch.vmx_platform.shared_page_va =
  66.168  		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
  66.169  	/* TEMP */
  66.170  	d->arch.vmx_platform.pib_base = 0xfee00000UL;
  66.171  
  66.172 +	d->arch.sal_data = xmalloc(struct xen_sal_data);
  66.173 +
  66.174  	/* Only open one port for I/O and interrupt emulation */
  66.175  	memset(&d->shared_info->evtchn_mask[0], 0xff,
  66.176  	    sizeof(d->shared_info->evtchn_mask));
  66.177 @@ -430,8 +427,7 @@ void vmx_do_launch(struct vcpu *v)
  66.178  	    domain_crash_synchronous();
  66.179  	}
  66.180  
  66.181 -	clear_bit(iopacket_port(v),
  66.182 -		&v->domain->shared_info->evtchn_mask[0]);
  66.183 +	clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
  66.184  
  66.185  	vmx_load_all_rr(v);
  66.186  }
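
The comment added above VMX_SYS_PAGES covers only the two fixed pages; the remainder of the reservation is the guest firmware image, GFW_SIZE >> PAGE_SHIFT pages. A worked example with assumed constants (16KB pages and a 16MB firmware image are assumptions for illustration, not values taken from this changeset):

    /* Illustration only, with assumed constants. */
    #define EX_PAGE_SHIFT 14                /* assumed: 16KB pages     */
    #define EX_GFW_SIZE   (16UL << 20)      /* assumed: 16MB guest f/w */

    /* 2 + (16MB >> 14) = 2 + 1024 = 1026 pages reserved: one shared I/O
     * page, one xenstore page, and the firmware pages; the rest of
     * max_pages is what VMX_CONFIG_PAGES(d) leaves for guest RAM. */
    static const unsigned long ex_vmx_sys_pages =
        2 + (EX_GFW_SIZE >> EX_PAGE_SHIFT);
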
    67.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Thu Jul 27 17:44:14 2006 -0500
    67.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Fri Jul 28 10:51:38 2006 +0100
    67.3 @@ -117,7 +117,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
    67.4  {
    67.5      IA64_PSR vpsr;
    67.6      u64 value;
    67.7 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    67.8 +    vpsr.val = VCPU(vcpu, vpsr);
    67.9      /* Vol2, Table 8-1 */
   67.10      if ( vpsr.ic ) {
   67.11          if ( set_ifa){
    68.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Jul 27 17:44:14 2006 -0500
    68.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Fri Jul 28 10:51:38 2006 +0100
    68.3 @@ -58,6 +58,7 @@
    68.4  #include <asm/thread_info.h>
    68.5  #include <asm/unistd.h>
    68.6  #include <asm/vhpt.h>
    68.7 +#include <asm/virt_event.h>
    68.8  
    68.9  #ifdef VTI_DEBUG
   68.10    /*
   68.11 @@ -200,7 +201,7 @@ vmx_itlb_loop:
   68.12      ;;
   68.13  vmx_itlb_out:
   68.14      mov r19 = 1
   68.15 -    br.sptk vmx_dispatch_tlb_miss
   68.16 +    br.sptk vmx_dispatch_itlb_miss
   68.17      VMX_FAULT(1);
   68.18  END(vmx_itlb_miss)
   68.19  
   68.20 @@ -274,7 +275,7 @@ vmx_dtlb_loop:
   68.21      ;;
   68.22  vmx_dtlb_out:
   68.23      mov r19 = 2
   68.24 -    br.sptk vmx_dispatch_tlb_miss
   68.25 +    br.sptk vmx_dispatch_dtlb_miss
   68.26      VMX_FAULT(2);
   68.27  END(vmx_dtlb_miss)
   68.28  
   68.29 @@ -787,6 +788,22 @@ ENTRY(vmx_virtualization_fault)
   68.30      st8 [r16] = r24
   68.31      st8 [r17] = r25
   68.32      ;;
   68.33 +    cmp.ne p6,p0=EVENT_RFI, r24
   68.34 +    (p6) br.sptk vmx_dispatch_virtualization_fault
   68.35 +    ;;
   68.36 +    adds r18=IA64_VPD_BASE_OFFSET,r21
   68.37 +    ;;
   68.38 +    ld8 r18=[r18]
   68.39 +    ;;
   68.40 +    adds r18=IA64_VPD_VIFS_OFFSET,r18
   68.41 +    ;;
   68.42 +    ld8 r18=[r18]
   68.43 +    ;;
   68.44 +    tbit.z p6,p0=r18,63
   68.45 +    (p6) br.sptk vmx_dispatch_virtualization_fault
   68.46 +    ;;
    68.47 +    // if vifs.v=1, discard the current register frame
   68.48 +    alloc r18=ar.pfs,0,0,0,0
   68.49      br.sptk vmx_dispatch_virtualization_fault
   68.50  END(vmx_virtualization_fault)
   68.51  
   68.52 @@ -1024,9 +1041,10 @@ ENTRY(vmx_dispatch_virtualization_fault)
   68.53      srlz.i                  // guarantee that interruption collection is on
   68.54      ;;
   68.55      (p15) ssm psr.i               // restore psr.i
   68.56 -    movl r14=ia64_leave_hypervisor
   68.57 +    movl r14=ia64_leave_hypervisor_prepare
   68.58      ;;
   68.59      VMX_SAVE_REST
   68.60 +    VMX_SAVE_EXTRA
   68.61      mov rp=r14
   68.62      ;;
   68.63      adds out1=16,sp         //regs
   68.64 @@ -1053,7 +1071,7 @@ ENTRY(vmx_dispatch_vexirq)
   68.65      br.call.sptk.many b6=vmx_vexirq
   68.66  END(vmx_dispatch_vexirq)
   68.67  
   68.68 -ENTRY(vmx_dispatch_tlb_miss)
   68.69 +ENTRY(vmx_dispatch_itlb_miss)
   68.70      VMX_SAVE_MIN_WITH_COVER_R19
   68.71      alloc r14=ar.pfs,0,0,3,0
   68.72      mov out0=cr.ifa
   68.73 @@ -1072,8 +1090,29 @@ ENTRY(vmx_dispatch_tlb_miss)
   68.74      ;;
   68.75      adds out2=16,r12
   68.76      br.call.sptk.many b6=vmx_hpw_miss
   68.77 -END(vmx_dispatch_tlb_miss)
   68.78 +END(vmx_dispatch_itlb_miss)
   68.79  
   68.80 +ENTRY(vmx_dispatch_dtlb_miss)
   68.81 +    VMX_SAVE_MIN_WITH_COVER_R19
   68.82 +    alloc r14=ar.pfs,0,0,3,0
   68.83 +    mov out0=cr.ifa
   68.84 +    mov out1=r15
   68.85 +    adds r3=8,r2                // set up second base pointer
   68.86 +    ;;
   68.87 +    ssm psr.ic
   68.88 +    ;;
   68.89 +    srlz.i                  // guarantee that interruption collection is on
   68.90 +    ;;
   68.91 +    (p15) ssm psr.i               // restore psr.i
   68.92 +    movl r14=ia64_leave_hypervisor_prepare
   68.93 +    ;;
   68.94 +    VMX_SAVE_REST
   68.95 +    VMX_SAVE_EXTRA
   68.96 +    mov rp=r14
   68.97 +    ;;
   68.98 +    adds out2=16,r12
   68.99 +    br.call.sptk.many b6=vmx_hpw_miss
  68.100 +END(vmx_dispatch_dtlb_miss)
  68.101  
  68.102  ENTRY(vmx_dispatch_break_fault)
  68.103      VMX_SAVE_MIN_WITH_COVER_R19
    69.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Thu Jul 27 17:44:14 2006 -0500
    69.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Fri Jul 28 10:51:38 2006 +0100
    69.3 @@ -57,8 +57,8 @@
    69.4      ;;
    69.5  
    69.6  
    69.7 -#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
    69.8 -    /* begin to call pal vps sync_read and cleanup psr.pl */     \
    69.9 +#define PAL_VSA_SYNC_READ           \
   69.10 +    /* begin to call pal vps sync_read */     \
   69.11      add r25=IA64_VPD_BASE_OFFSET, r21;       \
   69.12      movl r20=__vsa_base;     \
   69.13      ;;          \
   69.14 @@ -68,31 +68,17 @@
   69.15      add r20=PAL_VPS_SYNC_READ,r20;  \
   69.16      ;;  \
   69.17  { .mii;  \
   69.18 -    add r22=VPD(VPSR),r25;   \
   69.19 +    nop 0x0;   \
   69.20      mov r24=ip;        \
   69.21      mov b0=r20;     \
   69.22      ;;      \
   69.23  };           \
   69.24  { .mmb;      \
   69.25      add r24 = 0x20, r24;    \
   69.26 -    mov r16 = cr.ipsr;  /* Temp workaround since psr.ic is off */ \
   69.27 +    nop 0x0;   	 \
   69.28      br.cond.sptk b0;        /*  call the service */ \
   69.29      ;;              \
   69.30  };           \
   69.31 -    ld8 r17=[r22];   \
   69.32 -    /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */    \
   69.33 -    extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
   69.34 -    ;;      \
   69.35 -    dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2;   \
   69.36 -    extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
   69.37 -    ;;      \
   69.38 -    dep r17=r30, r17, IA64_PSR_BE_BIT, 5;   \
   69.39 -    extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
   69.40 -    ;;      \
   69.41 -    dep r17=r30, r17, IA64_PSR_RI_BIT, 2;   \
   69.42 -    ;;      \
   69.43 -    st8 [r22]=r17;      \
   69.44 -    ;;
   69.45  
   69.46  
   69.47  
   69.48 @@ -219,7 +205,7 @@
   69.49      movl r11=FPSR_DEFAULT;   /* L-unit */                           \
   69.50      movl r1=__gp;       /* establish kernel global pointer */               \
   69.51      ;;                                          \
   69.52 -    PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
   69.53 +    PAL_VSA_SYNC_READ           \
   69.54      VMX_MINSTATE_END_SAVE_MIN
   69.55  
   69.56  /*
   69.57 @@ -274,24 +260,27 @@
   69.58      stf.spill [r3]=f9,32;           \
   69.59      ;;                  \
   69.60      stf.spill [r2]=f10,32;         \
   69.61 -    stf.spill [r3]=f11,24;         \
   69.62 +    stf.spill [r3]=f11;         \
   69.63 +    adds r25=PT(B7)-PT(F11),r3;     \
   69.64      ;;                  \
   69.65 +    st8 [r24]=r18,16;       /* b6 */    \
   69.66 +    st8 [r25]=r19,16;       /* b7 */    \
   69.67 +    adds r3=PT(R5)-PT(F11),r3;     \
   69.68 +    ;;                  \
   69.69 +    st8 [r24]=r9;           /* ar.csd */    \
   69.70 +    st8 [r25]=r10;          /* ar.ssd */    \
   69.71 +    ;;
   69.72 +
   69.73 +#define VMX_SAVE_EXTRA               \
   69.74  .mem.offset 0,0; st8.spill [r2]=r4,16;     \
   69.75  .mem.offset 8,0; st8.spill [r3]=r5,16;     \
   69.76      ;;                  \
   69.77  .mem.offset 0,0; st8.spill [r2]=r6,16;      \
   69.78  .mem.offset 8,0; st8.spill [r3]=r7;      \
   69.79 -    adds r25=PT(B7)-PT(R7),r3;     \
   69.80 -    ;;                  \
   69.81 -    st8 [r24]=r18,16;       /* b6 */    \
   69.82 -    st8 [r25]=r19,16;       /* b7 */    \
   69.83 -    ;;                  \
   69.84 -    st8 [r24]=r9;           /* ar.csd */    \
   69.85 -    mov r26=ar.unat;            \
   69.86 -    ;;      \
   69.87 -    st8 [r25]=r10;          /* ar.ssd */    \
   69.88 +    ;;			\
   69.89 +    mov r26=ar.unat;    \
   69.90 +    ;;			\
   69.91      st8 [r2]=r26;       /* eml_unat */ \
   69.92 -    ;;
   69.93  
   69.94  #define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
   69.95  #define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
    70.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Jul 27 17:44:14 2006 -0500
    70.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Jul 28 10:51:38 2006 +0100
    70.3 @@ -110,10 +110,8 @@ void
    70.4  physical_tlb_miss(VCPU *vcpu, u64 vadr)
    70.5  {
    70.6      u64 pte;
    70.7 -    IA64_PSR vpsr;
    70.8 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
    70.9      pte =  vadr& _PAGE_PPN_MASK;
   70.10 -    pte = pte|(vpsr.cpl<<7)|PHY_PAGE_WB;
   70.11 +    pte = pte | PHY_PAGE_WB;
   70.12      thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
   70.13      return;
   70.14  }
   70.15 @@ -204,23 +202,7 @@ vmx_load_all_rr(VCPU *vcpu)
   70.16  	ia64_srlz_i();
   70.17  }
   70.18  
   70.19 -void
   70.20 -vmx_load_rr7_and_pta(VCPU *vcpu)
   70.21 -{
   70.22 -	unsigned long psr;
   70.23 -
   70.24 -	local_irq_save(psr);
   70.25  
   70.26 -	vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
   70.27 -			(void *)vcpu->domain->shared_info,
   70.28 -			(void *)vcpu->arch.privregs,
   70.29 -			(void *)vcpu->arch.vhpt.hash, pal_vaddr );
   70.30 -	ia64_set_pta(vcpu->arch.arch_vmx.mpta);
   70.31 -
   70.32 -	ia64_srlz_d();
   70.33 -	local_irq_restore(psr);
   70.34 -	ia64_srlz_i();
   70.35 -}
   70.36  
   70.37  void
   70.38  switch_to_physical_rid(VCPU *vcpu)
    71.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Thu Jul 27 17:44:14 2006 -0500
    71.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Fri Jul 28 10:51:38 2006 +0100
    71.3 @@ -35,7 +35,7 @@
    71.4  #include <asm/io.h>
    71.5  #include <asm/processor.h>
    71.6  #include <asm/desc.h>
    71.7 -//#include <asm/ldt.h>
    71.8 +#include <asm/vlsapic.h>
    71.9  #include <xen/irq.h>
   71.10  #include <xen/event.h>
   71.11  #include <asm/regionreg.h>
   71.12 @@ -82,7 +82,7 @@ void vmx_reflect_interruption(UINT64 ifa
   71.13       UINT64 vector,REGS *regs)
   71.14  {
   71.15      VCPU *vcpu = current;
   71.16 -    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
   71.17 +    UINT64 vpsr = VCPU(vcpu, vpsr);
   71.18      vector=vec2off[vector];
   71.19      if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
   71.20          panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
   71.21 @@ -156,7 +156,7 @@ void save_banked_regs_to_vpd(VCPU *v, RE
   71.22      IA64_PSR vpsr;
   71.23      src=&regs->r16;
   71.24      sunat=&regs->eml_unat;
   71.25 -    vpsr.val = vmx_vcpu_get_psr(v);
   71.26 +    vpsr.val = VCPU(v, vpsr);
   71.27      if(vpsr.bn){
   71.28          dst = &VCPU(v, vgr[0]);
   71.29          dunat =&VCPU(v, vnat);
   71.30 @@ -188,14 +188,13 @@ void leave_hypervisor_tail(struct pt_reg
   71.31      struct vcpu *v = current;
   71.32      // FIXME: Will this work properly if doing an RFI???
   71.33      if (!is_idle_domain(d) ) {	// always comes from guest
   71.34 -        extern void vmx_dorfirfi(void);
   71.35 -        struct pt_regs *user_regs = vcpu_regs(current);
   71.36 -        if (local_softirq_pending())
   71.37 -            do_softirq();
   71.38 +//        struct pt_regs *user_regs = vcpu_regs(current);
   71.39 +        local_irq_enable();
   71.40 +        do_softirq();
   71.41          local_irq_disable();
   71.42  
   71.43 -        if (user_regs != regs)
   71.44 -            printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
   71.45 +//        if (user_regs != regs)
   71.46 +//            printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
   71.47  
   71.48          /* VMX Domain N has other interrupt source, saying DM  */
   71.49          if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
   71.50 @@ -216,12 +215,18 @@ void leave_hypervisor_tail(struct pt_reg
   71.51  
   71.52          if ( v->arch.irq_new_pending ) {
   71.53              v->arch.irq_new_pending = 0;
   71.54 +            v->arch.irq_new_condition = 0;
   71.55              vmx_check_pending_irq(v);
   71.56 +            return;
   71.57          }
   71.58 -//        if (VCPU(v,vac).a_bsw){
   71.59 -//            save_banked_regs_to_vpd(v,regs);
   71.60 -//        }
   71.61 -
   71.62 +        if (VCPU(v, vac).a_int) {
   71.63 +            vhpi_detection(v);
   71.64 +            return;
   71.65 +        }
   71.66 +        if (v->arch.irq_new_condition) {
   71.67 +            v->arch.irq_new_condition = 0;
   71.68 +            vhpi_detection(v);
   71.69 +        }
   71.70      }
   71.71  }
   71.72  
   71.73 @@ -248,7 +253,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
   71.74      check_vtlb_sanity(vtlb);
   71.75      dump_vtlb(vtlb);
   71.76  #endif
   71.77 -    vpsr.val = vmx_vcpu_get_psr(v);
   71.78 +    vpsr.val = VCPU(v, vpsr);
   71.79      misr.val=VMX(v,cr_isr);
   71.80  
   71.81      if(is_physical_mode(v)&&(!(vadr<<1>>62))){
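
The leave_hypervisor_tail() hunk above reads more easily as a whole than as a diff; the new tail, consolidated from the added lines (this is simply the patched code gathered in one place, not extra code in the tree):

    if (v->arch.irq_new_pending) {           /* a freshly pended IRQ wins   */
        v->arch.irq_new_pending = 0;
        v->arch.irq_new_condition = 0;
        vmx_check_pending_irq(v);
        return;
    }
    if (VCPU(v, vac).a_int) {                /* interrupt acceleration on:  */
        vhpi_detection(v);                   /* always re-evaluate the VHPI */
        return;
    }
    if (v->arch.irq_new_condition) {         /* recorded psr.i 0 -> 1       */
        v->arch.irq_new_condition = 0;
        vhpi_detection(v);
    }
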
    72.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Thu Jul 27 17:44:14 2006 -0500
    72.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Fri Jul 28 10:51:38 2006 +0100
    72.3 @@ -58,7 +58,7 @@ void vmx_wait_io(void)
    72.4      if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
    72.5          set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    72.6  
    72.7 -    if (&v->vcpu_info->evtchn_pending_sel)
    72.8 +    if (v->vcpu_info->evtchn_pending_sel)
    72.9          set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   72.10  }
   72.11  
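
The one-character change in vmx_wait_io() above fixes a condition that could never be false: the old code tested the address of evtchn_pending_sel rather than its value. A self-contained illustration of the difference (the names here are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pending_sel = 0;

        if (&pending_sel)     /* old form: tests the address, always true */
            printf("old test: always taken\n");
        if (pending_sel)      /* new form: tests the value */
            printf("new test: taken only when a selector bit is set\n");
        return 0;
    }
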
    73.1 --- a/xen/arch/ia64/vmx/vmx_utility.c	Thu Jul 27 17:44:14 2006 -0500
    73.2 +++ b/xen/arch/ia64/vmx/vmx_utility.c	Fri Jul 28 10:51:38 2006 +0100
    73.3 @@ -381,7 +381,7 @@ set_isr_ei_ni (VCPU *vcpu)
    73.4  
    73.5      visr.val = 0;
    73.6  
    73.7 -    vpsr.val = vmx_vcpu_get_psr (vcpu);
    73.8 +    vpsr.val = VCPU(vcpu, vpsr);
    73.9  
   73.10      if (!vpsr.ic == 1 ) {
   73.11          /* Set ISR.ni */
    74.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Jul 27 17:44:14 2006 -0500
    74.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Jul 28 10:51:38 2006 +0100
    74.3 @@ -67,6 +67,8 @@
    74.4  #include <asm/vmx_pal_vsa.h>
    74.5  #include <asm/kregs.h>
    74.6  //unsigned long last_guest_rsm = 0x0;
    74.7 +
    74.8 +#ifdef	VTI_DEBUG
    74.9  struct guest_psr_bundle{
   74.10      unsigned long ip;
   74.11      unsigned long psr;
   74.12 @@ -74,6 +76,7 @@ struct guest_psr_bundle{
   74.13  
   74.14  struct guest_psr_bundle guest_psr_buf[100];
   74.15  unsigned long guest_psr_index = 0;
   74.16 +#endif
   74.17  
   74.18  void
   74.19  vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
   74.20 @@ -82,7 +85,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   74.21      UINT64 mask;
   74.22      REGS *regs;
   74.23      IA64_PSR old_psr, new_psr;
   74.24 -    old_psr.val=vmx_vcpu_get_psr(vcpu);
   74.25 +    old_psr.val=VCPU(vcpu, vpsr);
   74.26  
   74.27      regs=vcpu_regs(vcpu);
   74.28      /* We only support guest as:
   74.29 @@ -108,7 +111,8 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   74.30          // vpsr.i 0->1
   74.31          vcpu->arch.irq_new_condition = 1;
   74.32      }
   74.33 -    new_psr.val=vmx_vcpu_get_psr(vcpu);
   74.34 +    new_psr.val=VCPU(vcpu, vpsr);
   74.35 +#ifdef	VTI_DEBUG    
   74.36      {
   74.37      struct pt_regs *regs = vcpu_regs(vcpu);
   74.38      guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
   74.39 @@ -116,6 +120,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   74.40      if (++guest_psr_index >= 100)
   74.41          guest_psr_index = 0;
   74.42      }
   74.43 +#endif    
   74.44  #if 0
   74.45      if (old_psr.i != new_psr.i) {
   74.46      if (old_psr.i)
   74.47 @@ -149,25 +154,15 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v
   74.48  {
   74.49      // TODO: trap_bounce?? Eddie
   74.50      REGS *regs = vcpu_regs(vcpu);
   74.51 -    IA64_PSR vpsr;
   74.52      IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
   74.53  
   74.54 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
   74.55 -    if (vpsr.ri == 2) {
   74.56 -    vpsr.ri = 0;
   74.57 -    regs->cr_iip += 16;
   74.58 +    if (ipsr->ri == 2) {
   74.59 +        ipsr->ri = 0;
   74.60 +        regs->cr_iip += 16;
   74.61      } else {
   74.62 -    vpsr.ri++;
   74.63 +        ipsr->ri++;
   74.64      }
   74.65  
   74.66 -    ipsr->ri = vpsr.ri;
   74.67 -    vpsr.val &=
   74.68 -            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
   74.69 -                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
   74.70 -            ));
   74.71 -
   74.72 -    VCPU(vcpu, vpsr) = vpsr.val;
   74.73 -
   74.74      ipsr->val &=
   74.75              (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
   74.76                  IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
   74.77 @@ -181,7 +176,7 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
   74.78  {
   74.79      REGS *regs = vcpu_regs(vcpu);
   74.80      IA64_PSR vpsr;
   74.81 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
   74.82 +    vpsr.val = VCPU(vcpu, vpsr);
   74.83  
   74.84      if(!vpsr.ic)
   74.85          VCPU(vcpu,ifs) = regs->cr_ifs;
   74.86 @@ -280,22 +275,13 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
   74.87      vcpu_bsw1(vcpu);
   74.88      vmx_vcpu_set_psr(vcpu,psr);
   74.89      ifs=VCPU(vcpu,ifs);
   74.90 -    if((ifs>>63)&&(ifs<<1)){
   74.91 -        ifs=(regs->cr_ifs)&0x7f;
   74.92 -        regs->rfi_pfs = (ifs<<7)|ifs;
   74.93 -        regs->cr_ifs = VCPU(vcpu,ifs);
   74.94 -    }
   74.95 +    if(ifs>>63)
   74.96 +        regs->cr_ifs = ifs;
   74.97      regs->cr_iip = VCPU(vcpu,iip);
   74.98      return (IA64_NO_FAULT);
   74.99  }
  74.100  
  74.101  
  74.102 -UINT64
  74.103 -vmx_vcpu_get_psr(VCPU *vcpu)
  74.104 -{
  74.105 -    return VCPU(vcpu,vpsr);
  74.106 -}
  74.107 -
  74.108  #if 0
  74.109  IA64FAULT
  74.110  vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
  74.111 @@ -393,6 +379,20 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
  74.112  
  74.113  #endif
  74.114  
  74.115 +/*
   74.116 +    VPSR cannot track the guest PSR bits covered by the mask below;
   74.117 +    this function returns the complete guest PSR.
  74.118 + */
  74.119 +
  74.120 +UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
  74.121 +{
  74.122 +    UINT64 mask;
  74.123 +    REGS *regs = vcpu_regs(vcpu);
  74.124 +    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
  74.125 +           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
  74.126 +    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
  74.127 +}
  74.128 +
  74.129  IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
  74.130  {
  74.131      UINT64 vpsr;
  74.132 @@ -415,6 +415,7 @@ IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu
  74.133  
  74.134  IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
  74.135  {
  74.136 +    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
  74.137      vmx_vcpu_set_psr(vcpu, val);
  74.138      return IA64_NO_FAULT;
  74.139  }
    75.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Thu Jul 27 17:44:14 2006 -0500
    75.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Fri Jul 28 10:51:38 2006 +0100
    75.3 @@ -20,10 +20,7 @@
    75.4   *  Shaofan Li (Susue Li) <susie.li@intel.com>
    75.5   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    75.6   */
    75.7 -
    75.8 -
    75.9 -
   75.10 -#include <asm/privop.h>
   75.11 +#include <asm/bundle.h>
   75.12  #include <asm/vmx_vcpu.h>
   75.13  #include <asm/processor.h>
   75.14  #include <asm/delay.h>	// Debug only
   75.15 @@ -33,8 +30,6 @@
   75.16  #include <asm/vmx.h>
   75.17  #include <asm/virt_event.h>
   75.18  #include <asm/vmx_phy_mode.h>
   75.19 -extern UINT64 privop_trace;
   75.20 -extern void vhpi_detection(VCPU *vcpu);//temporarily place here,need a header file.
   75.21  
   75.22  void
   75.23  ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64  * cause)
   75.24 @@ -159,7 +154,6 @@ IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST6
   75.25      return vmx_vcpu_set_psr_sm(vcpu,imm24);
   75.26  }
   75.27  
   75.28 -unsigned long last_guest_psr = 0x0;
   75.29  IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
   75.30  {
   75.31      UINT64 tgt = inst.M33.r1;
   75.32 @@ -172,7 +166,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
   75.33      */
   75.34      val = vmx_vcpu_get_psr(vcpu);
   75.35      val = (val & MASK(0, 32)) | (val & MASK(35, 2));
   75.36 -    last_guest_psr = val;
   75.37      return vcpu_set_gr(vcpu, tgt, val, 0);
   75.38  }
   75.39  
   75.40 @@ -186,14 +179,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
   75.41      if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
   75.42  	panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
   75.43  
   75.44 -	val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
   75.45 -#if 0
   75.46 -	if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
   75.47 -		while(1);
   75.48 -	else
   75.49 -		last_mov_from_psr = 0;
   75.50 -#endif
   75.51 -        return vmx_vcpu_set_psr_l(vcpu,val);
   75.52 +    return vmx_vcpu_set_psr_l(vcpu, val);
   75.53  }
   75.54  
   75.55  
   75.56 @@ -261,6 +247,7 @@ IA64FAULT vmx_emul_cover(VCPU *vcpu, INS
   75.57  IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
   75.58  {
   75.59      u64 r2,r3;
   75.60 +#ifdef  VMAL_NO_FAULT_CHECK
   75.61      IA64_PSR  vpsr;
   75.62  
   75.63      vpsr.val=vmx_vcpu_get_psr(vcpu);
   75.64 @@ -270,6 +257,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
   75.65          privilege_op (vcpu);
   75.66          return IA64_FAULT;
   75.67      }
   75.68 +#endif // VMAL_NO_FAULT_CHECK
   75.69      if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
   75.70  #ifdef  VMAL_NO_FAULT_CHECK
   75.71          ISR isr;
   75.72 @@ -293,10 +281,10 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
   75.73  IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
   75.74  {
   75.75      u64 r3;
   75.76 +#ifdef  VMAL_NO_FAULT_CHECK
   75.77      IA64_PSR  vpsr;
   75.78  
   75.79      vpsr.val=vmx_vcpu_get_psr(vcpu);
   75.80 -#ifdef  VMAL_NO_FAULT_CHECK
   75.81      ISR isr;
   75.82      if ( vpsr.cpl != 0) {
   75.83          /* Inject Privileged Operation fault into guest */
   75.84 @@ -579,6 +567,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
   75.85  IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
   75.86  {
   75.87      UINT64 itir, ifa, pte, slot;
   75.88 +#ifdef  VMAL_NO_FAULT_CHECK
   75.89      IA64_PSR  vpsr;
   75.90      vpsr.val=vmx_vcpu_get_psr(vcpu);
   75.91      if ( vpsr.ic ) {
   75.92 @@ -586,7 +575,6 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
   75.93          illegal_op(vcpu);
   75.94          return IA64_FAULT;
   75.95      }
   75.96 -#ifdef  VMAL_NO_FAULT_CHECK
   75.97      ISR isr;
   75.98      if ( vpsr.cpl != 0) {
   75.99          /* Inject Privileged Operation fault into guest */
  75.100 @@ -638,7 +626,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
  75.101      UINT64 itir, ifa, pte, slot;
  75.102  #ifdef  VMAL_NO_FAULT_CHECK
  75.103      ISR isr;
  75.104 -#endif
  75.105      IA64_PSR  vpsr;
  75.106      vpsr.val=vmx_vcpu_get_psr(vcpu);
  75.107      if ( vpsr.ic ) {
  75.108 @@ -646,7 +633,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
  75.109          illegal_op(vcpu);
  75.110          return IA64_FAULT;
  75.111      }
  75.112 -#ifdef  VMAL_NO_FAULT_CHECK
  75.113      if ( vpsr.cpl != 0) {
  75.114          /* Inject Privileged Operation fault into guest */
  75.115          set_privileged_operation_isr (vcpu, 0);
  75.116 @@ -694,9 +680,10 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
  75.117  
  75.118  IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
  75.119  {
  75.120 -    IA64_PSR  vpsr;
  75.121      IA64FAULT	ret1;
  75.122  
  75.123 +#ifdef  VMAL_NO_FAULT_CHECK
  75.124 +    IA64_PSR  vpsr;
  75.125      vpsr.val=vmx_vcpu_get_psr(vcpu);
  75.126      if ( vpsr.ic ) {
  75.127          set_illegal_op_isr(vcpu);
  75.128 @@ -704,7 +691,6 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
  75.129          return IA64_FAULT;
  75.130      }
  75.131  
  75.132 -#ifdef  VMAL_NO_FAULT_CHECK
  75.133      UINT64 fault;
  75.134      ISR isr;
  75.135      if ( vpsr.cpl != 0) {
  75.136 @@ -1346,14 +1332,6 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
  75.137  }
  75.138  
  75.139  
  75.140 -static void post_emulation_action(VCPU *vcpu)
  75.141 -{
  75.142 -    if ( vcpu->arch.irq_new_condition ) {
  75.143 -        vcpu->arch.irq_new_condition = 0;
  75.144 -        vhpi_detection(vcpu);
  75.145 -    }
  75.146 -}
  75.147 -
  75.148  //#define  BYPASS_VMAL_OPCODE
  75.149  extern IA64_SLOT_TYPE  slot_types[0x20][3];
  75.150  IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
  75.151 @@ -1381,15 +1359,6 @@ vmx_emulate(VCPU *vcpu, REGS *regs)
  75.152      cause = VMX(vcpu,cause);
  75.153      opcode = VMX(vcpu,opcode);
  75.154  
  75.155 -/*
  75.156 -    if (privop_trace) {
  75.157 -        static long i = 400;
  75.158 -        //if (i > 0) printf("privop @%p\n",iip);
  75.159 -        if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
  75.160 -            iip,ia64_get_itc(),ia64_get_itm());
  75.161 -        i--;
  75.162 -    }
  75.163 -*/
  75.164  #ifdef  VTLB_DEBUG
  75.165      check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
  75.166      dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
  75.167 @@ -1565,8 +1534,6 @@ if ( (cause == 0xff && opcode == 0x1e000
  75.168      }
  75.169  
  75.170      recover_if_physical_mode(vcpu);
  75.171 -    post_emulation_action (vcpu);
  75.172 -//TODO    set_irq_check(v);
  75.173      return;
  75.174  
  75.175  }
    76.1 --- a/xen/arch/ia64/xen/Makefile	Thu Jul 27 17:44:14 2006 -0500
    76.2 +++ b/xen/arch/ia64/xen/Makefile	Fri Jul 28 10:51:38 2006 +0100
    76.3 @@ -24,5 +24,6 @@ obj-y += xenmisc.o
    76.4  obj-y += xensetup.o
    76.5  obj-y += xentime.o
    76.6  obj-y += flushd.o
    76.7 +obj-y += privop_stat.o
    76.8  
    76.9  obj-$(crash_debug) += gdbstub.o
    77.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Thu Jul 27 17:44:14 2006 -0500
    77.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Fri Jul 28 10:51:38 2006 +0100
    77.3 @@ -19,6 +19,11 @@
    77.4  #include <xen/guest_access.h>
    77.5  #include <public/sched_ctl.h>
    77.6  #include <asm/vmx.h>
    77.7 +#include <asm/dom_fw.h>
    77.8 +#include <xen/iocap.h>
    77.9 +
   77.10 +void build_physmap_table(struct domain *d);
   77.11 +
   77.12  extern unsigned long total_pages;
   77.13  long arch_do_dom0_op(dom0_op_t *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
   77.14  {
   77.15 @@ -154,52 +159,37 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   77.16  
   77.17      case DOM0_GETMEMLIST:
   77.18      {
   77.19 -        unsigned long i = 0;
   77.20 +        unsigned long i;
   77.21          struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
   77.22          unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
   77.23          unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
   77.24          unsigned long mfn;
   77.25 -        struct list_head *list_ent;
   77.26 -
   77.27 -        ret = -EINVAL;
   77.28 -        if ( d != NULL )
   77.29 -        {
   77.30 -            ret = 0;
   77.31 -
   77.32 -            list_ent = d->page_list.next;
   77.33 -            while ( (i != start_page) && (list_ent != &d->page_list)) {
   77.34 -                mfn = page_to_mfn(list_entry(
   77.35 -                    list_ent, struct page_info, list));
   77.36 -                i++;
   77.37 -                list_ent = mfn_to_page(mfn)->list.next;
   77.38 -            }
   77.39  
   77.40 -            if (i == start_page)
   77.41 -            {
   77.42 -                while((i < (start_page + nr_pages)) &&
   77.43 -                      (list_ent != &d->page_list))
   77.44 -                {
   77.45 -                    mfn = page_to_mfn(list_entry(
   77.46 -                        list_ent, struct page_info, list));
   77.47 +        if ( d == NULL ) {
   77.48 +            ret = -EINVAL;
   77.49 +            break;
   77.50 +        }
   77.51 +        for (i = 0 ; i < nr_pages ; i++) {
   77.52 +            pte_t *pte;
   77.53  
   77.54 -                    if ( copy_to_guest_offset(op->u.getmemlist.buffer,
   77.55 -                                          i - start_page, &mfn, 1) )
   77.56 -                    {
   77.57 -                        ret = -EFAULT;
   77.58 -                        break;
   77.59 -                    }
   77.60 -                    i++;
   77.61 -                    list_ent = mfn_to_page(mfn)->list.next;
   77.62 -                }
   77.63 -            } else
   77.64 -                ret = -ENOMEM;
   77.65 +            pte = (pte_t *)lookup_noalloc_domain_pte(d,
   77.66 +                                               (start_page + i) << PAGE_SHIFT);
   77.67 +            if (pte && pte_present(*pte))
   77.68 +                mfn = pte_pfn(*pte);
   77.69 +            else
   77.70 +                mfn = INVALID_MFN;
   77.71  
   77.72 -            op->u.getmemlist.num_pfns = i - start_page;
   77.73 -            if (copy_to_guest(u_dom0_op, op, 1))
   77.74 -                ret = -EFAULT;
   77.75 -            
   77.76 -            put_domain(d);
   77.77 +            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
   77.78 +                    ret = -EFAULT;
   77.79 +                    break;
   77.80 +            }
   77.81          }
   77.82 +
   77.83 +        op->u.getmemlist.num_pfns = i;
   77.84 +        if (copy_to_guest(u_dom0_op, op, 1))
   77.85 +            ret = -EFAULT;
   77.86 +
   77.87 +        put_domain(d);
   77.88      }
   77.89      break;
   77.90  
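
Illustrative sketch (not part of the changeset): the DOM0_GETMEMLIST hunk above stops walking the domain's page list and instead looks up each requested guest frame in the per-domain P2M table, reporting INVALID_MFN for unmapped frames. The lookup function, PAGE_SHIFT value and sample mapping below are stand-ins for lookup_noalloc_domain_pte() and the real per-arch constants.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   14            /* stand-in; real value is per-arch */
#define INVALID_MFN  (~0UL)        /* sentinel reported for P2M holes  */

/* Mock of the per-domain P2M lookup: returns the mfn backing a guest
 * pseudo-physical address, or INVALID_MFN when nothing is mapped there. */
static unsigned long lookup_p2m(unsigned long gpaddr)
{
    unsigned long gpfn = gpaddr >> PAGE_SHIFT;
    return gpfn < 4 ? 0x100 + gpfn : INVALID_MFN;   /* illustrative mapping */
}

/* Shape of the reworked loop: one lookup per requested frame, copying
 * either the mfn or INVALID_MFN back to the caller's buffer. */
static unsigned long getmemlist(unsigned long start_page,
                                unsigned long nr_pages,
                                unsigned long *buffer)
{
    unsigned long i;
    for (i = 0; i < nr_pages; i++)
        buffer[i] = lookup_p2m((start_page + i) << PAGE_SHIFT);
    return i;   /* number of pfns reported (num_pfns in the hunk above) */
}

int main(void)
{
    unsigned long buf[8];
    unsigned long n = getmemlist(0, 8, buf);
    for (unsigned long i = 0; i < n; i++)
        printf("pfn %lu -> mfn %#lx\n", i, buf[i]);
    return 0;
}
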
   77.91 @@ -225,6 +215,95 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   77.92      }
   77.93      break;
   77.94  
   77.95 +    case DOM0_DOMAIN_SETUP:
   77.96 +    {
   77.97 +        dom0_domain_setup_t *ds = &op->u.domain_setup;
   77.98 +        struct domain *d = find_domain_by_id(ds->domain);
   77.99 +
  77.100 +        if ( d == NULL) {
  77.101 +            ret = -EINVAL;
  77.102 +            break;
  77.103 +        }
  77.104 +
  77.105 +        if (ds->flags & XEN_DOMAINSETUP_query) {
  77.106 +            /* Set flags.  */
  77.107 +            if (d->arch.is_vti)
  77.108 +                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
  77.109 +            /* Set params.  */
  77.110 +            ds->bp = 0;		/* unknown.  */
  77.111 +            ds->maxmem = 0; /* unknown.  */
  77.112 +            ds->xsi_va = d->arch.shared_info_va;
  77.113 +            ds->hypercall_imm = d->arch.breakimm;
  77.114 +            /* Copy back.  */
  77.115 +            if ( copy_to_guest(u_dom0_op, op, 1) )
  77.116 +                ret = -EFAULT;
  77.117 +        }
  77.118 +        else {
  77.119 +            if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
  77.120 +                if (!vmx_enabled) {
  77.121 +                    printk("No VMX hardware feature for vmx domain.\n");
  77.122 +                    ret = -EINVAL;
  77.123 +                    break;
  77.124 +                }
  77.125 +                d->arch.is_vti = 1;
  77.126 +                vmx_setup_platform(d);
  77.127 +            }
  77.128 +            else {
  77.129 +                build_physmap_table(d);
  77.130 +                dom_fw_setup(d, ds->bp, ds->maxmem);
  77.131 +                if (ds->xsi_va)
  77.132 +                    d->arch.shared_info_va = ds->xsi_va;
  77.133 +                if (ds->hypercall_imm) {
  77.134 +                    struct vcpu *v;
  77.135 +                    d->arch.breakimm = ds->hypercall_imm;
  77.136 +                    for_each_vcpu (d, v)
  77.137 +                        v->arch.breakimm = d->arch.breakimm;
  77.138 +                }
  77.139 +            }
  77.140 +        }
  77.141 +
  77.142 +        put_domain(d);
  77.143 +    }
  77.144 +    break;
  77.145 +
  77.146 +    case DOM0_SHADOW_CONTROL:
  77.147 +    {
  77.148 +        struct domain *d; 
  77.149 +        ret = -ESRCH;
  77.150 +        d = find_domain_by_id(op->u.shadow_control.domain);
  77.151 +        if ( d != NULL )
  77.152 +        {
  77.153 +            ret = shadow_mode_control(d, &op->u.shadow_control);
  77.154 +            put_domain(d);
  77.155 +            copy_to_guest(u_dom0_op, op, 1);
  77.156 +        } 
  77.157 +    }
  77.158 +    break;
  77.159 +
  77.160 +    case DOM0_IOPORT_PERMISSION:
  77.161 +    {
  77.162 +        struct domain *d;
  77.163 +        unsigned int fp = op->u.ioport_permission.first_port;
  77.164 +        unsigned int np = op->u.ioport_permission.nr_ports;
  77.165 +        unsigned int lp = fp + np - 1;
  77.166 +
  77.167 +        ret = -ESRCH;
  77.168 +        d = find_domain_by_id(op->u.ioport_permission.domain);
  77.169 +        if (unlikely(d == NULL))
  77.170 +            break;
  77.171 +
  77.172 +        if (np == 0)
  77.173 +            ret = 0;
  77.174 +        else {
  77.175 +            if (op->u.ioport_permission.allow_access)
  77.176 +                ret = ioports_permit_access(d, fp, lp);
  77.177 +            else
  77.178 +                ret = ioports_deny_access(d, fp, lp);
  77.179 +        }
  77.180 +
  77.181 +        put_domain(d);
  77.182 +    }
  77.183 +    break;
  77.184      default:
  77.185          printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n",op->cmd);
  77.186          ret = -ENOSYS;
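
Illustrative sketch (not part of the changeset): the new DOM0_IOPORT_PERMISSION case above treats the request as the inclusive range [first_port, first_port + nr_ports - 1], accepts a zero-length request as a trivial success, and either grants or revokes access over that range. The permit/deny functions here are stand-ins for ioports_permit_access()/ioports_deny_access(), which operate on the domain's I/O-port rangeset.

#include <stdio.h>

/* Stand-ins for ioports_permit_access() / ioports_deny_access(). */
static int permit(unsigned int first, unsigned int last)
{
    printf("permit ports %u..%u\n", first, last);
    return 0;
}

static int deny(unsigned int first, unsigned int last)
{
    printf("deny ports %u..%u\n", first, last);
    return 0;
}

/* Mirrors the control flow of the IOPORT_PERMISSION case: an empty request
 * is a no-op success, otherwise act on the inclusive range [fp, fp+np-1]. */
static int ioport_permission(unsigned int fp, unsigned int np, int allow)
{
    unsigned int lp = fp + np - 1;

    if (np == 0)
        return 0;
    return allow ? permit(fp, lp) : deny(fp, lp);
}

int main(void)
{
    ioport_permission(0x3f8, 8, 1);   /* allow a small range */
    ioport_permission(0x3f8, 8, 0);   /* revoke it again     */
    ioport_permission(0x200, 0, 1);   /* zero-length: trivially OK */
    return 0;
}
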
  77.187 @@ -235,6 +314,24 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
  77.188  }
  77.189  
  77.190  #ifdef CONFIG_XEN_IA64_DOM0_VP
  77.191 +static unsigned long
  77.192 +dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
  77.193 +{
  77.194 +    unsigned long end;
  77.195 +
  77.196 +    /* Linux may use a 0 size!  */
  77.197 +    if (size == 0)
  77.198 +        size = PAGE_SIZE;
  77.199 +
  77.200 +    end = PAGE_ALIGN(mpaddr + size);
  77.201 +
  77.202 +    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
  77.203 +                                (end >> PAGE_SHIFT) - 1))
  77.204 +        return -EPERM;