ia64/xen-unstable

changeset 3833:39538bfd72f0

bitkeeper revision 1.1201.1.1 (421098bauUknclk2-oQphDo3ZBC1Vw)

Major ia64 update -- now boots dom0.

Signed-off-by: dan.magenheimer@hp.com
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@freefall.cl.cam.ac.uk
date Mon Feb 14 12:25:30 2005 +0000 (2005-02-14)
parents 917ea52007ea
children 73f9c9685211 3b3ed38bd02b
files .rootkeys xen/Makefile xen/arch/ia64/Makefile xen/arch/ia64/Rules.mk xen/arch/ia64/acpi.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/dom0_ops.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/idle0_task.c xen/arch/ia64/irq.c xen/arch/ia64/lib/Makefile xen/arch/ia64/mm_init.c xen/arch/ia64/patch/linux-2.6.7/bootmem.h xen/arch/ia64/patch/linux-2.6.7/current.h xen/arch/ia64/patch/linux-2.6.7/efi.c xen/arch/ia64/patch/linux-2.6.7/efi.h xen/arch/ia64/patch/linux-2.6.7/entry.S xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h xen/arch/ia64/patch/linux-2.6.7/hardirq.h xen/arch/ia64/patch/linux-2.6.7/head.S xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h xen/arch/ia64/patch/linux-2.6.7/hw_irq.h xen/arch/ia64/patch/linux-2.6.7/ide.h xen/arch/ia64/patch/linux-2.6.7/init_task.c xen/arch/ia64/patch/linux-2.6.7/init_task.h xen/arch/ia64/patch/linux-2.6.7/interrupt.h xen/arch/ia64/patch/linux-2.6.7/io.h xen/arch/ia64/patch/linux-2.6.7/irq.h xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c xen/arch/ia64/patch/linux-2.6.7/ivt.S xen/arch/ia64/patch/linux-2.6.7/kregs.h xen/arch/ia64/patch/linux-2.6.7/lds.S xen/arch/ia64/patch/linux-2.6.7/linuxtime.h xen/arch/ia64/patch/linux-2.6.7/minstate.h xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c xen/arch/ia64/patch/linux-2.6.7/mm_contig.c xen/arch/ia64/patch/linux-2.6.7/mmzone.h xen/arch/ia64/patch/linux-2.6.7/page.h xen/arch/ia64/patch/linux-2.6.7/page_alloc.c xen/arch/ia64/patch/linux-2.6.7/processor.h xen/arch/ia64/patch/linux-2.6.7/sal.h xen/arch/ia64/patch/linux-2.6.7/setup.c xen/arch/ia64/patch/linux-2.6.7/slab.c xen/arch/ia64/patch/linux-2.6.7/slab.h xen/arch/ia64/patch/linux-2.6.7/system.h xen/arch/ia64/patch/linux-2.6.7/time.c xen/arch/ia64/patch/linux-2.6.7/tlb.c xen/arch/ia64/patch/linux-2.6.7/types.h xen/arch/ia64/patch/linux-2.6.7/unaligned.c xen/arch/ia64/patch/linux-2.6.7/wait.h xen/arch/ia64/pdb-stub.c xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/smp.c xen/arch/ia64/smpboot.c xen/arch/ia64/tools/README.xenia64 xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vcpu.c xen/arch/ia64/vhpt.c xen/arch/ia64/xenasm.S xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/common/keyhandler.c xen/include/acpi/acdispat.h xen/include/acpi/acinterp.h xen/include/acpi/aclocal.h xen/include/acpi/acstruct.h xen/include/asm-ia64/config.h xen/include/asm-ia64/debugger.h xen/include/asm-ia64/dom_fw.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/init.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/mmu_context.h xen/include/asm-ia64/multicall.h xen/include/asm-ia64/offsets.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/regs.h xen/include/asm-ia64/shadow.h xen/include/asm-ia64/time.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vhpt.h xen/include/asm-ia64/xenserial.h xen/include/public/arch-ia64.h xen/include/xen/keyhandler.h
line diff
     1.1 --- a/.rootkeys	Mon Feb 14 11:42:11 2005 +0000
     1.2 +++ b/.rootkeys	Mon Feb 14 12:25:30 2005 +0000
     1.3 @@ -873,11 +873,69 @@ 40e9808eHXvs_5eggj9McD_J90mhNw tools/xfr
     1.4  3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
     1.5  3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
     1.6  3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
     1.7 +421098b25A0RvuYN3rP28ga3_FN3_Q xen/arch/ia64/Makefile
     1.8 +421098b2okIeYXS9w9avmSozls61xA xen/arch/ia64/Rules.mk
     1.9 +421098b21p12UcKjHBrLh_LjlvNEwA xen/arch/ia64/acpi.c
    1.10 +421098b26C_0yoypoHqjDcJA9UrG_g xen/arch/ia64/asm-offsets.c
    1.11 +421098b2PHgzf_Gg4R65YRNi_QzMKQ xen/arch/ia64/dom0_ops.c
    1.12 +421098b2O7jsNfzQXA1v3rbAc1QhpA xen/arch/ia64/dom_fw.c
    1.13 +421098b2ZlaBcyiuuPr3WpzaSDwg6Q xen/arch/ia64/domain.c
    1.14 +421098b3LYAS8xJkQiGP7tiTlyBt0Q xen/arch/ia64/idle0_task.c
    1.15 +421098b3ys5GAr4z6_H1jD33oem82g xen/arch/ia64/irq.c
    1.16 +421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
    1.17 +421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
    1.18 +421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
    1.19 +421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
    1.20 +421098b3ZBl80iPuSeDU_Id5AgZl0w xen/arch/ia64/patch/linux-2.6.7/efi.c
    1.21 +421098b3VUmGT2Jdy4SWeDTwcCHaqg xen/arch/ia64/patch/linux-2.6.7/efi.h
    1.22 +421098b3dPmLXyvKEmvH_2XALeIYlg xen/arch/ia64/patch/linux-2.6.7/entry.S
    1.23 +421098b3eoimqDUiVw9p_RADfvICwQ xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h
    1.24 +421098b3ZcvjJahWCTvmpNb1RWArww xen/arch/ia64/patch/linux-2.6.7/hardirq.h
    1.25 +421098b3gZO0kxetbOVLlpsFkf0PWQ xen/arch/ia64/patch/linux-2.6.7/head.S
    1.26 +421098b3Hz4y9vxFo6rZ03PXkFF6-w xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c
    1.27 +421098b3mn7maohx9UTPjTZEVov-kg xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h
    1.28 +421098b4HWTbzGFd8fAT27GIavt61g xen/arch/ia64/patch/linux-2.6.7/hw_irq.h
    1.29 +421098b4wVriEglxpLtvD9NMUr76Ew xen/arch/ia64/patch/linux-2.6.7/ide.h
    1.30 +421098b4ckKw7I-p3APMhFOuefMWMA xen/arch/ia64/patch/linux-2.6.7/init_task.c
    1.31 +421098b4CSuWMM-4vHvAa4F4luDOLQ xen/arch/ia64/patch/linux-2.6.7/init_task.h
    1.32 +421098b4x5Hnxgvf22nhvxzPMszw1g xen/arch/ia64/patch/linux-2.6.7/interrupt.h
    1.33 +421098b4BgHuG3PiGY2QOQCNEqMYsA xen/arch/ia64/patch/linux-2.6.7/io.h
    1.34 +421098b4JnNHXkW2732slXwxMX79RA xen/arch/ia64/patch/linux-2.6.7/irq.h
    1.35 +421098b4H-Upf_mxF2apXBffvYadUw xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
    1.36 +421098b4C0Lc3xag4Nm-_yC9IMTDqA xen/arch/ia64/patch/linux-2.6.7/ivt.S
    1.37 +421098b4weyd0AQTjPLmooChUJm13Q xen/arch/ia64/patch/linux-2.6.7/kregs.h
    1.38 +421098b4vHCejAUPem4w8p5V-AD1Ig xen/arch/ia64/patch/linux-2.6.7/lds.S
    1.39 +421098b4uooGl5X8zZM96qpmS0Furg xen/arch/ia64/patch/linux-2.6.7/linuxtime.h
    1.40 +421098b4awnw3Jf23gohJWoK8s7-Qg xen/arch/ia64/patch/linux-2.6.7/minstate.h
    1.41 +421098b5hIfMbZlQTfrOKN4BtzJgDQ xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c
    1.42 +421098b53IVBoQPcDjFciZy86YEhRQ xen/arch/ia64/patch/linux-2.6.7/mm_contig.c
    1.43 +421098b5pZw41QuBTvhjvSol6aAHDw xen/arch/ia64/patch/linux-2.6.7/mmzone.h
    1.44 +421098b5B_dClZDGuPYeY3IXo8Hlbw xen/arch/ia64/patch/linux-2.6.7/page.h
    1.45 +421098b5saClfxPj36l47H9Um7h1Fw xen/arch/ia64/patch/linux-2.6.7/page_alloc.c
    1.46 +421098b5OkmcjMBq8gxs7ZrTa4Ao6g xen/arch/ia64/patch/linux-2.6.7/processor.h
    1.47 +421098b51RLB6jWr6rIlpB2SNObxZg xen/arch/ia64/patch/linux-2.6.7/sal.h
    1.48 +421098b5WFeRnwGtZnHkSvHVzA4blg xen/arch/ia64/patch/linux-2.6.7/setup.c
    1.49 +421098b5Jm2i8abzb0mpT6mlEiKZDg xen/arch/ia64/patch/linux-2.6.7/slab.c
    1.50 +421098b5w6MBnluEpQJAWDTBFrbWSQ xen/arch/ia64/patch/linux-2.6.7/slab.h
    1.51 +421098b5Cg7nbIXm3RhUF-uG3SKaUA xen/arch/ia64/patch/linux-2.6.7/system.h
    1.52 +421098b5XrkDYW_Nd9lg5CDgNzHLmg xen/arch/ia64/patch/linux-2.6.7/time.c
    1.53 +421098b5_kFbvZIIPM3bdCES1Ocqnw xen/arch/ia64/patch/linux-2.6.7/tlb.c
    1.54 +421098b5DWbgK-tBR4um8PEAqPwqTA xen/arch/ia64/patch/linux-2.6.7/types.h
    1.55 +421098b5il9YfZM0HpeCnaMgVN_q9g xen/arch/ia64/patch/linux-2.6.7/unaligned.c
    1.56 +421098b65M5cPramsLGbODg8lQwUjQ xen/arch/ia64/patch/linux-2.6.7/wait.h
    1.57 +421098b6cYDwzXP86ViTLlTO2x7ovA xen/arch/ia64/pdb-stub.c
    1.58  41a26ebcqaSGVQ8qTMwpPwOJSJ7qSw xen/arch/ia64/privop.c
    1.59  41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/ia64/process.c
    1.60  41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
    1.61 +421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
    1.62 +421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
    1.63 +421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
    1.64 +421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
    1.65  41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
    1.66 +421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
    1.67  41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
    1.68 +421098b6mWyFPtkhPz9h1LCmKpoCLg xen/arch/ia64/xenmisc.c
    1.69 +421098b6lY2JzrV1oFDbrt7XQhtElg xen/arch/ia64/xensetup.c
    1.70  3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
    1.71  3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
    1.72  3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
    1.73 @@ -1016,6 +1074,25 @@ 40715b2d3CdS6dIpZDTiCJRlDG3LCA xen/inclu
    1.74  40715b2dKRW7A71SNaeV6zfrEzYxPw xen/include/acpi/platform/acenv.h
    1.75  40715b2d8fYydJMcODFrV1ocLklGDg xen/include/acpi/platform/acgcc.h
    1.76  40715b2d1yZkqyAt0kgx2xEwsatuuA xen/include/acpi/platform/aclinux.h
    1.77 +421098b6Y3xqcv873Gvg1rQ5CChfFw xen/include/asm-ia64/config.h
    1.78 +421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
    1.79 +421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
    1.80 +421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
    1.81 +421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
    1.82 +421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
    1.83 +421098b7XC1A5PhA-lrU9pIO3sSSmA xen/include/asm-ia64/mm.h
    1.84 +421098b7c0Dx0ABuW_yHQdAqKhUoiQ xen/include/asm-ia64/mmu_context.h
    1.85 +421098b7C2dr3O7lgc_oeC9TEE9GKw xen/include/asm-ia64/multicall.h
    1.86 +421098b7dX_56NCV9zjftqm1yIqC8w xen/include/asm-ia64/offsets.h
    1.87 +421098b72bPUyviWloEAIB85dGCm2Q xen/include/asm-ia64/privop.h
    1.88 +421098b7Z6OwjZnrTZkh34DoDfcjrA xen/include/asm-ia64/regionreg.h
    1.89 +421098b707cY5YluUcWK5Pc-71ETVw xen/include/asm-ia64/regs.h
    1.90 +421098b7czhvyPGFa5nskL0N4vNvFw xen/include/asm-ia64/shadow.h
    1.91 +421098b7GkWOnlzSmPvNAhByOSZ1Dw xen/include/asm-ia64/time.h
    1.92 +421098b7FK3xgShpnH0I0Ou3O4fJ2Q xen/include/asm-ia64/tlb.h
    1.93 +421098b78IGdFOGUlPmpS7h_QBmoFg xen/include/asm-ia64/vcpu.h
    1.94 +421098b7PiAencgmBFGAqALU-V5rqQ xen/include/asm-ia64/vhpt.h
    1.95 +421098b7LfwIHQ2lRYWhO4ruEXqIuQ xen/include/asm-ia64/xenserial.h
    1.96  40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
    1.97  3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
    1.98  3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
    1.99 @@ -1085,6 +1162,7 @@ 404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/inclu
   1.100  40e1966azOJZfNI6Ilthe6Q-T3Hewg xen/include/asm-x86/x86_64/string.h
   1.101  404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
   1.102  400304fcmRQmDdFYEzDh0wcBba9alg xen/include/public/COPYING
   1.103 +421098b7OKb9YH_EUA_UpCxBjaqtgA xen/include/public/arch-ia64.h
   1.104  404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/public/arch-x86_32.h
   1.105  404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/public/arch-x86_64.h
   1.106  3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/public/dom0_ops.h
     2.1 --- a/xen/Makefile	Mon Feb 14 11:42:11 2005 +0000
     2.2 +++ b/xen/Makefile	Mon Feb 14 12:25:30 2005 +0000
     2.3 @@ -77,9 +77,9 @@ include/xen/compile.h: include/xen/compi
     2.4  	@mv -f $@.new $@
     2.5  
     2.6  tools/figlet/figlet: tools/figlet/figlet.o
     2.7 -	$(CC) -o $@ $<
     2.8 +	$(HOSTCC) -o $@ $<
     2.9  tools/figlet/figlet.o: tools/figlet/figlet.c
    2.10 -	$(CC) -o $@ -c $<
    2.11 +	$(HOSTCC) -o $@ -c $<
    2.12  
    2.13  include/xen/banner.h: tools/figlet/figlet tools/figlet/xen.flf
    2.14  	tools/figlet/figlet -d tools/figlet Xen $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION) > $@.new
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/Makefile	Mon Feb 14 12:25:30 2005 +0000
     3.3 @@ -0,0 +1,55 @@
     3.4 +include $(BASEDIR)/Rules.mk
     3.5 +
     3.6 +# libs-y	+= arch/ia64/lib/lib.a
     3.7 +
     3.8 +OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
     3.9 +	xenmisc.o pdb-stub.o acpi.o \
    3.10 +	machvec.o dom0_ops.o domain.o \
    3.11 +	idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
    3.12 +	mm_bootmem.o sal.o cmdline.o mm_init.o tlb.o page_alloc.o slab.o \
    3.13 +	regionreg.o entry.o unaligned.o privop.o vcpu.o \
    3.14 +	irq_ia64.o irq_lsapic.o hpsim_irq.o vhpt.o xenasm.o dom_fw.o
    3.15 +# perfmon.o
    3.16 +# unwind.o needed for kernel unwinding (rare)
    3.17 +
    3.18 +OBJS := $(subst $(TARGET_ARCH)/asm-offsets.o,,$(OBJS))
    3.19 +
    3.20 +# remove following line if not privifying in memory
    3.21 +# OBJS += privify.o
    3.22 +
    3.23 +# What happens here? We link monitor object files together, starting
     3.24 +# at MONITOR_BASE (a very high address). But the bootloader cannot put
    3.25 +# things there, so we initially load at LOAD_BASE. A hacky little
    3.26 +# tool called `elf-reloc' is used to modify segment offsets from
    3.27 +# MONITOR_BASE-relative to LOAD_BASE-relative.
    3.28 +# (NB. Linux gets round this by turning its image into raw binary, then 
    3.29 +# wrapping that with a low-memory bootstrapper.)
    3.30 +default: $(OBJS) head.o ia64lib.o xen.lds.s
    3.31 +	$(LD) -r -o arch.o $(OBJS) ia64lib.o
    3.32 +	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    3.33 +		-Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
    3.34 +	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
    3.35 +#	$(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
    3.36 +
    3.37 +asm-offsets.s: asm-offsets.c
    3.38 +	$(CC) $(CFLAGS) -S -o $@ $<
    3.39 +
    3.40 +# I'm sure a Makefile wizard would know a better way to do this
    3.41 +xen.lds.s: xen.lds.S
    3.42 +	$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
    3.43 +		-o xen.lds.s xen.lds.S
    3.44 +
    3.45 +ia64lib.o:
    3.46 +	$(MAKE) -C lib && cp lib/ia64lib.o .
    3.47 +
    3.48 +clean:
    3.49 +	rm -f *.o *~ core  xen.lds.s
    3.50 +	$(MAKE) -C lib clean
    3.51 +
    3.52 +# setup.o contains bits of compile.h so it must be blown away
    3.53 +delete-unfresh-files:
    3.54 +	echo any unfresh-files to delete for ia64\?
    3.55 +#	rm -f setup.o
    3.56 +
    3.57 +.PHONY: default clean delete-unfresh-files
    3.58 +
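[The Makefile comment above links the image at MONITOR_BASE but loads it at LOAD_BASE, leaving `elf-reloc' to patch the segment addresses. A rough sketch of what such a pass does, assuming standard ELF64 program headers -- the real xen/tools/elf-reloc source is not part of this changeset:

    /* Hypothetical sketch of an elf-reloc style pass: rebase each
     * loadable segment's physical address from MONITOR_BASE-relative
     * to LOAD_BASE-relative.  Illustration only, not the real tool. */
    #include <elf.h>

    static void rebase_load_segments(Elf64_Phdr *phdr, unsigned int count,
                                     unsigned long monitor_base,
                                     unsigned long load_base)
    {
        unsigned int i;
        for (i = 0; i < count; i++) {
            if (phdr[i].p_type != PT_LOAD)
                continue;
            /* Virtual addresses stay MONITOR_BASE-relative; only the
             * physical load address moves down to LOAD_BASE. */
            phdr[i].p_paddr = phdr[i].p_paddr - monitor_base + load_base;
        }
    }
]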
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/Rules.mk	Mon Feb 14 12:25:30 2005 +0000
     4.3 @@ -0,0 +1,33 @@
     4.4 +########################################
      4.5 +# ia64-specific definitions
     4.6 +
     4.7 +ifeq ($(COMPILE_ARCH),$(TARGET_ARCH))
     4.8 +OBJCOPY = objcopy
     4.9 +endif
    4.10 +ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
    4.11 +CC = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-gcc
    4.12 +LD = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-ld
    4.13 +OBJCOPY = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-objcopy
    4.14 +endif
    4.15 +HOSTCC := gcc
    4.16 +#LD := ld
    4.17 +# Linker should relocate monitor to this address
    4.18 +MONITOR_BASE := 0xFC500000
    4.19 +# Bootloader should load monitor to this real address
    4.20 +LOAD_BASE    := 0x00100000
    4.21 +AFLAGS  += -D__ASSEMBLY__
    4.22 +CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64
    4.23 +CFLAGS  := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
    4.24 +#CFLAGS  += -O3		# -O3 over-inlines making debugging tough!
    4.25 +CFLAGS  += -O2		# but no optimization causes compile errors!
    4.26 +CFLAGS  += -iwithprefix include -Wall -DMONITOR_BASE=$(MONITOR_BASE)
    4.27 +CFLAGS  += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
    4.28 +CFLAGS  += -I$(BASEDIR)/include/asm-ia64
    4.29 +CFLAGS  += -Wno-pointer-arith -Wredundant-decls
    4.30 +CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
    4.31 +CFLAGS	+= -ffixed-r13 -mfixed-range=f12-f15,f32-f127
    4.32 +CFLAGS	+= -w -g
    4.33 +#TARGET_CPU := i686
    4.34 +#CFLAGS += -march=$(TARGET_CPU)
    4.35 +#LDARCHFLAGS := --oformat elf32-i386 
    4.36 +LDFLAGS := -g
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/acpi.c	Mon Feb 14 12:25:30 2005 +0000
     5.3 @@ -0,0 +1,685 @@
     5.4 +/*
     5.5 + *  acpi.c - Architecture-Specific Low-Level ACPI Support
     5.6 + *
     5.7 + *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
     5.8 + *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
     5.9 + *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
    5.10 + *
    5.11 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    5.12 + *
    5.13 + *  This program is free software; you can redistribute it and/or modify
    5.14 + *  it under the terms of the GNU General Public License as published by
    5.15 + *  the Free Software Foundation; either version 2 of the License, or
    5.16 + *  (at your option) any later version.
    5.17 + *
    5.18 + *  This program is distributed in the hope that it will be useful,
    5.19 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    5.20 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    5.21 + *  GNU General Public License for more details.
    5.22 + *
    5.23 + *  You should have received a copy of the GNU General Public License
    5.24 + *  along with this program; if not, write to the Free Software
    5.25 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    5.26 + *
    5.27 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    5.28 + */
    5.29 +
    5.30 +#include <xen/config.h>
    5.31 +#include <xen/kernel.h>
    5.32 +#include <xen/init.h>
    5.33 +#include <xen/types.h>
    5.34 +/*#include <xen/stddef.h>*/
    5.35 +#include <xen/slab.h>
    5.36 +#include <xen/pci.h>
    5.37 +/*#include <xen/bootmem.h>*/
    5.38 +#include <xen/irq.h>
    5.39 +#include <xen/acpi.h>
    5.40 +//#include <asm/mpspec.h>
    5.41 +#include <asm/io.h>
    5.42 +//#include <asm/apic.h>
    5.43 +//#include <asm/apicdef.h>
    5.44 +#include <asm/page.h>
    5.45 +/*#include <asm/pgtable.h>*/
    5.46 +#include <asm/pgalloc.h>
    5.47 +//#include <asm/io_apic.h>
    5.48 +#include <asm/acpi.h>
    5.49 +/*#include <asm/save_state.h>*/
    5.50 +//#include <asm/smpboot.h>
    5.51 +
    5.52 +
    5.53 +#define PREFIX			"ACPI: "
    5.54 +
    5.55 +int acpi_lapic = 0;
    5.56 +int acpi_ioapic = 0;
    5.57 +
    5.58 +/* --------------------------------------------------------------------------
    5.59 +                              Boot-time Configuration
    5.60 +   -------------------------------------------------------------------------- */
    5.61 +
    5.62 +#ifdef CONFIG_ACPI_BOOT
    5.63 +int acpi_noirq __initdata = 0;  /* skip ACPI IRQ initialization */
    5.64 +int acpi_ht __initdata = 1;     /* enable HT */
    5.65 +
    5.66 +enum acpi_irq_model_id		acpi_irq_model;
    5.67 +
    5.68 +
    5.69 +/*
    5.70 + * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
    5.71 + * to map the target physical address. The problem is that set_fixmap()
    5.72 + * provides a single page, and it is possible that the page is not
    5.73 + * sufficient.
    5.74 + * By using this area, we can map up to MAX_IO_APICS pages temporarily,
    5.75 + * i.e. until the next __va_range() call.
    5.76 + *
    5.77 + * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
    5.78 + * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
    5.79 + * count idx down while incrementing the phys address.
    5.80 + */
    5.81 +char *__acpi_map_table(unsigned long phys, unsigned long size)
    5.82 +{
    5.83 +	unsigned long base, offset, mapped_size;
    5.84 +	int idx;
    5.85 +
    5.86 +	if (phys + size < 8*1024*1024) 
    5.87 +		return __va(phys); 
    5.88 +
    5.89 +	offset = phys & (PAGE_SIZE - 1);
    5.90 +	mapped_size = PAGE_SIZE - offset;
    5.91 +#ifndef XEN
    5.92 +// where is FIX_ACPI_*? hack for now, FIXME later
    5.93 +	set_fixmap(FIX_ACPI_END, phys);
    5.94 +	base = fix_to_virt(FIX_ACPI_END);
    5.95 +
    5.96 +	/*
    5.97 +	 * Most cases can be covered by the below.
    5.98 +	 */
    5.99 +	idx = FIX_ACPI_END;
   5.100 +	while (mapped_size < size) {
   5.101 +		if (--idx < FIX_ACPI_BEGIN)
   5.102 +			return 0;	/* cannot handle this */
   5.103 +		phys += PAGE_SIZE;
   5.104 +		set_fixmap(idx, phys);
   5.105 +		mapped_size += PAGE_SIZE;
   5.106 +	}
   5.107 +#endif
   5.108 +
   5.109 +	return ((unsigned char *) base + offset);
   5.110 +}
   5.111 +
   5.112 +
   5.113 +#ifdef CONFIG_X86_LOCAL_APIC
   5.114 +
   5.115 +static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
   5.116 +
   5.117 +
   5.118 +static int __init
   5.119 +acpi_parse_madt (
   5.120 +	unsigned long		phys_addr,
   5.121 +	unsigned long		size)
   5.122 +{
   5.123 +	struct acpi_table_madt	*madt = NULL;
   5.124 +
   5.125 +	if (!phys_addr || !size)
   5.126 +		return -EINVAL;
   5.127 +
   5.128 +	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
   5.129 +	if (!madt) {
   5.130 +		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
   5.131 +		return -ENODEV;
   5.132 +	}
   5.133 +
   5.134 +	if (madt->lapic_address)
   5.135 +		acpi_lapic_addr = (u64) madt->lapic_address;
   5.136 +
   5.137 +	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
   5.138 +		madt->lapic_address);
   5.139 +
   5.140 +	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
   5.141 +
   5.142 +	return 0;
   5.143 +}
   5.144 +
   5.145 +
   5.146 +static int __init
   5.147 +acpi_parse_lapic (
   5.148 +	acpi_table_entry_header *header)
   5.149 +{
   5.150 +	struct acpi_table_lapic	*processor = NULL;
   5.151 +
   5.152 +	processor = (struct acpi_table_lapic*) header;
   5.153 +	if (!processor)
   5.154 +		return -EINVAL;
   5.155 +
   5.156 +	acpi_table_print_madt_entry(header);
   5.157 +
   5.158 +	mp_register_lapic (
   5.159 +		processor->id,					   /* APIC ID */
   5.160 +		processor->flags.enabled);			  /* Enabled? */
   5.161 +
   5.162 +	return 0;
   5.163 +}
   5.164 +
   5.165 +
   5.166 +static int __init
   5.167 +acpi_parse_lapic_addr_ovr (
   5.168 +	acpi_table_entry_header *header)
   5.169 +{
   5.170 +	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
   5.171 +
   5.172 +	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
   5.173 +	if (!lapic_addr_ovr)
   5.174 +		return -EINVAL;
   5.175 +
   5.176 +	acpi_lapic_addr = lapic_addr_ovr->address;
   5.177 +
   5.178 +	return 0;
   5.179 +}
   5.180 +
   5.181 +static int __init
   5.182 +acpi_parse_lapic_nmi (
   5.183 +	acpi_table_entry_header *header)
   5.184 +{
   5.185 +	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
   5.186 +
   5.187 +	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
   5.188 +	if (!lapic_nmi)
   5.189 +		return -EINVAL;
   5.190 +
   5.191 +	acpi_table_print_madt_entry(header);
   5.192 +
   5.193 +	if (lapic_nmi->lint != 1)
   5.194 +		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
   5.195 +
   5.196 +	return 0;
   5.197 +}
   5.198 +
   5.199 +#endif /*CONFIG_X86_LOCAL_APIC*/
   5.200 +
   5.201 +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   5.202 +
   5.203 +static int __init
   5.204 +acpi_parse_ioapic (
   5.205 +	acpi_table_entry_header *header)
   5.206 +{
   5.207 +	struct acpi_table_ioapic *ioapic = NULL;
   5.208 +
   5.209 +	ioapic = (struct acpi_table_ioapic*) header;
   5.210 +	if (!ioapic)
   5.211 +		return -EINVAL;
   5.212 + 
   5.213 +	acpi_table_print_madt_entry(header);
   5.214 +
   5.215 +	mp_register_ioapic (
   5.216 +		ioapic->id,
   5.217 +		ioapic->address,
   5.218 +		ioapic->global_irq_base);
   5.219 + 
   5.220 +	return 0;
   5.221 +}
   5.222 +
   5.223 +
   5.224 +static int __init
   5.225 +acpi_parse_int_src_ovr (
   5.226 +	acpi_table_entry_header *header)
   5.227 +{
   5.228 +	struct acpi_table_int_src_ovr *intsrc = NULL;
   5.229 +
   5.230 +	intsrc = (struct acpi_table_int_src_ovr*) header;
   5.231 +	if (!intsrc)
   5.232 +		return -EINVAL;
   5.233 +
   5.234 +	acpi_table_print_madt_entry(header);
   5.235 +
   5.236 +	mp_override_legacy_irq (
   5.237 +		intsrc->bus_irq,
   5.238 +		intsrc->flags.polarity,
   5.239 +		intsrc->flags.trigger,
   5.240 +		intsrc->global_irq);
   5.241 +
   5.242 +	return 0;
   5.243 +}
   5.244 +
   5.245 +
   5.246 +static int __init
   5.247 +acpi_parse_nmi_src (
   5.248 +	acpi_table_entry_header *header)
   5.249 +{
   5.250 +	struct acpi_table_nmi_src *nmi_src = NULL;
   5.251 +
   5.252 +	nmi_src = (struct acpi_table_nmi_src*) header;
   5.253 +	if (!nmi_src)
   5.254 +		return -EINVAL;
   5.255 +
   5.256 +	acpi_table_print_madt_entry(header);
   5.257 +
    5.258 +	/* TBD: Support nmi_src entries? */
   5.259 +
   5.260 +	return 0;
   5.261 +}
   5.262 +
   5.263 +#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   5.264 +
   5.265 +
   5.266 +static unsigned long __init
   5.267 +acpi_scan_rsdp (
   5.268 +	unsigned long		start,
   5.269 +	unsigned long		length)
   5.270 +{
   5.271 +	unsigned long		offset = 0;
   5.272 +	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
   5.273 +
   5.274 +	/*
   5.275 +	 * Scan all 16-byte boundaries of the physical memory region for the
   5.276 +	 * RSDP signature.
   5.277 +	 */
   5.278 +	for (offset = 0; offset < length; offset += 16) {
   5.279 +		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
   5.280 +			continue;
   5.281 +		return (start + offset);
   5.282 +	}
   5.283 +
   5.284 +	return 0;
   5.285 +}
   5.286 +
   5.287 +
   5.288 +unsigned long __init
   5.289 +acpi_find_rsdp (void)
   5.290 +{
   5.291 +	unsigned long		rsdp_phys = 0;
   5.292 +
   5.293 +	/*
   5.294 +	 * Scan memory looking for the RSDP signature. First search EBDA (low
   5.295 +	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
   5.296 +	 */
   5.297 +	rsdp_phys = acpi_scan_rsdp (0, 0x400);
   5.298 +	if (!rsdp_phys)
   5.299 +		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
   5.300 +
   5.301 +	return rsdp_phys;
   5.302 +}
   5.303 +
   5.304 +
   5.305 +/*
   5.306 + * acpi_boot_init()
   5.307 + *  called from setup_arch(), always.
   5.308 + *	1. maps ACPI tables for later use
   5.309 + *	2. enumerates lapics
   5.310 + *	3. enumerates io-apics
   5.311 + *
   5.312 + * side effects:
   5.313 + * 	acpi_lapic = 1 if LAPIC found
   5.314 + *	acpi_ioapic = 1 if IOAPIC found
   5.315 + *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
   5.316 + *	if acpi_blacklisted() acpi_disabled = 1;
   5.317 + *	acpi_irq_model=...
   5.318 + *	...
   5.319 + *
   5.320 + * return value: (currently ignored)
   5.321 + *	0: success
   5.322 + *	!0: failure
   5.323 + */
   5.324 +int __init
   5.325 +acpi_boot_init (void)
   5.326 +{
   5.327 +	int			result = 0;
   5.328 +
   5.329 +	if (acpi_disabled && !acpi_ht)
   5.330 +		return(1);
   5.331 +
   5.332 +	/*
   5.333 +	 * The default interrupt routing model is PIC (8259).  This gets
    5.334 +	 * overridden if IOAPICs are enumerated (below).
   5.335 +	 */
   5.336 +	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
   5.337 +
   5.338 +	/* 
   5.339 +	 * Initialize the ACPI boot-time table parser.
   5.340 +	 */
   5.341 +	result = acpi_table_init();
   5.342 +	if (result) {
   5.343 +#ifndef XEN
   5.344 +// hack for now, FIXME later
   5.345 +		acpi_disabled = 1;
   5.346 +#endif
   5.347 +		return result;
   5.348 +	}
   5.349 +
   5.350 +	result = acpi_blacklisted();
   5.351 +	if (result) {
   5.352 +		printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
   5.353 +#ifndef XEN
   5.354 +// hack for now, FIXME later
   5.355 +		acpi_disabled = 1;
   5.356 +#endif
   5.357 +		return result;
   5.358 +	}
   5.359 +
   5.360 +#ifdef CONFIG_X86_LOCAL_APIC
   5.361 +
   5.362 +	/* 
   5.363 +	 * MADT
   5.364 +	 * ----
    5.365 +	 * Parse the Multiple APIC Description Table (MADT), if it exists.
   5.366 +	 * Note that this table provides platform SMP configuration 
   5.367 +	 * information -- the successor to MPS tables.
   5.368 +	 */
   5.369 +
   5.370 +	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
   5.371 +	if (!result) {
   5.372 +		return 0;
   5.373 +	}
   5.374 +	else if (result < 0) {
   5.375 +		printk(KERN_ERR PREFIX "Error parsing MADT\n");
   5.376 +		return result;
   5.377 +	}
   5.378 +	else if (result > 1) 
   5.379 +		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
   5.380 +
   5.381 +	/* 
   5.382 +	 * Local APIC
   5.383 +	 * ----------
   5.384 +	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
    5.385 +	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
   5.386 +	 */
   5.387 +
   5.388 +	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
   5.389 +	if (result < 0) {
   5.390 +		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   5.391 +		return result;
   5.392 +	}
   5.393 +
   5.394 +	mp_register_lapic_address(acpi_lapic_addr);
   5.395 +
   5.396 +	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
   5.397 +	if (!result) { 
   5.398 +		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
   5.399 +		/* TBD: Cleanup to allow fallback to MPS */
   5.400 +		return -ENODEV;
   5.401 +	}
   5.402 +	else if (result < 0) {
   5.403 +		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
   5.404 +		/* TBD: Cleanup to allow fallback to MPS */
   5.405 +		return result;
   5.406 +	}
   5.407 +
   5.408 +	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
   5.409 +	if (result < 0) {
   5.410 +		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   5.411 +		/* TBD: Cleanup to allow fallback to MPS */
   5.412 +		return result;
   5.413 +	}
   5.414 +
   5.415 +	acpi_lapic = 1;
   5.416 +
   5.417 +#endif /*CONFIG_X86_LOCAL_APIC*/
   5.418 +
   5.419 +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   5.420 +
   5.421 +	/* 
   5.422 +	 * I/O APIC 
   5.423 +	 * --------
   5.424 +	 */
   5.425 +
   5.426 +	/*
   5.427 +	 * ACPI interpreter is required to complete interrupt setup,
   5.428 +	 * so if it is off, don't enumerate the io-apics with ACPI.
   5.429 +	 * If MPS is present, it will handle them,
   5.430 +	 * otherwise the system will stay in PIC mode
   5.431 +	 */
   5.432 +	if (acpi_disabled || acpi_noirq) {
   5.433 +		return 1;
   5.434 +	}
   5.435 +
   5.436 +	/*
   5.437 +	 * if "noapic" boot option, don't look for IO-APICs
   5.438 +	 */
   5.439 +	if (ioapic_setup_disabled()) {
   5.440 +		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
   5.441 +			"due to 'noapic' option.\n");
   5.442 +		return 1;
   5.443 +        }
   5.444 +
   5.445 +
   5.446 +	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
   5.447 +	if (!result) { 
   5.448 +		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
   5.449 +		return -ENODEV;
   5.450 +	}
   5.451 +	else if (result < 0) {
   5.452 +		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
   5.453 +		return result;
   5.454 +	}
   5.455 +
   5.456 +	/* Build a default routing table for legacy (ISA) interrupts. */
   5.457 +	mp_config_acpi_legacy_irqs();
   5.458 +
   5.459 +	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
   5.460 +	if (result < 0) {
   5.461 +		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   5.462 +		/* TBD: Cleanup to allow fallback to MPS */
   5.463 +		return result;
   5.464 +	}
   5.465 +
   5.466 +	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
   5.467 +	if (result < 0) {
   5.468 +		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   5.469 +		/* TBD: Cleanup to allow fallback to MPS */
   5.470 +		return result;
   5.471 +	}
   5.472 +
   5.473 +	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
   5.474 +
   5.475 +	acpi_irq_balance_set(NULL);
   5.476 +
   5.477 +	acpi_ioapic = 1;
   5.478 +
   5.479 +	if (acpi_lapic && acpi_ioapic)
   5.480 +		smp_found_config = 1;
   5.481 +
   5.482 +#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   5.483 +
   5.484 +	return 0;
   5.485 +}
   5.486 +
   5.487 +#endif /*CONFIG_ACPI_BOOT*/
   5.488 +
   5.489 +#ifdef	CONFIG_ACPI_BUS
   5.490 +/*
   5.491 + * "acpi_pic_sci=level" (current default)
   5.492 + * programs the PIC-mode SCI to Level Trigger.
   5.493 + * (NO-OP if the BIOS set Level Trigger already)
   5.494 + *
    5.495 + * If a PIC-mode SCI is not recognized or gives spurious IRQ7s,
    5.496 + * it may require Edge Trigger -- use "acpi_pic_sci=edge"
    5.497 + * (NO-OP if the BIOS set Edge Trigger already)
    5.498 + *
    5.499 + * Ports 0x4d0-0x4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
    5.500 + * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
    5.501 + * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
    5.502 + * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
   5.503 + */
   5.504 +
   5.505 +static __initdata int	acpi_pic_sci_trigger;	/* 0: level, 1: edge */
   5.506 +
   5.507 +void __init
   5.508 +acpi_pic_sci_set_trigger(unsigned int irq)
   5.509 +{
   5.510 +	unsigned char mask = 1 << (irq & 7);
   5.511 +	unsigned int port = 0x4d0 + (irq >> 3);
   5.512 +	unsigned char val = inb(port);
   5.513 +
   5.514 +	
   5.515 +	printk(PREFIX "IRQ%d SCI:", irq);
   5.516 +	if (!(val & mask)) {
   5.517 +		printk(" Edge");
   5.518 +
   5.519 +		if (!acpi_pic_sci_trigger) {
   5.520 +			printk(" set to Level");
   5.521 +			outb(val | mask, port);
   5.522 +		}
   5.523 +	} else {
   5.524 +		printk(" Level");
   5.525 +
   5.526 +		if (acpi_pic_sci_trigger) {
   5.527 +			printk(" set to Edge");
   5.528 +			outb(val | mask, port);
   5.529 +		}
   5.530 +	}
   5.531 +	printk(" Trigger.\n");
   5.532 +}
   5.533 +
   5.534 +int __init
   5.535 +acpi_pic_sci_setup(char *str)
   5.536 +{
   5.537 +	while (str && *str) {
   5.538 +		if (strncmp(str, "level", 5) == 0)
   5.539 +			acpi_pic_sci_trigger = 0;	/* force level trigger */
   5.540 +		if (strncmp(str, "edge", 4) == 0)
   5.541 +			acpi_pic_sci_trigger = 1;	/* force edge trigger */
   5.542 +		str = strchr(str, ',');
   5.543 +		if (str)
   5.544 +			str += strspn(str, ", \t");
   5.545 +	}
   5.546 +	return 1;
   5.547 +}
   5.548 +
   5.549 +__setup("acpi_pic_sci=", acpi_pic_sci_setup);
   5.550 +
   5.551 +#endif /* CONFIG_ACPI_BUS */
   5.552 +
   5.553 +
   5.554 +
   5.555 +/* --------------------------------------------------------------------------
   5.556 +                              Low-Level Sleep Support
   5.557 +   -------------------------------------------------------------------------- */
   5.558 +
   5.559 +#ifdef CONFIG_ACPI_SLEEP
   5.560 +
   5.561 +#define DEBUG
   5.562 +
   5.563 +#ifdef DEBUG
   5.564 +#include <xen/serial.h>
   5.565 +#endif
   5.566 +
   5.567 +/* address in low memory of the wakeup routine. */
   5.568 +unsigned long acpi_wakeup_address = 0;
   5.569 +
   5.570 +/* new page directory that we will be using */
   5.571 +static pmd_t *pmd;
   5.572 +
   5.573 +/* saved page directory */
   5.574 +static pmd_t saved_pmd;
   5.575 +
   5.576 +/* page which we'll use for the new page directory */
   5.577 +static pte_t *ptep;
   5.578 +
   5.579 +extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
   5.580 +
   5.581 +/*
   5.582 + * acpi_create_identity_pmd
   5.583 + *
   5.584 + * Create a new, identity mapped pmd.
   5.585 + *
   5.586 + * Do this by creating new page directory, and marking all the pages as R/W
   5.587 + * Then set it as the new Page Middle Directory.
   5.588 + * And, of course, flush the TLB so it takes effect.
   5.589 + *
   5.590 + * We save the address of the old one, for later restoration.
   5.591 + */
   5.592 +static void acpi_create_identity_pmd (void)
   5.593 +{
   5.594 +	pgd_t *pgd;
   5.595 +	int i;
   5.596 +
   5.597 +	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
   5.598 +
   5.599 +	/* fill page with low mapping */
   5.600 +	for (i = 0; i < PTRS_PER_PTE; i++)
   5.601 +		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
   5.602 +
   5.603 +	pgd = pgd_offset(current->active_mm, 0);
   5.604 +	pmd = pmd_alloc(current->mm,pgd, 0);
   5.605 +
   5.606 +	/* save the old pmd */
   5.607 +	saved_pmd = *pmd;
   5.608 +
   5.609 +	/* set the new one */
   5.610 +	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
   5.611 +
   5.612 +	/* flush the TLB */
   5.613 +	local_flush_tlb();
   5.614 +}
   5.615 +
   5.616 +/*
   5.617 + * acpi_restore_pmd
   5.618 + *
   5.619 + * Restore the old pmd saved by acpi_create_identity_pmd and
   5.620 + * free the page that said function alloc'd
   5.621 + */
   5.622 +static void acpi_restore_pmd (void)
   5.623 +{
   5.624 +	set_pmd(pmd, saved_pmd);
   5.625 +	local_flush_tlb();
   5.626 +	free_page((unsigned long)ptep);
   5.627 +}
   5.628 +
   5.629 +/**
   5.630 + * acpi_save_state_mem - save kernel state
   5.631 + *
   5.632 + * Create an identity mapped page table and copy the wakeup routine to
   5.633 + * low memory.
   5.634 + */
   5.635 +int acpi_save_state_mem (void)
   5.636 +{
   5.637 +	acpi_create_identity_pmd();
   5.638 +	acpi_copy_wakeup_routine(acpi_wakeup_address);
   5.639 +
   5.640 +	return 0;
   5.641 +}
   5.642 +
   5.643 +/**
   5.644 + * acpi_save_state_disk - save kernel state to disk
   5.645 + *
   5.646 + */
   5.647 +int acpi_save_state_disk (void)
   5.648 +{
   5.649 +	return 1;
   5.650 +}
   5.651 +
   5.652 +/*
   5.653 + * acpi_restore_state
   5.654 + */
   5.655 +void acpi_restore_state_mem (void)
   5.656 +{
   5.657 +	acpi_restore_pmd();
   5.658 +}
   5.659 +
   5.660 +/**
   5.661 + * acpi_reserve_bootmem - do _very_ early ACPI initialisation
   5.662 + *
   5.663 + * We allocate a page in low memory for the wakeup
   5.664 + * routine for when we come back from a sleep state. The
   5.665 + * runtime allocator allows specification of <16M pages, but not
   5.666 + * <1M pages.
   5.667 + */
   5.668 +void __init acpi_reserve_bootmem(void)
   5.669 +{
   5.670 +	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
   5.671 +	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
   5.672 +}
   5.673 +
   5.674 +void do_suspend_lowlevel_s4bios(int resume)
   5.675 +{
   5.676 +	if (!resume) {
   5.677 +		save_processor_context();
   5.678 +		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
   5.679 +		acpi_enter_sleep_state_s4bios();
   5.680 +		return;
   5.681 +	}
   5.682 +acpi_sleep_done:
   5.683 +	restore_processor_context();
   5.684 +}
   5.685 +
   5.686 +
   5.687 +#endif /*CONFIG_ACPI_SLEEP*/
   5.688 +
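[The ECLR comment in acpi.c above compresses the register addressing into the first two lines of acpi_pic_sci_set_trigger(). A stand-alone worked example of that arithmetic -- the IRQ numbers are chosen for illustration; the actual SCI IRQ comes from the FADT:

    #include <stdio.h>

    /* Mirror the port/mask computation from acpi_pic_sci_set_trigger():
     * each ISA IRQ maps to one bit in ECLR1 (port 0x4d0, IRQs 0-7) or
     * ECLR2 (port 0x4d1, IRQs 8-15). */
    static void eclr_for_irq(unsigned int irq)
    {
        unsigned char mask = 1 << (irq & 7);
        unsigned int port = 0x4d0 + (irq >> 3);
        printf("IRQ%u -> port 0x%x, mask 0x%02x\n", irq, port, mask);
    }

    int main(void)
    {
        eclr_for_irq(9);  /* a common SCI choice: port 0x4d1, mask 0x02 */
        eclr_for_irq(5);  /* port 0x4d0 (ECLR1), mask 0x20 */
        return 0;
    }
]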
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon Feb 14 12:25:30 2005 +0000
     6.3 @@ -0,0 +1,224 @@
     6.4 +/*
     6.5 + * Generate definitions needed by assembly language modules.
     6.6 + * This code generates raw asm output which is post-processed
     6.7 + * to extract and format the required data.
     6.8 + */
     6.9 +
    6.10 +#include <xen/config.h>
    6.11 +#include <xen/sched.h>
    6.12 +#include <asm/processor.h>
    6.13 +#include <asm/ptrace.h>
    6.14 +
    6.15 +#define task_struct exec_domain
    6.16 +
    6.17 +#define DEFINE(sym, val) \
    6.18 +        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    6.19 +
    6.20 +#define BLANK() asm volatile("\n->" : : )
    6.21 +
    6.22 +#define OFFSET(_sym, _str, _mem) \
    6.23 +    DEFINE(_sym, offsetof(_str, _mem));
    6.24 +
    6.25 +void foo(void)
    6.26 +{
    6.27 +	DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
    6.28 +	DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
    6.29 +	DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
    6.30 +	DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
    6.31 +	//DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
    6.32 +	DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
    6.33 +	//DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
    6.34 +	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
    6.35 +
    6.36 +	BLANK();
    6.37 +
    6.38 +	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
    6.39 +	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
    6.40 +
    6.41 +	BLANK();
    6.42 +
    6.43 +	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
    6.44 +	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
    6.45 +	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
    6.46 +	//DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
    6.47 +	//DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
    6.48 +	//DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
    6.49 +	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
    6.50 +	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
    6.51 +	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
    6.52 +	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
    6.53 +	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
    6.54 +
    6.55 +	BLANK();
    6.56 +
    6.57 +	//DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
    6.58 +
    6.59 +	BLANK();
    6.60 +
    6.61 +	//DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
    6.62 +							     //group_stop_count));
    6.63 +	//DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));
    6.64 +
    6.65 +	BLANK();
    6.66 +
    6.67 +	DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
    6.68 +	DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
    6.69 +	DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
    6.70 +	DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
    6.71 +	DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
    6.72 +	DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
    6.73 +	DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
    6.74 +	DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
    6.75 +	DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
    6.76 +	DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
    6.77 +	DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
    6.78 +	DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
    6.79 +	DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
    6.80 +	DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
    6.81 +	DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
    6.82 +
    6.83 +	DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
    6.84 +	DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
    6.85 +	DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
    6.86 +	DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
    6.87 +	DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
    6.88 +	DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
    6.89 +	DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
    6.90 +	DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
    6.91 +	DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
    6.92 +	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
    6.93 +	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
    6.94 +	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
    6.95 +	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
    6.96 +	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
    6.97 +	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
    6.98 +	DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
    6.99 +	DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
   6.100 +	DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
   6.101 +	DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
   6.102 +	DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
   6.103 +	DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
   6.104 +	DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
   6.105 +	DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
   6.106 +	DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
   6.107 +	DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
   6.108 +	DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
   6.109 +	DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
   6.110 +	DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
   6.111 +	DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
   6.112 +	DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
   6.113 +	DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
   6.114 +	DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
   6.115 +	DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
   6.116 +	DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
   6.117 +	DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
   6.118 +
   6.119 +	BLANK();
   6.120 +
   6.121 +	DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
   6.122 +	DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
   6.123 +	DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
   6.124 +	DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
   6.125 +	DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
   6.126 +	DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
   6.127 +	DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
   6.128 +	DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
   6.129 +	DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
   6.130 +	DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
   6.131 +	DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
   6.132 +	DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
   6.133 +	DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
   6.134 +	DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
   6.135 +	DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
   6.136 +	DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
   6.137 +	DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
   6.138 +	DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
   6.139 +	DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
   6.140 +	DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
   6.141 +	DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
   6.142 +	DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
   6.143 +	DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
   6.144 +	DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
   6.145 +	DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
   6.146 +	DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
   6.147 +	DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
   6.148 +	DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
   6.149 +	DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
   6.150 +	DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
   6.151 +	DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
   6.152 +	DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
   6.153 +	DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
   6.154 +	DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
   6.155 +	DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
   6.156 +	DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
   6.157 +	DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
   6.158 +	DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
   6.159 +	DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
   6.160 +	DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
   6.161 +	DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
   6.162 +	DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
   6.163 +
   6.164 +	BLANK();
   6.165 +
   6.166 +	//DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
   6.167 +	//DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
   6.168 +	//DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
   6.169 +	//DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
   6.170 +	//DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
   6.171 +	//DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
   6.172 +	//DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
   6.173 +	//DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
   6.174 +	//DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
   6.175 +	//DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
   6.176 +	//DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
   6.177 +	//DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
   6.178 +	//DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));
   6.179 +
   6.180 +	//BLANK();
   6.181 +
   6.182 +	//DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal));
   6.183 +
   6.184 +	//BLANK();
   6.185 +
   6.186 +	//DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
   6.187 +	//DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
   6.188 +	//DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
   6.189 +	//DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
   6.190 +	//DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
   6.191 +	//BLANK();
   6.192 +    /* for assembly files which can't include sched.h: */
   6.193 +	//DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
   6.194 +	//DEFINE(IA64_CLONE_VM, CLONE_VM);
   6.195 +
   6.196 +	BLANK();
   6.197 +	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
   6.198 +	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
   6.199 +
   6.200 +
   6.201 +	DEFINE(CLONE_IDLETASK_BIT, 12);
   6.202 +	DEFINE(CLONE_SETTLS_BIT, 19);
   6.203 +//#if CLONE_SETTLS != (1<<19)
   6.204 +//# error "CLONE_SETTLS_BIT incorrect, please fix"
   6.205 +//#endif
   6.206 +
   6.207 +	//BLANK();
   6.208 +	//DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof (struct ia64_mca_tlb_info));
   6.209 +	/* used by head.S */
   6.210 +	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
   6.211 +
   6.212 +	BLANK();
   6.213 +	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
   6.214 +	//DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
   6.215 +	//DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
   6.216 +	//DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
   6.217 +	//DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
   6.218 +	//DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
   6.219 +	//DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
   6.220 +	//DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
   6.221 +	//DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
   6.222 +	//DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
   6.223 +	//DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
   6.224 +	//DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
   6.225 +	//DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
   6.226 +	//DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
   6.227 +}
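[asm-offsets.c above is never executed; the ia64 Makefile compiles it with -S (the asm-offsets.s rule) and the "->" markers are scraped out of the assembly by a later build step. A minimal self-contained version of the mechanism, using a stand-in struct -- the names here are illustrative, not from the patch:

    #include <stddef.h>

    struct demo_regs {
        unsigned long cr_ipsr;
        unsigned long r8;
    };

    /* The "i" constraint forces a compile-time constant, so the .s
     * output contains a literal marker line such as "->DEMO_R8_OFFSET 8"
     * (exact immediate syntax is target-dependent), which a sed/awk
     * pass can turn into "#define DEMO_R8_OFFSET 8". */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
        DEFINE(DEMO_R8_OFFSET, offsetof(struct demo_regs, r8));
    }

Compiling this with gcc -S and grepping the output for "->" shows the marker line; the post-processing script itself is outside this changeset.]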
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/ia64/dom0_ops.c	Mon Feb 14 12:25:30 2005 +0000
     7.3 @@ -0,0 +1,57 @@
     7.4 +/******************************************************************************
     7.5 + * Arch-specific dom0_ops.c
     7.6 + * 
     7.7 + * Process command requests from domain-0 guest OS.
     7.8 + * 
     7.9 + * Copyright (c) 2002, K A Fraser
    7.10 + */
    7.11 +
    7.12 +#include <xen/config.h>
    7.13 +#include <xen/types.h>
    7.14 +#include <xen/lib.h>
    7.15 +#include <xen/mm.h>
    7.16 +#include <public/dom0_ops.h>
    7.17 +#include <xen/sched.h>
    7.18 +#include <xen/event.h>
    7.19 +#include <asm/domain_page.h>
    7.20 +//#include <asm/msr.h>
    7.21 +#include <asm/pdb.h>
    7.22 +#include <xen/trace.h>
    7.23 +#include <xen/console.h>
    7.24 +//#include <xen/shadow.h>
    7.25 +#include <public/sched_ctl.h>
    7.26 +
    7.27 +#define TRC_DOM0OP_ENTER_BASE  0x00020000
    7.28 +#define TRC_DOM0OP_LEAVE_BASE  0x00030000
    7.29 +
    7.30 +extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
    7.31 +
    7.32 +static int msr_cpu_mask;
    7.33 +static unsigned long msr_addr;
    7.34 +static unsigned long msr_lo;
    7.35 +static unsigned long msr_hi;
    7.36 +
    7.37 +long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
    7.38 +{
    7.39 +    long ret = 0;
    7.40 +
    7.41 +    if ( !IS_PRIV(current->domain) )
    7.42 +        return -EPERM;
    7.43 +
    7.44 +    switch ( op->cmd )
    7.45 +    {
    7.46 +
    7.47 +    default:
    7.48 +        ret = -ENOSYS;
    7.49 +
    7.50 +    }
    7.51 +
    7.52 +    return ret;
    7.53 +}
    7.54 +
    7.55 +void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
    7.56 +{ 
    7.57 +    int i;
    7.58 +
    7.59 +	dummy();
    7.60 +}
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/ia64/dom_fw.c	Mon Feb 14 12:25:30 2005 +0000
     8.3 @@ -0,0 +1,511 @@
     8.4 +/*
     8.5 + *  Xen domain firmware emulation support
     8.6 + *  Copyright (C) 2004 Hewlett-Packard Co.
     8.7 + *       Dan Magenheimer (dan.magenheimer@hp.com)
     8.8 + *
     8.9 + */
    8.10 +
    8.11 +#include <xen/config.h>
    8.12 +#include <asm/system.h>
    8.13 +#include <asm/pgalloc.h>
    8.14 +
    8.15 +#ifdef CONFIG_PCI
    8.16 +# include <linux/pci.h>
    8.17 +#endif
    8.18 +
    8.19 +#include <linux/efi.h>
    8.20 +#include <asm/io.h>
    8.21 +#include <asm/pal.h>
    8.22 +#include <asm/sal.h>
    8.23 +
    8.24 +#include <asm/dom_fw.h>
    8.25 +
    8.26 +struct ia64_boot_param *dom_fw_init(struct domain *, char *,int,char *,int);
    8.27 +extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
    8.28 +extern struct domain *dom0;
    8.29 +extern unsigned long dom0_start;
    8.30 +
    8.31 +extern unsigned long running_on_sim;
    8.32 +
    8.33 +
    8.34 +unsigned long dom_fw_base_mpa = -1;
    8.35 +unsigned long imva_fw_base = -1;
    8.36 +
    8.37 +// return domain (meta)physical address for a given imva
    8.38 +// this function is a call-back from dom_fw_init
    8.39 +unsigned long dom_pa(unsigned long imva)
    8.40 +{
    8.41 +	if (dom_fw_base_mpa == -1 || imva_fw_base == -1) {
    8.42 +		printf("dom_pa: uninitialized! (spinning...)\n");
    8.43 +		while(1);
    8.44 +	}
     8.45 +	if (imva - imva_fw_base >= PAGE_SIZE) {
    8.46 +		printf("dom_pa: bad offset! imva=%p, imva_fw_base=%p (spinning...)\n",imva,imva_fw_base);
    8.47 +		while(1);
    8.48 +	}
    8.49 +	return dom_fw_base_mpa + (imva - imva_fw_base);
    8.50 +}
    8.51 +
    8.52 +// builds a hypercall bundle at domain physical address
    8.53 +void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
    8.54 +{
    8.55 +	unsigned long imva;
    8.56 +
    8.57 +	if (d == dom0) paddr += dom0_start;
    8.58 +	imva = domain_mpa_to_imva(d,paddr);
    8.59 +	build_hypercall_bundle(imva,d->breakimm,hypercall,1);
    8.60 +}
    8.61 +
    8.62 +
    8.63 +// builds a hypercall bundle at domain physical address
    8.64 +void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall,unsigned long ret)
    8.65 +{
    8.66 +	unsigned long imva;
    8.67 +
    8.68 +	if (d == dom0) paddr += dom0_start;
    8.69 +	imva = domain_mpa_to_imva(d,paddr);
    8.70 +	build_hypercall_bundle(imva,d->breakimm,hypercall,ret);
    8.71 +}
    8.72 +
    8.73 +
    8.74 +// FIXME: This is really a hack: Forcing the boot parameter block
    8.75 +// at domain mpaddr 0 page, then grabbing only the low bits of the
    8.76 +// Xen imva, which is the offset into the page
    8.77 +unsigned long dom_fw_setup(struct domain *d, char *args, int arglen)
    8.78 +{
    8.79 +	struct ia64_boot_param *bp;
    8.80 +
    8.81 +	dom_fw_base_mpa = 0;
    8.82 +	if (d == dom0) dom_fw_base_mpa += dom0_start;
    8.83 +	imva_fw_base = domain_mpa_to_imva(d,dom_fw_base_mpa);
     8.84 +	bp = dom_fw_init(d,args,arglen,(char *)imva_fw_base,PAGE_SIZE);
    8.85 +	return dom_pa((unsigned long)bp);
    8.86 +}
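
For orientation: dom_fw_setup() is called once per domain from new_thread() in domain.c below, and the metaphysical address of the boot-parameter block it returns is handed to the new domain in r28:

	regs->r28 = dom_fw_setup(d, saved_command_line, 256L);
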
    8.87 +
    8.88 +
    8.89 +/* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
    8.90 +
    8.91 +#define MB	(1024*1024UL)
    8.92 +
    8.93 +#define NUM_EFI_SYS_TABLES 6
    8.94 +#define PASS_THRU_IOPORT_SPACE
    8.95 +#ifdef PASS_THRU_IOPORT_SPACE
    8.96 +# define NUM_MEM_DESCS	4
    8.97 +#else
    8.98 +# define NUM_MEM_DESCS	3
    8.99 +#endif
   8.100 +
   8.101 +
   8.102 +#define SECS_PER_HOUR   (60 * 60)
   8.103 +#define SECS_PER_DAY    (SECS_PER_HOUR * 24)
   8.104 +
    8.105 +/* Compute the calendar representation of the UTC time T (seconds
    8.106 +   since the 1970 epoch) and store year, month, day, hour, minute,
    8.107 +   second into the efi_time_t pointed to by TP.
    8.108 +   Return nonzero if successful.  */
   8.109 +int
   8.110 +offtime (unsigned long t, efi_time_t *tp)
   8.111 +{
   8.112 +	const unsigned short int __mon_yday[2][13] =
   8.113 +	{
   8.114 +		/* Normal years.  */
   8.115 +		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
   8.116 +		/* Leap years.  */
   8.117 +		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
   8.118 +	};
   8.119 +	long int days, rem, y;
   8.120 +	const unsigned short int *ip;
   8.121 +
   8.122 +	days = t / SECS_PER_DAY;
   8.123 +	rem = t % SECS_PER_DAY;
   8.124 +	while (rem < 0) {
   8.125 +		rem += SECS_PER_DAY;
   8.126 +		--days;
   8.127 +	}
   8.128 +	while (rem >= SECS_PER_DAY) {
   8.129 +		rem -= SECS_PER_DAY;
   8.130 +		++days;
   8.131 +	}
   8.132 +	tp->hour = rem / SECS_PER_HOUR;
   8.133 +	rem %= SECS_PER_HOUR;
   8.134 +	tp->minute = rem / 60;
   8.135 +	tp->second = rem % 60;
   8.136 +	/* January 1, 1970 was a Thursday.  */
   8.137 +	y = 1970;
   8.138 +
   8.139 +#	define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
   8.140 +#	define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
   8.141 +#	define __isleap(year) \
   8.142 +	  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
   8.143 +
   8.144 +	while (days < 0 || days >= (__isleap (y) ? 366 : 365)) {
   8.145 +		/* Guess a corrected year, assuming 365 days per year.  */
   8.146 +		long int yg = y + days / 365 - (days % 365 < 0);
   8.147 +
   8.148 +		/* Adjust DAYS and Y to match the guessed year.  */
   8.149 +		days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1)
   8.150 +			 - LEAPS_THRU_END_OF (y - 1));
   8.151 +		y = yg;
   8.152 +	}
   8.153 +	tp->year = y;
   8.154 +	ip = __mon_yday[__isleap(y)];
   8.155 +	for (y = 11; days < (long int) ip[y]; --y)
   8.156 +		continue;
   8.157 +	days -= ip[y];
   8.158 +	tp->month = y + 1;
   8.159 +	tp->day = days + 1;
   8.160 +	return 1;
   8.161 +}
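
A quick sanity check of offtime() (a worked example, not part of the changeset): t = 1000000000 is 11574 days plus 6400 seconds past the epoch, i.e. 2001-09-09 01:46:40 UTC.

	efi_time_t tm;
	offtime(1000000000UL, &tm);
	/* tm.year == 2001, tm.month == 9, tm.day == 9,
	   tm.hour == 1, tm.minute == 46, tm.second == 40 */
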
   8.162 +
   8.163 +extern void pal_emulator_static (void);
   8.164 +
   8.165 +/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
   8.166 +
   8.167 +#define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)
   8.168 +
   8.169 +#define REG_OFFSET(addr)	(0x00000000000000FF & (addr))
   8.170 +#define DEVICE_FUNCTION(addr)	(0x000000000000FF00 & (addr))
   8.171 +#define BUS_NUMBER(addr)	(0x0000000000FF0000 & (addr))
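
These macros decompose a packed PCI config-space address the same way the legacy CF8 mechanism does. For example (values chosen purely for illustration), bus 2, device/function 0x10, register 0x40:

	unsigned long addr = (2UL << 16) | (0x10UL << 8) | 0x40;
	/* BUILD_CMD(addr)       == 0x80021040  (enable bit set, low 2 bits clear)
	   BUS_NUMBER(addr)      == 0x00020000
	   DEVICE_FUNCTION(addr) == 0x00001000
	   REG_OFFSET(addr)      == 0x00000040 */
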
   8.172 +
   8.173 +#ifndef XEN
   8.174 +static efi_status_t
   8.175 +fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
   8.176 +{
   8.177 +#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
   8.178 +	struct {
   8.179 +		int tv_sec;	/* must be 32bits to work */
   8.180 +		int tv_usec;
   8.181 +	} tv32bits;
   8.182 +
   8.183 +	ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD);
   8.184 +
   8.185 +	memset(tm, 0, sizeof(*tm));
   8.186 +	offtime(tv32bits.tv_sec, tm);
   8.187 +
   8.188 +	if (tc)
   8.189 +		memset(tc, 0, sizeof(*tc));
   8.190 +#else
   8.191 +#	error Not implemented yet...
   8.192 +#endif
   8.193 +	return EFI_SUCCESS;
   8.194 +}
   8.195 +
   8.196 +static void
   8.197 +efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
   8.198 +{
   8.199 +#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
   8.200 +	ssc(status, 0, 0, 0, SSC_EXIT);
   8.201 +#else
   8.202 +#	error Not implemented yet...
   8.203 +#endif
   8.204 +}
   8.205 +
   8.206 +static efi_status_t
   8.207 +efi_unimplemented (void)
   8.208 +{
   8.209 +	return EFI_UNSUPPORTED;
   8.210 +}
   8.211 +#endif /* !XEN */
   8.212 +
   8.213 +struct sal_ret_values
   8.214 +sal_emulator (long index, unsigned long in1, unsigned long in2,
   8.215 +	      unsigned long in3, unsigned long in4, unsigned long in5,
   8.216 +	      unsigned long in6, unsigned long in7)
   8.217 +{
   8.218 +	long r9  = 0;
   8.219 +	long r10 = 0;
   8.220 +	long r11 = 0;
   8.221 +	long status;
   8.222 +
   8.223 +	/*
   8.224 +	 * Don't do a "switch" here since that gives us code that
   8.225 +	 * isn't self-relocatable.
   8.226 +	 */
   8.227 +	status = 0;
   8.228 +	if (index == SAL_FREQ_BASE) {
   8.229 +		switch (in1) {
   8.230 +		      case SAL_FREQ_BASE_PLATFORM:
   8.231 +			r9 = 200000000;
   8.232 +			break;
   8.233 +
   8.234 +		      case SAL_FREQ_BASE_INTERVAL_TIMER:
   8.235 +			/*
   8.236 +			 * Is this supposed to be the cr.itc frequency
   8.237 +			 * or something platform specific?  The SAL
    8.238 +			 * doc isn't exactly clear on this...
   8.239 +			 */
   8.240 +			r9 = 700000000;
   8.241 +			break;
   8.242 +
   8.243 +		      case SAL_FREQ_BASE_REALTIME_CLOCK:
   8.244 +			r9 = 1;
   8.245 +			break;
   8.246 +
   8.247 +		      default:
   8.248 +			status = -1;
   8.249 +			break;
   8.250 +		}
   8.251 +	} else if (index == SAL_PCI_CONFIG_READ) {
   8.252 +		if (current->domain == dom0) {
   8.253 +			u64 value;
   8.254 +			// note that args 2&3 are swapped!!
   8.255 +			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
   8.256 +			r9 = value;
   8.257 +		}
   8.258 +		else printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
   8.259 +	} else if (index == SAL_PCI_CONFIG_WRITE) {
   8.260 +		if (current->domain == dom0) {
   8.261 +			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
   8.262 +			    (in4 > 1) ||
   8.263 +			    (in2 > 8) || (in2 & (in2-1)))
    8.264 +			    	printf("*** SAL_PCI_CONFIG_WRITE?!?(adr=%p,typ=%p,sz=%p,val=%p)\n",in1,in4,in2,in3);
   8.265 +			// note that args are in a different order!!
   8.266 +			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
   8.267 +		}
   8.268 +		else printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
   8.269 +	} else if (index == SAL_SET_VECTORS) {
   8.270 +		printf("*** CALLED SAL_SET_VECTORS.  IGNORED...\n");
   8.271 +	} else if (index == SAL_GET_STATE_INFO) {
   8.272 +		printf("*** CALLED SAL_GET_STATE_INFO.  IGNORED...\n");
   8.273 +	} else if (index == SAL_GET_STATE_INFO_SIZE) {
   8.274 +		printf("*** CALLED SAL_GET_STATE_INFO_SIZE.  IGNORED...\n");
   8.275 +	} else if (index == SAL_CLEAR_STATE_INFO) {
   8.276 +		printf("*** CALLED SAL_CLEAR_STATE_INFO.  IGNORED...\n");
   8.277 +	} else if (index == SAL_MC_RENDEZ) {
   8.278 +		printf("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
   8.279 +	} else if (index == SAL_MC_SET_PARAMS) {
   8.280 +		printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
   8.281 +	} else if (index == SAL_CACHE_FLUSH) {
   8.282 +		printf("*** CALLED SAL_CACHE_FLUSH.  IGNORED...\n");
   8.283 +	} else if (index == SAL_CACHE_INIT) {
   8.284 +		printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
   8.285 +	} else if (index == SAL_UPDATE_PAL) {
   8.286 +		printf("*** CALLED SAL_UPDATE_PAL.  IGNORED...\n");
   8.287 +	} else {
   8.288 +		printf("*** CALLED SAL_ WITH UNKNOWN INDEX.  IGNORED...\n");
   8.289 +		status = -1;
   8.290 +	}
   8.291 +	return ((struct sal_ret_values) {status, r9, r10, r11});
   8.292 +}
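
So, for example, a guest querying the platform base frequency gets the hardwired constant above:

	struct sal_ret_values v =
		sal_emulator(SAL_FREQ_BASE, SAL_FREQ_BASE_PLATFORM, 0, 0, 0, 0, 0, 0);
	/* returns {status = 0, r9 = 200000000, r10 = 0, r11 = 0} */
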
   8.293 +
   8.294 +
   8.295 +#define NFUNCPTRS 20
   8.296 +
   8.297 +void print_md(efi_memory_desc_t *md)
   8.298 +{
   8.299 +#if 1
   8.300 +	printk("domain mem: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
   8.301 +		md->type, md->attribute, md->phys_addr,
   8.302 +		md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
   8.303 +		md->num_pages >> (20 - EFI_PAGE_SHIFT));
   8.304 +#endif
   8.305 +}
   8.306 +
   8.307 +struct ia64_boot_param *
   8.308 +dom_fw_init (struct domain *d, char *args, int arglen, char *fw_mem, int fw_mem_size)
   8.309 +{
   8.310 +	efi_system_table_t *efi_systab;
   8.311 +	efi_runtime_services_t *efi_runtime;
   8.312 +	efi_config_table_t *efi_tables;
   8.313 +	struct ia64_sal_systab *sal_systab;
   8.314 +	efi_memory_desc_t *efi_memmap, *md;
   8.315 +	unsigned long *pal_desc, *sal_desc;
   8.316 +	struct ia64_sal_desc_entry_point *sal_ed;
   8.317 +	struct ia64_boot_param *bp;
   8.318 +	unsigned long *pfn;
   8.319 +	unsigned char checksum = 0;
   8.320 +	char *cp, *cmd_line, *fw_vendor;
   8.321 +	int i = 0;
   8.322 +	unsigned long maxmem = d->max_pages * PAGE_SIZE;
   8.323 +	unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
   8.324 +
   8.325 +#	define MAKE_MD(typ, attr, start, end)		\
   8.326 +	do {						\
   8.327 +		md = efi_memmap + i++;			\
   8.328 +		md->type = typ;				\
   8.329 +		md->pad = 0;				\
   8.330 +		md->phys_addr = start_mpaddr + start;	\
   8.331 +		md->virt_addr = 0;			\
   8.332 +		md->num_pages = (end - start) >> 12;	\
   8.333 +		md->attribute = attr;			\
   8.334 +		print_md(md);				\
   8.335 +	} while (0)
   8.336 +
   8.337 +/* FIXME: should check size but for now we have a whole MB to play with.
   8.338 +   And if stealing code from fw-emu.c, watch out for new fw_vendor on the end!
   8.339 +	if (fw_mem_size < sizeof(fw_mem_proto)) {
   8.340 +		printf("sys_fw_init: insufficient space for fw_mem\n");
   8.341 +		return 0;
   8.342 +	}
   8.343 +*/
   8.344 +	memset(fw_mem, 0, fw_mem_size);
   8.345 +
   8.346 +#ifdef XEN
   8.347 +#else
   8.348 +	pal_desc = (unsigned long *) &pal_emulator_static;
   8.349 +	sal_desc = (unsigned long *) &sal_emulator;
   8.350 +#endif
   8.351 +
   8.352 +	cp = fw_mem;
   8.353 +	efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
   8.354 +	efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
   8.355 +	efi_tables  = (void *) cp; cp += NUM_EFI_SYS_TABLES * sizeof(*efi_tables);
   8.356 +	sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
   8.357 +	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
   8.358 +	efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
   8.359 +	bp	    = (void *) cp; cp += sizeof(*bp);
    8.360 +	pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(*pfn);
   8.361 +	cmd_line    = (void *) cp;
   8.362 +
   8.363 +	if (args) {
   8.364 +		if (arglen >= 1024)
   8.365 +			arglen = 1023;
   8.366 +		memcpy(cmd_line, args, arglen);
   8.367 +	} else {
   8.368 +		arglen = 0;
   8.369 +	}
   8.370 +	cmd_line[arglen] = '\0';
   8.371 +
    8.372 +	memset(efi_systab, 0, sizeof(*efi_systab));
   8.373 +	efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
   8.374 +	efi_systab->hdr.revision  = EFI_SYSTEM_TABLE_REVISION;
   8.375 +	efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
   8.376 +	cp = fw_vendor = &cmd_line[arglen] + (2-(arglen&1)); // round to 16-bit boundary
   8.377 +#define FW_VENDOR "X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
   8.378 +	cp += sizeof(FW_VENDOR) + (8-((unsigned long)cp & 7)); // round to 64-bit boundary
   8.379 +
   8.380 +	memcpy(fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
   8.381 +	efi_systab->fw_vendor = dom_pa(fw_vendor);
   8.382 +	
   8.383 +	efi_systab->fw_revision = 1;
   8.384 +	efi_systab->runtime = (void *) dom_pa(efi_runtime);
   8.385 +	efi_systab->nr_tables = NUM_EFI_SYS_TABLES;
   8.386 +	efi_systab->tables = dom_pa(efi_tables);
   8.387 +
   8.388 +	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
   8.389 +	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
   8.390 +	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
   8.391 +#define EFI_HYPERCALL_PATCH(tgt,call) do { \
   8.392 +    dom_efi_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call); \
   8.393 +    tgt = dom_pa(pfn); \
   8.394 +    *pfn++ = FW_HYPERCALL_##call##_PADDR + ((d==dom0)?dom0_start:0); \
   8.395 +    *pfn++ = 0; \
   8.396 +    } while (0)
   8.397 +
   8.398 +	EFI_HYPERCALL_PATCH(efi_runtime->get_time,EFI_GET_TIME);
   8.399 +	EFI_HYPERCALL_PATCH(efi_runtime->set_time,EFI_SET_TIME);
   8.400 +	EFI_HYPERCALL_PATCH(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
   8.401 +	EFI_HYPERCALL_PATCH(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
   8.402 +	EFI_HYPERCALL_PATCH(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
   8.403 +	EFI_HYPERCALL_PATCH(efi_runtime->get_variable,EFI_GET_VARIABLE);
   8.404 +	EFI_HYPERCALL_PATCH(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
   8.405 +	EFI_HYPERCALL_PATCH(efi_runtime->set_variable,EFI_SET_VARIABLE);
   8.406 +	EFI_HYPERCALL_PATCH(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
   8.407 +	EFI_HYPERCALL_PATCH(efi_runtime->reset_system,EFI_RESET_SYSTEM);
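
For GET_TIME, that macro line expands roughly to the following: patch a hypercall bundle at the fixed firmware address, then point the runtime-services entry at a two-word function descriptor assembled in fw_mem:

	dom_efi_hypercall_patch(d, FW_HYPERCALL_EFI_GET_TIME_PADDR,
	                        FW_HYPERCALL_EFI_GET_TIME);
	efi_runtime->get_time = dom_pa(pfn);	/* descriptor's metaphysical address */
	*pfn++ = FW_HYPERCALL_EFI_GET_TIME_PADDR + ((d == dom0) ? dom0_start : 0);
	*pfn++ = 0;				/* gp slot; unused by the stub */
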
   8.408 +
   8.409 +	efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
   8.410 +	efi_tables[0].table = dom_pa(sal_systab);
   8.411 +	for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
   8.412 +		efi_tables[i].guid = NULL_GUID;
   8.413 +		efi_tables[i].table = 0;
   8.414 +	}
   8.415 +	if (d == dom0) {
   8.416 +		printf("Domain0 EFI passthrough:");
   8.417 +		i = 1;
   8.418 +		if (efi.mps) {
   8.419 +			efi_tables[i].guid = MPS_TABLE_GUID;
   8.420 +			efi_tables[i].table = __pa(efi.mps);
    8.421 +			printf(" MPS=0x%lx",efi_tables[i].table);
   8.422 +			i++;
   8.423 +		}
   8.424 +		if (efi.acpi20) {
   8.425 +			efi_tables[i].guid = ACPI_20_TABLE_GUID;
   8.426 +			efi_tables[i].table = __pa(efi.acpi20);
    8.427 +			printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
   8.428 +			i++;
   8.429 +		}
   8.430 +		if (efi.acpi) {
   8.431 +			efi_tables[i].guid = ACPI_TABLE_GUID;
   8.432 +			efi_tables[i].table = __pa(efi.acpi);
    8.433 +			printf(" ACPI=0x%lx",efi_tables[i].table);
   8.434 +			i++;
   8.435 +		}
   8.436 +		if (efi.smbios) {
   8.437 +			efi_tables[i].guid = SMBIOS_TABLE_GUID;
   8.438 +			efi_tables[i].table = __pa(efi.smbios);
    8.439 +			printf(" SMBIOS=0x%lx",efi_tables[i].table);
   8.440 +			i++;
   8.441 +		}
   8.442 +		if (efi.hcdp) {
   8.443 +			efi_tables[i].guid = HCDP_TABLE_GUID;
   8.444 +			efi_tables[i].table = __pa(efi.hcdp);
    8.445 +			printf(" HCDP=0x%lx",efi_tables[i].table);
   8.446 +			i++;
   8.447 +		}
   8.448 +		printf("\n");
   8.449 +	}
   8.450 +
   8.451 +	/* fill in the SAL system table: */
   8.452 +	memcpy(sal_systab->signature, "SST_", 4);
   8.453 +	sal_systab->size = sizeof(*sal_systab);
   8.454 +	sal_systab->sal_rev_minor = 1;
   8.455 +	sal_systab->sal_rev_major = 0;
   8.456 +	sal_systab->entry_count = 1;
   8.457 +
   8.458 +	strcpy(sal_systab->oem_id, "Xen/ia64");
   8.459 +	strcpy(sal_systab->product_id, "Xen/ia64");
   8.460 +
   8.461 +	/* fill in an entry point: */
   8.462 +	sal_ed->type = SAL_DESC_ENTRY_POINT;
   8.463 +#define FW_HYPERCALL_PATCH(tgt,call,ret) do { \
   8.464 +    dom_fw_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call,ret); \
   8.465 +    tgt = FW_HYPERCALL_##call##_PADDR + ((d==dom0)?dom0_start:0); \
   8.466 +    } while (0)
   8.467 +	FW_HYPERCALL_PATCH(sal_ed->pal_proc,PAL_CALL,0);
   8.468 +	FW_HYPERCALL_PATCH(sal_ed->sal_proc,SAL_CALL,1);
   8.469 +	sal_ed->gp = 0;  // will be ignored
   8.470 +
   8.471 +	for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
   8.472 +		checksum += *cp;
   8.473 +
   8.474 +	sal_systab->checksum = -checksum;
   8.475 +
   8.476 +	/* simulate 1MB free memory at physical address zero */
   8.477 +	i = 0;
   8.478 +	MAKE_MD(EFI_BOOT_SERVICES_DATA,EFI_MEMORY_WB,0*MB,1*MB);
   8.479 +	/* hypercall patches live here, masquerade as reserved PAL memory */
   8.480 +	MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB,HYPERCALL_START,HYPERCALL_END);
   8.481 +	MAKE_MD(EFI_CONVENTIONAL_MEMORY,EFI_MEMORY_WB,HYPERCALL_END,maxmem);
   8.482 +#ifdef PASS_THRU_IOPORT_SPACE
   8.483 +	if (d == dom0 && !running_on_sim) {
   8.484 +		/* pass through the I/O port space */
   8.485 +		efi_memory_desc_t efi_get_io_md(void);
   8.486 +		efi_memory_desc_t ia64_efi_io_md = efi_get_io_md();
   8.487 +		u32 type;
   8.488 +		u64 iostart, ioend, ioattr;
   8.489 +		
   8.490 +		type = ia64_efi_io_md.type;
   8.491 +		iostart = ia64_efi_io_md.phys_addr;
   8.492 +		ioend = ia64_efi_io_md.phys_addr +
   8.493 +			(ia64_efi_io_md.num_pages << 12);
   8.494 +		ioattr = ia64_efi_io_md.attribute;
   8.495 +		MAKE_MD(type,ioattr,iostart,ioend);
   8.496 +	}
   8.497 +	else
   8.498 +		MAKE_MD(EFI_RESERVED_TYPE,0,0,0);
   8.499 +#endif
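
The guest therefore sees a four-entry map (three when I/O passthrough is compiled out), each entry offset by start_mpaddr: boot-services data in [0MB,1MB), the hypercall stubs disguised as PAL code in [HYPERCALL_START,HYPERCALL_END), conventional memory from HYPERCALL_END up to maxmem, and, for dom0 on real hardware, the machine's I/O-port descriptor.
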
   8.500 +
   8.501 +	bp->efi_systab = dom_pa(fw_mem);
   8.502 +	bp->efi_memmap = dom_pa(efi_memmap);
   8.503 +	bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
   8.504 +	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
   8.505 +	bp->efi_memdesc_version = 1;
   8.506 +	bp->command_line = dom_pa(cmd_line);
   8.507 +	bp->console_info.num_cols = 80;
   8.508 +	bp->console_info.num_rows = 25;
   8.509 +	bp->console_info.orig_x = 0;
   8.510 +	bp->console_info.orig_y = 24;
   8.511 +	bp->fpswa = 0;
   8.512 +
   8.513 +	return bp;
   8.514 +}
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/ia64/domain.c	Mon Feb 14 12:25:30 2005 +0000
     9.3 @@ -0,0 +1,689 @@
     9.4 +/*
     9.5 + *  Copyright (C) 1995  Linus Torvalds
     9.6 + *
     9.7 + *  Pentium III FXSR, SSE support
     9.8 + *	Gareth Hughes <gareth@valinux.com>, May 2000
     9.9 + */
    9.10 +
    9.11 +#include <xen/config.h>
    9.12 +#include <xen/lib.h>
    9.13 +#include <xen/errno.h>
    9.14 +#include <xen/sched.h>
    9.15 +#include <xen/smp.h>
    9.16 +#include <xen/delay.h>
    9.17 +#include <xen/softirq.h>
    9.18 +#include <xen/mm.h>
    9.19 +#include <asm/ptrace.h>
    9.20 +#include <asm/system.h>
    9.21 +#include <asm/io.h>
    9.22 +#include <asm/processor.h>
    9.23 +#include <asm/desc.h>
    9.24 +//#include <asm/mpspec.h>
    9.25 +#include <xen/irq.h>
    9.26 +#include <xen/event.h>
    9.27 +//#include <xen/shadow.h>
    9.28 +#include <xen/console.h>
    9.29 +
    9.30 +#include <xen/elf.h>
    9.31 +//#include <asm/page.h>
    9.32 +#include <asm/pgalloc.h>
    9.33 +#include <asm/dma.h>	/* for MAX_DMA_ADDRESS */
    9.34 +
    9.35 +#include <asm/asm-offsets.h>  /* for IA64_THREAD_INFO_SIZE */
    9.36 +
    9.37 +#include <asm/vcpu.h>   /* for function declarations */
    9.38 +
    9.39 +#define CONFIG_DOMAIN0_CONTIGUOUS
    9.40 +unsigned long dom0_start = -1L;
    9.41 +unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
    9.42 +//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    9.43 +unsigned long dom0_align = 64*1024*1024;
    9.44 +
    9.45 +extern kmem_cache_t *domain_struct_cachep;
    9.46 +
    9.47 +// initialized by arch/ia64/setup.c:find_initrd()
    9.48 +unsigned long initrd_start = 0, initrd_end = 0;
    9.49 +
    9.50 +extern int loadelfimage(char *);
    9.51 +extern int readelfimage_base_and_size(char *, unsigned long,
    9.52 +                  unsigned long *, unsigned long *, unsigned long *);
    9.53 +
    9.54 +unsigned long map_domain_page0(struct domain *);
    9.55 +extern unsigned long dom_fw_setup(struct domain *, char *, int);
    9.56 +
    9.57 +/* this belongs in include/asm, but there doesn't seem to be a suitable place */
    9.58 +void free_perdomain_pt(struct domain *d)
    9.59 +{
    9.60 +    dummy();
    9.61 +    //free_page((unsigned long)d->mm.perdomain_pt);
    9.62 +}
    9.63 +
    9.64 +int hlt_counter;
    9.65 +
    9.66 +void disable_hlt(void)
    9.67 +{
    9.68 +    hlt_counter++;
    9.69 +}
    9.70 +
    9.71 +void enable_hlt(void)
    9.72 +{
    9.73 +    hlt_counter--;
    9.74 +}
    9.75 +
    9.76 +static void default_idle(void)
    9.77 +{
    9.78 +    if ( hlt_counter == 0 )
    9.79 +    {
     9.80 +        local_irq_disable();
     9.81 +        if ( !softirq_pending(smp_processor_id()) )
     9.82 +            safe_halt();
     9.83 +        //else
     9.84 +        local_irq_enable();
    9.85 +    }
    9.86 +}
    9.87 +
    9.88 +void continue_cpu_idle_loop(void)
    9.89 +{
    9.90 +    int cpu = smp_processor_id();
    9.91 +    for ( ; ; )
    9.92 +    {
    9.93 +#ifdef IA64
    9.94 +//        __IRQ_STAT(cpu, idle_timestamp) = jiffies
    9.95 +#else
    9.96 +        irq_stat[cpu].idle_timestamp = jiffies;
    9.97 +#endif
    9.98 +        while ( !softirq_pending(cpu) )
    9.99 +            default_idle();
   9.100 +        do_softirq();
   9.101 +    }
   9.102 +}
   9.103 +
   9.104 +void startup_cpu_idle_loop(void)
   9.105 +{
   9.106 +    /* Just some sanity to ensure that the scheduler is set up okay. */
   9.107 +    ASSERT(current->domain == IDLE_DOMAIN_ID);
   9.108 +    domain_unpause_by_systemcontroller(current);
   9.109 +    __enter_scheduler();
   9.110 +
   9.111 +    /*
   9.112 +     * Declares CPU setup done to the boot processor.
   9.113 +     * Therefore memory barrier to ensure state is visible.
   9.114 +     */
   9.115 +    smp_mb();
   9.116 +    init_idle();
   9.117 +#if 0
   9.118 +//do we have to ensure the idle task has a shared page so that, for example,
   9.119 +//region registers can be loaded from it.  Apparently not...
   9.120 +	idle0_task.shared_info = (void *)alloc_xenheap_page();
   9.121 +	memset(idle0_task.shared_info, 0, PAGE_SIZE);
   9.122 +	/* pin mapping */
   9.123 +	// FIXME: Does this belong here?  Or do only at domain switch time?
   9.124 +	{
   9.125 +		/* WARNING: following must be inlined to avoid nested fault */
   9.126 +		unsigned long psr = ia64_clear_ic();
   9.127 +		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
   9.128 +		 pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
   9.129 +		 PAGE_SHIFT);
   9.130 +		ia64_set_psr(psr);
   9.131 +		ia64_srlz_i();
   9.132 +	}
   9.133 +#endif
   9.134 +
   9.135 +    continue_cpu_idle_loop();
   9.136 +}
   9.137 +
   9.138 +struct domain *arch_alloc_domain_struct(void)
   9.139 +{
   9.140 +	return xmem_cache_alloc(domain_struct_cachep);
   9.141 +}
   9.142 +
   9.143 +void arch_free_domain_struct(struct domain *d)
   9.144 +{
   9.145 +	xmem_cache_free(domain_struct_cachep,d);
   9.146 +}
   9.147 +
   9.148 +struct exec_domain *arch_alloc_exec_domain_struct(void)
   9.149 +{
   9.150 +	return alloc_task_struct();
   9.151 +}
   9.152 +
   9.153 +void arch_free_exec_domain_struct(struct exec_domain *ed)
   9.154 +{
   9.155 +	free_task_struct(ed);
   9.156 +}
   9.157 +
   9.158 +void arch_do_createdomain(struct exec_domain *ed)
   9.159 +{
   9.160 +	struct domain *d = ed->domain;
   9.161 +
   9.162 +	d->shared_info = (void *)alloc_xenheap_page();
   9.163 +	ed->vcpu_info = (void *)alloc_xenheap_page();
   9.164 +	if (!ed->vcpu_info) {
   9.165 +   		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
   9.166 +   		while (1);
   9.167 +	}
   9.168 +	memset(ed->vcpu_info, 0, PAGE_SIZE);
   9.169 +	/* pin mapping */
   9.170 +	// FIXME: Does this belong here?  Or do only at domain switch time?
   9.171 +#if 0
   9.172 +	// this is now done in ia64_new_rr7
   9.173 +	{
   9.174 +		/* WARNING: following must be inlined to avoid nested fault */
   9.175 +		unsigned long psr = ia64_clear_ic();
   9.176 +		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
   9.177 +		 pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
   9.178 +		 PAGE_SHIFT);
   9.179 +		ia64_set_psr(psr);
   9.180 +		ia64_srlz_i();
   9.181 +	}
   9.182 +#endif
   9.183 +	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
   9.184 +	if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
   9.185 +		BUG();
   9.186 +	ed->vcpu_info->arch.metaphysical_mode = 1;
   9.187 +#define DOMAIN_RID_BITS_DEFAULT 18
   9.188 +	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
   9.189 +		BUG();
   9.190 +	// the following will eventually need to be negotiated dynamically
   9.191 +	d->xen_vastart = 0xfffc000000000000;
   9.192 +	d->xen_vaend = 0xfffe000000000000;
   9.193 +	d->shared_info_va = 0xfffd000000000000;
   9.194 +	d->breakimm = 0x1000;
   9.195 +	// stay on kernel stack because may get interrupts!
   9.196 +	// ia64_ret_from_clone (which b0 gets in new_thread) switches
   9.197 +	// to user stack
   9.198 +	ed->thread.on_ustack = 0;
   9.199 +}
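
A sanity sketch (mine, not in the changeset) of the invariant those hard-wired constants are meant to establish:

	/* the shared_info mapping must fall inside the Xen-reserved range */
	ASSERT(d->xen_vastart <= d->shared_info_va &&
	       d->shared_info_va < d->xen_vaend);
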
   9.200 +
   9.201 +void arch_do_boot_vcpu(struct exec_domain *p)
   9.202 +{
   9.203 +	return;
   9.204 +}
   9.205 +
   9.206 +int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
   9.207 +{
   9.208 +	dummy();
   9.209 +	return 1;
   9.210 +}
   9.211 +
   9.212 +void domain_relinquish_memory(struct domain *d)
   9.213 +{
   9.214 +	dummy();
   9.215 +}
   9.216 +
   9.217 +// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   9.218 +// and linux/arch/ia64/kernel/process.c:kernel_thread()
   9.219 +void new_thread(struct exec_domain *ed,
   9.220 +                unsigned long start_pc,
   9.221 +                unsigned long start_stack,
   9.222 +                unsigned long start_info)
   9.223 +{
   9.224 +	struct domain *d = ed->domain;
   9.225 +	struct switch_stack *sw;
   9.226 +	struct pt_regs *regs;
   9.227 +	unsigned long new_rbs;
   9.228 +	struct ia64_boot_param *bp;
   9.229 +	extern char ia64_ret_from_clone;
   9.230 +	extern char saved_command_line[];
   9.231 +
   9.232 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   9.233 +	if (d == dom0) start_pc += dom0_start;
   9.234 +#endif
   9.235 +	regs = (struct pt_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
   9.236 +	sw = (struct switch_stack *) regs - 1;
   9.237 +	new_rbs = (unsigned long) ed + IA64_RBS_OFFSET;
    9.238 +	regs->cr_ipsr = (ia64_getreg(_IA64_REG_PSR)
    9.239 +		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN)
    9.240 +		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   9.241 +	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   9.242 +	regs->cr_iip = start_pc;
    9.243 +	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */
   9.244 +	regs->ar_rnat = 0;
   9.245 +	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   9.246 +	regs->loadrs = 0;
   9.247 +	//regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */
   9.248 +	//regs->r8 = 0x01234567890abcdef; // FIXME: temp marker
   9.249 +	//regs->r12 = ((unsigned long) regs - 16);	/* 16 byte scratch */
   9.250 +	regs->cr_ifs = 1UL << 63;
   9.251 +	regs->pr = 0;
   9.252 +	sw->pr = 0;
   9.253 +	regs->ar_pfs = 0;
   9.254 +	sw->ar_pfs = 0;
   9.255 +	sw->ar_bspstore = new_rbs;
   9.256 +	//regs->r13 = (unsigned long) ed;
   9.257 +printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   9.258 +ed,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
   9.259 +	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   9.260 +	ed->thread.ksp = (unsigned long) sw - 16;
   9.261 +	//ed->thread_info->flags = 0;
   9.262 +printk("new_thread, about to call init_all_rr\n");
   9.263 +	init_all_rr(ed);
   9.264 +	// set up boot parameters (and fake firmware)
   9.265 +printk("new_thread, about to call dom_fw_setup\n");
   9.266 +	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   9.267 +printk("new_thread, done with dom_fw_setup\n");
   9.268 +	// don't forget to set this!
   9.269 +	ed->vcpu_info->arch.banknum = 1;
   9.270 +}
   9.271 +
   9.272 +static struct page * map_new_domain0_page(unsigned long mpaddr)
   9.273 +{
   9.274 +	if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   9.275 +		printk("map_new_domain0_page: bad domain0 mpaddr %p!\n",mpaddr);
   9.276 +printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
   9.277 +		while(1);
   9.278 +	}
   9.279 +	return pfn_to_page((mpaddr >> PAGE_SHIFT));
   9.280 +}
   9.281 +
   9.282 +/* allocate new page for domain and map it to the specified metaphysical addr */
   9.283 +struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
   9.284 +{
   9.285 +	struct mm_struct *mm = d->arch.mm;
   9.286 +	struct page *p = (struct page *)0;
   9.287 +	pgd_t *pgd;
   9.288 +	pmd_t *pmd;
   9.289 +	pte_t *pte;
   9.290 +extern unsigned long vhpt_paddr, vhpt_pend;
   9.291 +
   9.292 +	if (!mm->pgd) {
   9.293 +		printk("map_new_domain_page: domain pgd must exist!\n");
   9.294 +		return(p);
   9.295 +	}
   9.296 +	pgd = pgd_offset(mm,mpaddr);
   9.297 +	if (pgd_none(*pgd))
   9.298 +		pgd_populate(mm, pgd, pmd_alloc_one(mm,mpaddr));
   9.299 +
   9.300 +	pmd = pmd_offset(pgd, mpaddr);
   9.301 +	if (pmd_none(*pmd))
   9.302 +		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
   9.303 +
   9.304 +	pte = pte_offset_map(pmd, mpaddr);
   9.305 +	if (pte_none(*pte)) {
   9.306 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   9.307 +		if (d == dom0) p = map_new_domain0_page(mpaddr);
   9.308 +		else
   9.309 +#endif
   9.310 +			p = alloc_page(GFP_KERNEL);
   9.311 +		if (unlikely(!p)) {
   9.312 +printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
   9.313 +			return(p);
   9.314 +		}
   9.315 +if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
   9.316 +  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
   9.317 +}
   9.318 +		set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
   9.319 +			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   9.320 +	}
   9.321 +	else printk("map_new_domain_page: page %p already mapped!\n",p);
   9.322 +	return p;
   9.323 +}
   9.324 +
   9.325 +void mpafoo(unsigned long mpaddr)
   9.326 +{
   9.327 +	extern unsigned long privop_trace;
   9.328 +	if (mpaddr == 0x3800)
   9.329 +		privop_trace = 1;
   9.330 +}
   9.331 +
   9.332 +unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
   9.333 +{
   9.334 +	struct mm_struct *mm = d->arch.mm;
   9.335 +	pgd_t *pgd = pgd_offset(mm, mpaddr);
   9.336 +	pmd_t *pmd;
   9.337 +	pte_t *pte;
   9.338 +
   9.339 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   9.340 +	if (d == dom0) {
   9.341 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   9.342 +			//printk("lookup_domain_mpa: bad dom0 mpaddr %p!\n",mpaddr);
   9.343 +//printk("lookup_domain_mpa: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
   9.344 +			mpafoo(mpaddr);
   9.345 +		}
   9.346 +		pte_t pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
   9.347 +			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
   9.348 +		pte = &pteval;
   9.349 +		return *(unsigned long *)pte;
   9.350 +	}
   9.351 +#endif
   9.352 +tryagain:
   9.353 +	if (pgd_present(*pgd)) {
   9.354 +		pmd = pmd_offset(pgd,mpaddr);
   9.355 +		if (pmd_present(*pmd)) {
   9.356 +			pte = pte_offset_map(pmd,mpaddr);
   9.357 +			if (pte_present(*pte)) {
   9.358 +//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
   9.359 +				return *(unsigned long *)pte;
   9.360 +			}
   9.361 +		}	
   9.362 +	}
   9.363 +	/* if lookup fails and mpaddr is "legal", "create" the page */
   9.364 +	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
   9.365 +		// FIXME: should zero out pages for security reasons
   9.366 +		if (map_new_domain_page(d,mpaddr)) goto tryagain;
   9.367 +	}
    9.368 +	printk("lookup_domain_mpa: bad mpa %p (> %p)\n",
   9.369 +		mpaddr,d->max_pages<<PAGE_SHIFT);
   9.370 +	mpafoo(mpaddr);
   9.371 +	return 0;
   9.372 +}
   9.373 +
   9.374 +// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
   9.375 +unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
   9.376 +{
   9.377 +	unsigned long pte = lookup_domain_mpa(d,mpaddr);
   9.378 +	unsigned long imva;
   9.379 +
   9.380 +	pte &= _PAGE_PPN_MASK;
   9.381 +	imva = __va(pte);
   9.382 +	imva |= mpaddr & ~PAGE_MASK;
   9.383 +	return(imva);
   9.384 +}
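
Putting the two together: the translation swaps the page frame but keeps the in-page offset, so, for illustration,

	unsigned long imva = domain_mpa_to_imva(d, 0x4008);
	/* imva == __va(pte & _PAGE_PPN_MASK) + 0x8,
	   where pte = lookup_domain_mpa(d, 0x4008) */
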
   9.385 +
   9.386 +// remove following line if not privifying in memory
   9.387 +//#define HAVE_PRIVIFY_MEMORY
   9.388 +#ifndef HAVE_PRIVIFY_MEMORY
   9.389 +#define	privify_memory(x,y) do {} while(0)
   9.390 +#endif
   9.391 +
   9.392 +// see arch/x86/xxx/domain_build.c
   9.393 +int elf_sanity_check(Elf_Ehdr *ehdr)
   9.394 +{
   9.395 +	return (IS_ELF(*ehdr));
   9.396 +}
   9.397 +
   9.398 +void loaddomainelfimage(struct domain *d, unsigned long image_start)
   9.399 +{
    9.400 +    char *elfbase = (char *)image_start;
   9.401 +    Elf_Ehdr *ehdr = (Elf_Ehdr *)image_start;
   9.402 +    Elf_Phdr *phdr;
    9.403 +    int h, filesz, memsz;
   9.404 +    unsigned long elfaddr, dom_mpaddr, dom_imva;
   9.405 +    struct page *p;
   9.406 +  
   9.407 +    for ( h = 0; h < ehdr->e_phnum; h++ ) {
   9.408 +        phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
   9.409 +        //if ( !is_loadable_phdr(phdr) )
   9.410 +        if ((phdr->p_type != PT_LOAD)) {
   9.411 +            continue;
   9.412 +	}
   9.413 +	filesz = phdr->p_filesz; memsz = phdr->p_memsz;
    9.414 +	elfaddr = (unsigned long)(elfbase + phdr->p_offset);
   9.415 +	dom_mpaddr = phdr->p_paddr;
   9.416 +//printf("p_offset: %x, size=%x\n",elfaddr,filesz);
   9.417 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   9.418 +	if (d == dom0) {
   9.419 +		if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) {
   9.420 +			printf("Domain0 doesn't fit in allocated space!\n");
   9.421 +			while(1);
   9.422 +		}
   9.423 +		dom_imva = __va(dom_mpaddr + dom0_start);
   9.424 +		memcpy(dom_imva,elfaddr,filesz);
   9.425 +		if (memsz > filesz) memset(dom_imva+filesz,0,memsz-filesz);
   9.426 +//FIXME: This test for code seems to find a lot more than objdump -x does
   9.427 +		if (phdr->p_flags & PF_X) privify_memory(dom_imva,filesz);
   9.428 +	}
   9.429 +	else
   9.430 +#endif
   9.431 +	while (memsz > 0) {
   9.432 +		p = map_new_domain_page(d,dom_mpaddr);
   9.433 +		if (unlikely(!p)) BUG();
   9.434 +		dom_imva = __va(page_to_phys(p));
   9.435 +		if (filesz > 0) {
   9.436 +			if (filesz >= PAGE_SIZE)
   9.437 +				memcpy(dom_imva,elfaddr,PAGE_SIZE);
   9.438 +			else { // copy partial page, zero the rest of page
   9.439 +				memcpy(dom_imva,elfaddr,filesz);
   9.440 +				memset(dom_imva+filesz,0,PAGE_SIZE-filesz);
   9.441 +			}
   9.442 +//FIXME: This test for code seems to find a lot more than objdump -x does
   9.443 +			if (phdr->p_flags & PF_X)
   9.444 +				privify_memory(dom_imva,PAGE_SIZE);
   9.445 +		}
   9.446 +		else if (memsz > 0) // always zero out entire page
   9.447 +			memset(dom_imva,0,PAGE_SIZE);
   9.448 +		memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
   9.449 +		elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
   9.450 +	}
   9.451 +    }
   9.452 +}
   9.453 +
   9.454 +
   9.455 +void alloc_dom0(void)
   9.456 +{
   9.457 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   9.458 +    if (platform_is_hp_ski()) {
   9.459 +	dom0_size = 128*1024*1024; //FIXME: Should be configurable
   9.460 +    }
    9.461 +    printf("alloc_dom0: starting (initializing %lu MB...)\n",dom0_size/(1024*1024));
   9.462 +    dom0_start = __alloc_bootmem(dom0_size,dom0_align,__pa(MAX_DMA_ADDRESS));
   9.463 +    if (!dom0_start) {
   9.464 +	printf("construct_dom0: can't allocate contiguous memory size=%p\n",
   9.465 +		dom0_size);
   9.466 +	while(1);
   9.467 +    }
   9.468 +    printf("alloc_dom0: dom0_start=%p\n",dom0_start);
   9.469 +#else
   9.470 +    dom0_start = 0;
   9.471 +#endif
   9.472 +
   9.473 +}
   9.474 +
   9.475 +int construct_dom0(struct domain *d, 
   9.476 +                   unsigned long alloc_start,
   9.477 +                   unsigned long alloc_end,
   9.478 +                   unsigned long image_start, unsigned long image_len, 
   9.479 +                   unsigned long initrd_start, unsigned long initrd_len,
   9.480 +                   char *cmdline)
   9.481 +{
   9.482 +    char *dst;
   9.483 +    int i, rc;
   9.484 +    unsigned long pfn, mfn;
   9.485 +    unsigned long nr_pt_pages;
   9.486 +    unsigned long count;
   9.487 +    //l2_pgentry_t *l2tab, *l2start;
   9.488 +    //l1_pgentry_t *l1tab = NULL, *l1start = NULL;
   9.489 +    struct pfn_info *page = NULL;
   9.490 +    start_info_t *si;
   9.491 +    struct exec_domain *ed = d->exec_domain[0];
   9.492 +
   9.493 +    struct domain_setup_info dsi;
   9.494 +    unsigned long p_start;
   9.495 +    unsigned long pkern_start;
   9.496 +    unsigned long pkern_entry;
   9.497 +    unsigned long pkern_end;
   9.498 +
   9.499 +    extern void physdev_init_dom0(struct domain *);
   9.500 +
   9.501 +//printf("construct_dom0: starting\n");
   9.502 +    /* Sanity! */
   9.503 +#ifndef CLONE_DOMAIN0
   9.504 +    if ( d != dom0 ) 
   9.505 +        BUG();
   9.506 +    if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) 
   9.507 +        BUG();
   9.508 +#endif
   9.509 +
   9.510 +    memset(&dsi, 0, sizeof(struct domain_setup_info));
   9.511 +
   9.512 +    printk("*** LOADING DOMAIN 0 ***\n");
   9.513 +
    9.514 +    d->max_pages = dom0_size/PAGE_SIZE;
    9.515 +    image_start = __va(ia64_boot_param->initrd_start); // dom0 image is passed in the initrd slot of the boot params
    9.516 +    image_len = ia64_boot_param->initrd_size;
   9.517 +//printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
   9.518 +//printk("First word of image: %lx\n",*(unsigned long *)image_start);
   9.519 +
   9.520 +//printf("construct_dom0: about to call parseelfimage\n");
   9.521 +    rc = parseelfimage(image_start, image_len, &dsi);
   9.522 +    if ( rc != 0 )
   9.523 +        return rc;
   9.524 +
   9.525 +    p_start = dsi.v_start;
   9.526 +    pkern_start = dsi.v_kernstart;
   9.527 +    pkern_end = dsi.v_kernend;
   9.528 +    pkern_entry = dsi.v_kernentry;
   9.529 +
   9.530 +//printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",p_start,pkern_start,pkern_end,pkern_entry);
   9.531 +
   9.532 +    if ( (p_start & (PAGE_SIZE-1)) != 0 )
   9.533 +    {
   9.534 +        printk("Initial guest OS must load to a page boundary.\n");
   9.535 +        return -EINVAL;
   9.536 +    }
   9.537 +
   9.538 +    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
   9.539 +           " Kernel image:  %lx->%lx\n"
   9.540 +           " Entry address: %lx\n"
   9.541 +           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
   9.542 +           pkern_start, pkern_end, pkern_entry);
   9.543 +
   9.544 +    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
   9.545 +    {
   9.546 +        printk("Initial guest OS requires too much space\n"
   9.547 +               "(%luMB is greater than %luMB limit)\n",
   9.548 +               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
   9.549 +        return -ENOMEM;
   9.550 +    }
   9.551 +
   9.552 +    // if high 3 bits of pkern start are non-zero, error
   9.553 +
   9.554 +    // if pkern end is after end of metaphysical memory, error
   9.555 +    //  (we should be able to deal with this... later)
   9.556 +
   9.557 +
   9.558 +    //
   9.559 +
   9.560 +#if 0
   9.561 +    strcpy(d->name,"Domain0");
   9.562 +#endif
   9.563 +    /* Set up shared-info area. */
   9.564 +    update_dom_time(d);
   9.565 +    d->shared_info->domain_time = 0;
   9.566 +
   9.567 +	// prepare domain0 pagetable (maps METAphysical to physical)
   9.568 +	// following is roughly mm_init() in linux/kernel/fork.c
   9.569 +	d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
   9.570 +	if (unlikely(!d->arch.mm)) {
   9.571 +        	printk("Can't allocate mm_struct for domain0\n");
   9.572 +        	return -ENOMEM;
   9.573 +	}
   9.574 +	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
   9.575 +	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
   9.576 +	if (unlikely(!d->arch.mm->pgd)) {
   9.577 +        	printk("Can't allocate pgd for domain0\n");
   9.578 +        	return -ENOMEM;
   9.579 +	}
   9.580 +
   9.581 +
   9.582 +    /* Mask all upcalls... */
   9.583 +    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   9.584 +        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   9.585 +
   9.586 +    /* Copy the OS image. */
   9.587 +    //(void)loadelfimage(image_start);
   9.588 +	loaddomainelfimage(d,image_start);
   9.589 +
   9.590 +    /* Copy the initial ramdisk. */
   9.591 +    //if ( initrd_len != 0 )
   9.592 +    //    memcpy((void *)vinitrd_start, initrd_start, initrd_len);
   9.593 +
   9.594 +#if 0
   9.595 +    /* Set up start info area. */
   9.596 +    //si = (start_info_t *)vstartinfo_start;
   9.597 +    memset(si, 0, PAGE_SIZE);
   9.598 +    si->nr_pages     = d->tot_pages;
   9.599 +    si->shared_info  = virt_to_phys(d->shared_info);
   9.600 +    si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
   9.601 +    //si->pt_base      = vpt_start;
   9.602 +    //si->nr_pt_frames = nr_pt_pages;
   9.603 +    //si->mfn_list     = vphysmap_start;
   9.604 +
   9.605 +    if ( initrd_len != 0 )
   9.606 +    {
   9.607 +        //si->mod_start = vinitrd_start;
   9.608 +        si->mod_len   = initrd_len;
   9.609 +        printk("Initrd len 0x%lx, start at 0x%08lx\n",
   9.610 +               si->mod_len, si->mod_start);
   9.611 +    }
   9.612 +
   9.613 +    dst = si->cmd_line;
   9.614 +    if ( cmdline != NULL )
   9.615 +    {
   9.616 +        for ( i = 0; i < 255; i++ )
   9.617 +        {
   9.618 +            if ( cmdline[i] == '\0' )
   9.619 +                break;
   9.620 +            *dst++ = cmdline[i];
   9.621 +        }
   9.622 +    }
   9.623 +    *dst = '\0';
   9.624 +
   9.625 +    zap_low_mappings(); /* Do the same for the idle page tables. */
   9.626 +#endif
   9.627 +    
   9.628 +    /* Give up the VGA console if DOM0 is configured to grab it. */
   9.629 +#ifdef IA64
   9.630 +	if (cmdline != NULL)
   9.631 +#endif
   9.632 +    console_endboot(strstr(cmdline, "tty0") != NULL);
   9.633 +
   9.634 +    /* DOM0 gets access to everything. */
   9.635 +#ifdef CLONE_DOMAIN0
   9.636 +if (d == dom0)
   9.637 +#endif
   9.638 +    physdev_init_dom0(d);
   9.639 +
   9.640 +    set_bit(DF_CONSTRUCTED, &d->d_flags);
   9.641 +
   9.642 +    new_thread(ed, pkern_entry, 0, 0);
   9.643 +    // FIXME: Hack for keyboard input
   9.644 +#ifdef CLONE_DOMAIN0
   9.645 +if (d == dom0)
   9.646 +#endif
   9.647 +    serial_input_init();
   9.648 +    if (d == dom0) {
   9.649 +    	ed->vcpu_info->arch.delivery_mask[0] = -1L;
   9.650 +    	ed->vcpu_info->arch.delivery_mask[1] = -1L;
   9.651 +    	ed->vcpu_info->arch.delivery_mask[2] = -1L;
   9.652 +    	ed->vcpu_info->arch.delivery_mask[3] = -1L;
   9.653 +    }
   9.654 +    else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   9.655 +
   9.656 +    return 0;
   9.657 +}
   9.658 +
   9.659 +void machine_restart(char * __unused)
   9.660 +{
   9.661 +	if (platform_is_hp_ski()) dummy();
   9.662 +	printf("machine_restart called: spinning....\n");
   9.663 +	while(1);
   9.664 +}
   9.665 +
   9.666 +void machine_halt(void)
   9.667 +{
   9.668 +	if (platform_is_hp_ski()) dummy();
   9.669 +	printf("machine_halt called: spinning....\n");
   9.670 +	while(1);
   9.671 +}
   9.672 +
   9.673 +void dummy(void)
   9.674 +{
   9.675 +	if (platform_is_hp_ski()) asm("break 0;;");
   9.676 +	printf("dummy called: spinning....\n");
   9.677 +	while(1);
   9.678 +}
   9.679 +
   9.680 +
   9.681 +void switch_to(struct exec_domain *prev, struct exec_domain *next)
   9.682 +{
   9.683 + 	struct exec_domain *last;
   9.684 +
   9.685 +	__switch_to(prev,next,last);
   9.686 +	//set_current(next);
   9.687 +}
   9.688 +
   9.689 +void domain_pend_keyboard_interrupt(int irq)
   9.690 +{
   9.691 +	vcpu_pend_interrupt(dom0->exec_domain[0],irq);
   9.692 +}
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/ia64/idle0_task.c	Mon Feb 14 12:25:30 2005 +0000
    10.3 @@ -0,0 +1,58 @@
    10.4 +#include <xen/config.h>
    10.5 +#include <xen/sched.h>
    10.6 +#include <asm/desc.h>
    10.7 +
    10.8 +#define INIT_MM(name) \
    10.9 +{			 					\
   10.10 +	.pgd		= swapper_pg_dir, 			\
   10.11 +	.mm_users	= ATOMIC_INIT(2), 			\
   10.12 +	.mm_count	= ATOMIC_INIT(1), 			\
   10.13 +	.page_table_lock =  SPIN_LOCK_UNLOCKED, 		\
   10.14 +	.mmlist		= LIST_HEAD_INIT(name.mmlist),		\
   10.15 +}
   10.16 +
   10.17 +#define IDLE0_EXEC_DOMAIN(_ed,_d)    \
   10.18 +{                                    \
   10.19 +    processor:   0,                  \
   10.20 +    mm:          0,                  \
   10.21 +    thread:      INIT_THREAD,        \
   10.22 +    domain:      (_d)                \
   10.23 +}
   10.24 +
   10.25 +#define IDLE0_DOMAIN(_t)             \
   10.26 +{                                    \
   10.27 +    id:          IDLE_DOMAIN_ID,     \
   10.28 +    d_flags:     1<<DF_IDLETASK,     \
   10.29 +    refcnt:      ATOMIC_INIT(1)      \
   10.30 +}
   10.31 +
   10.32 +struct mm_struct init_mm = INIT_MM(init_mm);
   10.33 +EXPORT_SYMBOL(init_mm);
   10.34 +
   10.35 +struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
   10.36 +#if 0
   10.37 +struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
   10.38 +                                                         &idle0_domain);
   10.39 +#endif
   10.40 +
   10.41 +
   10.42 +/*
   10.43 + * Initial task structure.
   10.44 + *
   10.45 + * We need to make sure that this is properly aligned due to the way process stacks are
   10.46 + * handled. This is done by having a special ".data.init_task" section...
   10.47 + */
   10.48 +union {
   10.49 +	struct {
   10.50 +		struct domain task;
   10.51 +	} s;
   10.52 +	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
   10.53 +} init_task_mem asm ("init_task") __attribute__((section(".data.init_task")));
    10.54 +// Static initializer disabled for now; it would look like:
    10.55 +// = {{
    10.56 +//	.task = IDLE0_EXEC_DOMAIN(init_task_mem.s.task, &idle0_domain),
    10.57 +// }};
    10.58 +
   10.59 +
   10.60 +EXPORT_SYMBOL(init_task);
   10.61 +
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/ia64/irq.c	Mon Feb 14 12:25:30 2005 +0000
    11.3 @@ -0,0 +1,1517 @@
    11.4 +/*
    11.5 + *	linux/arch/ia64/kernel/irq.c
    11.6 + *
    11.7 + *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
    11.8 + *
    11.9 + * This file contains the code used by various IRQ handling routines:
   11.10 + * asking for different IRQ's should be done through these routines
   11.11 + * instead of just grabbing them. Thus setups with different IRQ numbers
   11.12 + * shouldn't result in any weird surprises, and installing new handlers
   11.13 + * should be easier.
   11.14 + *
   11.15 + * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
   11.16 + *
   11.17 + * 4/14/2004: Added code to handle cpu migration and do safe irq
    11.18 + *			migration without losing interrupts for iosapic
   11.19 + *			architecture.
   11.20 + */
   11.21 +
   11.22 +/*
   11.23 + * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
   11.24 + *
   11.25 + * IRQs are in fact implemented a bit like signal handlers for the kernel.
   11.26 + * Naturally it's not a 1:1 relation, but there are similarities.
   11.27 + */
   11.28 +
   11.29 +#include <linux/config.h>
   11.30 +#include <linux/errno.h>
   11.31 +#include <linux/module.h>
   11.32 +#ifndef XEN
   11.33 +#include <linux/signal.h>
   11.34 +#endif
   11.35 +#include <linux/sched.h>
   11.36 +#include <linux/ioport.h>
   11.37 +#include <linux/interrupt.h>
   11.38 +#include <linux/timex.h>
   11.39 +#include <linux/slab.h>
   11.40 +#ifndef XEN
   11.41 +#include <linux/random.h>
   11.42 +#include <linux/cpu.h>
   11.43 +#endif
   11.44 +#include <linux/ctype.h>
   11.45 +#ifndef XEN
   11.46 +#include <linux/smp_lock.h>
   11.47 +#endif
   11.48 +#include <linux/init.h>
   11.49 +#ifndef XEN
   11.50 +#include <linux/kernel_stat.h>
   11.51 +#endif
   11.52 +#include <linux/irq.h>
   11.53 +#ifndef XEN
   11.54 +#include <linux/proc_fs.h>
   11.55 +#endif
   11.56 +#include <linux/seq_file.h>
   11.57 +#ifndef XEN
   11.58 +#include <linux/kallsyms.h>
   11.59 +#include <linux/notifier.h>
   11.60 +#endif
   11.61 +
   11.62 +#include <asm/atomic.h>
   11.63 +#ifndef XEN
   11.64 +#include <asm/cpu.h>
   11.65 +#endif
   11.66 +#include <asm/io.h>
   11.67 +#include <asm/smp.h>
   11.68 +#include <asm/system.h>
   11.69 +#include <asm/bitops.h>
   11.70 +#include <asm/uaccess.h>
   11.71 +#include <asm/pgalloc.h>
   11.72 +#ifndef XEN
   11.73 +#include <asm/tlbflush.h>
   11.74 +#endif
   11.75 +#include <asm/delay.h>
   11.76 +#include <asm/irq.h>
   11.77 +
   11.78 +#ifdef XEN
   11.79 +#include <xen/event.h>
   11.80 +#define _irq_desc irq_desc
    11.81 +#define irq_descp(irq) (&irq_desc[irq])
   11.82 +#define apicid_to_phys_cpu_present(x)	1
   11.83 +#endif
   11.84 +
   11.85 +
   11.86 +/*
   11.87 + * Linux has a controller-independent x86 interrupt architecture.
   11.88 + * every controller has a 'controller-template', that is used
   11.89 + * by the main code to do the right thing. Each driver-visible
   11.90 + * interrupt source is transparently wired to the appropriate
   11.91 + * controller. Thus drivers need not be aware of the
   11.92 + * interrupt-controller.
   11.93 + *
   11.94 + * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
   11.95 + * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
   11.96 + * (IO-APICs assumed to be messaging to Pentium local-APICs)
   11.97 + *
   11.98 + * the code is designed to be easily extended with new/different
   11.99 + * interrupt controllers, without having to do assembly magic.
  11.100 + */
  11.101 +
  11.102 +/*
  11.103 + * Controller mappings for all interrupt sources:
  11.104 + */
  11.105 +irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
  11.106 +	[0 ... NR_IRQS-1] = {
  11.107 +		.status = IRQ_DISABLED,
  11.108 +		.handler = &no_irq_type,
  11.109 +		.lock = SPIN_LOCK_UNLOCKED
  11.110 +	}
  11.111 +};
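
(The [0 ... NR_IRQS-1] element range in that initializer is a GCC extension, inherited from the upstream Linux file, so this table compiles only with gcc.)
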
  11.112 +
  11.113 +/*
  11.114 + * This is updated when the user sets irq affinity via /proc
  11.115 + */
  11.116 +cpumask_t    __cacheline_aligned pending_irq_cpumask[NR_IRQS];
  11.117 +
  11.118 +#ifdef CONFIG_IA64_GENERIC
  11.119 +irq_desc_t * __ia64_irq_desc (unsigned int irq)
  11.120 +{
  11.121 +	return _irq_desc + irq;
  11.122 +}
  11.123 +
  11.124 +ia64_vector __ia64_irq_to_vector (unsigned int irq)
  11.125 +{
  11.126 +	return (ia64_vector) irq;
  11.127 +}
  11.128 +
  11.129 +unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
  11.130 +{
  11.131 +	return (unsigned int) vec;
  11.132 +}
  11.133 +#endif
  11.134 +
  11.135 +static void register_irq_proc (unsigned int irq);
  11.136 +
  11.137 +/*
  11.138 + * Special irq handlers.
  11.139 + */
  11.140 +
  11.141 +#ifdef XEN
  11.142 +void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
  11.143 +#else
  11.144 +irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
  11.145 +{ return IRQ_NONE; }
  11.146 +#endif
  11.147 +
  11.148 +/*
  11.149 + * Generic no controller code
  11.150 + */
  11.151 +
  11.152 +static void enable_none(unsigned int irq) { }
  11.153 +static unsigned int startup_none(unsigned int irq) { return 0; }
  11.154 +static void disable_none(unsigned int irq) { }
  11.155 +static void ack_none(unsigned int irq)
  11.156 +{
  11.157 +/*
  11.158 + * 'what should we do if we get a hw irq event on an illegal vector'.
   11.159 + * each architecture has to answer this itself; it doesn't deserve
   11.160 + * a generic callback, I think.
  11.161 + */
  11.162 +#ifdef CONFIG_X86
  11.163 +	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
  11.164 +#ifdef CONFIG_X86_LOCAL_APIC
  11.165 +	/*
  11.166 +	 * Currently unexpected vectors happen only on SMP and APIC.
  11.167 +	 * We _must_ ack these because every local APIC has only N
  11.168 +	 * irq slots per priority level, and a 'hanging, unacked' IRQ
  11.169 +	 * holds up an irq slot - in excessive cases (when multiple
  11.170 +	 * unexpected vectors occur) that might lock up the APIC
  11.171 +	 * completely.
  11.172 +	 */
  11.173 +	ack_APIC_irq();
  11.174 +#endif
  11.175 +#endif
  11.176 +#ifdef CONFIG_IA64
  11.177 +	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
  11.178 +#endif
  11.179 +}
  11.180 +
  11.181 +/* startup is the same as "enable", shutdown is same as "disable" */
  11.182 +#define shutdown_none	disable_none
  11.183 +#define end_none	enable_none
  11.184 +
  11.185 +struct hw_interrupt_type no_irq_type = {
  11.186 +	"none",
  11.187 +	startup_none,
  11.188 +	shutdown_none,
  11.189 +	enable_none,
  11.190 +	disable_none,
  11.191 +	ack_none,
  11.192 +	end_none
  11.193 +};
  11.194 +
  11.195 +atomic_t irq_err_count;
  11.196 +#ifdef CONFIG_X86_IO_APIC
  11.197 +#ifdef APIC_MISMATCH_DEBUG
  11.198 +atomic_t irq_mis_count;
  11.199 +#endif
  11.200 +#endif
  11.201 +
  11.202 +/*
  11.203 + * Generic, controller-independent functions:
  11.204 + */
  11.205 +
  11.206 +#ifndef XEN
  11.207 +int show_interrupts(struct seq_file *p, void *v)
  11.208 +{
  11.209 +	int j, i = *(loff_t *) v;
  11.210 +	struct irqaction * action;
  11.211 +	irq_desc_t *idesc;
  11.212 +	unsigned long flags;
  11.213 +
  11.214 +	if (i == 0) {
  11.215 +		seq_puts(p, "           ");
  11.216 +		for (j=0; j<NR_CPUS; j++)
  11.217 +			if (cpu_online(j))
  11.218 +				seq_printf(p, "CPU%d       ",j);
  11.219 +		seq_putc(p, '\n');
  11.220 +	}
  11.221 +
  11.222 +	if (i < NR_IRQS) {
  11.223 +		idesc = irq_descp(i);
  11.224 +		spin_lock_irqsave(&idesc->lock, flags);
  11.225 +		action = idesc->action;
  11.226 +		if (!action)
  11.227 +			goto skip;
  11.228 +		seq_printf(p, "%3d: ",i);
  11.229 +#ifndef CONFIG_SMP
  11.230 +		seq_printf(p, "%10u ", kstat_irqs(i));
  11.231 +#else
  11.232 +		for (j = 0; j < NR_CPUS; j++)
  11.233 +			if (cpu_online(j))
  11.234 +				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
  11.235 +#endif
  11.236 +		seq_printf(p, " %14s", idesc->handler->typename);
  11.237 +		seq_printf(p, "  %s", action->name);
  11.238 +
  11.239 +		for (action=action->next; action; action = action->next)
  11.240 +			seq_printf(p, ", %s", action->name);
  11.241 +
  11.242 +		seq_putc(p, '\n');
  11.243 +skip:
  11.244 +		spin_unlock_irqrestore(&idesc->lock, flags);
  11.245 +	} else if (i == NR_IRQS) {
  11.246 +		seq_puts(p, "NMI: ");
  11.247 +		for (j = 0; j < NR_CPUS; j++)
  11.248 +			if (cpu_online(j))
  11.249 +				seq_printf(p, "%10u ", nmi_count(j));
  11.250 +		seq_putc(p, '\n');
  11.251 +#ifdef CONFIG_X86_LOCAL_APIC
  11.252 +		seq_puts(p, "LOC: ");
  11.253 +		for (j = 0; j < NR_CPUS; j++)
  11.254 +			if (cpu_online(j))
  11.255 +				seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
  11.256 +		seq_putc(p, '\n');
  11.257 +#endif
  11.258 +		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
  11.259 +#ifdef CONFIG_X86_IO_APIC
  11.260 +#ifdef APIC_MISMATCH_DEBUG
  11.261 +		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
  11.262 +#endif
  11.263 +#endif
  11.264 +	}
  11.265 +	return 0;
  11.266 +}
  11.267 +#endif
  11.268 +
  11.269 +#ifdef CONFIG_SMP
  11.270 +inline void synchronize_irq(unsigned int irq)
  11.271 +{
  11.272 +	while (irq_descp(irq)->status & IRQ_INPROGRESS)
  11.273 +		cpu_relax();
  11.274 +}
  11.275 +EXPORT_SYMBOL(synchronize_irq);
  11.276 +#endif
  11.277 +
  11.278 +/*
  11.279 + * This should really return information about whether
  11.280 + * we should do bottom half handling etc. Right now we
  11.281 + * end up _always_ checking the bottom half, which is a
  11.282 + * waste of time and is not what some drivers would
  11.283 + * prefer.
  11.284 + */
  11.285 +int handle_IRQ_event(unsigned int irq,
  11.286 +		struct pt_regs *regs, struct irqaction *action)
  11.287 +{
  11.288 +	int status = 1;	/* Force the "do bottom halves" bit */
  11.289 +	int retval = 0;
  11.290 +
  11.291 +#ifndef XEN
  11.292 +	if (!(action->flags & SA_INTERRUPT))
  11.293 +#endif
  11.294 +		local_irq_enable();
  11.295 +
  11.296 +#ifdef XEN
  11.297 +		action->handler(irq, action->dev_id, regs);
  11.298 +#else
  11.299 +	do {
  11.300 +		status |= action->flags;
  11.301 +		retval |= action->handler(irq, action->dev_id, regs);
  11.302 +		action = action->next;
  11.303 +	} while (action);
  11.304 +	if (status & SA_SAMPLE_RANDOM)
  11.305 +		add_interrupt_randomness(irq);
  11.306 +#endif
  11.307 +	local_irq_disable();
  11.308 +	return retval;
  11.309 +}
  11.310 +
  11.311 +#ifndef XEN
  11.312 +static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
  11.313 +{
  11.314 +	struct irqaction *action;
  11.315 +
  11.316 +	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
  11.317 +		printk(KERN_ERR "irq event %d: bogus return value %x\n",
  11.318 +				irq, action_ret);
  11.319 +	} else {
  11.320 +		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
  11.321 +	}
  11.322 +	dump_stack();
  11.323 +	printk(KERN_ERR "handlers:\n");
  11.324 +	action = desc->action;
  11.325 +	do {
  11.326 +		printk(KERN_ERR "[<%p>]", action->handler);
  11.327 +		print_symbol(" (%s)",
  11.328 +			(unsigned long)action->handler);
  11.329 +		printk("\n");
  11.330 +		action = action->next;
  11.331 +	} while (action);
  11.332 +}
  11.333 +
  11.334 +static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
  11.335 +{
  11.336 +	static int count = 100;
  11.337 +
  11.338 +	if (count) {
  11.339 +		count--;
  11.340 +		__report_bad_irq(irq, desc, action_ret);
  11.341 +	}
  11.342 +}
  11.343 +#endif
  11.344 +
  11.345 +static int noirqdebug;
  11.346 +
  11.347 +static int __init noirqdebug_setup(char *str)
  11.348 +{
  11.349 +	noirqdebug = 1;
  11.350 +	printk("IRQ lockup detection disabled\n");
  11.351 +	return 1;
  11.352 +}
  11.353 +
  11.354 +__setup("noirqdebug", noirqdebug_setup);
  11.355 +
  11.356 +/*
  11.357 + * If 99,900 of the previous 100,000 interrupts have not been handled then
  11.358 + * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
  11.359 + * turn the IRQ off.
  11.360 + *
  11.361 + * (The other 100-of-100,000 interrupts may have been a correctly-functioning
  11.362 + *  device sharing an IRQ with the failing one)
  11.363 + *
  11.364 + * Called under desc->lock
  11.365 + */
  11.366 +#ifndef XEN
  11.367 +static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
  11.368 +{
  11.369 +	if (action_ret != IRQ_HANDLED) {
  11.370 +		desc->irqs_unhandled++;
  11.371 +		if (action_ret != IRQ_NONE)
  11.372 +			report_bad_irq(irq, desc, action_ret);
  11.373 +	}
  11.374 +
  11.375 +	desc->irq_count++;
  11.376 +	if (desc->irq_count < 100000)
  11.377 +		return;
  11.378 +
  11.379 +	desc->irq_count = 0;
  11.380 +	if (desc->irqs_unhandled > 99900) {
  11.381 +		/*
  11.382 +		 * The interrupt is stuck
  11.383 +		 */
  11.384 +		__report_bad_irq(irq, desc, action_ret);
  11.385 +		/*
  11.386 +		 * Now kill the IRQ
  11.387 +		 */
  11.388 +		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
  11.389 +		desc->status |= IRQ_DISABLED;
  11.390 +		desc->handler->disable(irq);
  11.391 +	}
  11.392 +	desc->irqs_unhandled = 0;
  11.393 +}
  11.394 +#endif
  11.395 +
  11.396 +/*
  11.397 + * Generic enable/disable code: this just calls
  11.398 + * down into the PIC-specific version for the actual
  11.399 + * hardware disable after having gotten the irq
  11.400 + * controller lock.
  11.401 + */
  11.402 +
  11.403 +/**
  11.404 + *	disable_irq_nosync - disable an irq without waiting
  11.405 + *	@irq: Interrupt to disable
  11.406 + *
  11.407 + *	Disable the selected interrupt line.  Disables and Enables are
  11.408 + *	nested.
  11.409 + *	Unlike disable_irq(), this function does not ensure existing
  11.410 + *	instances of the IRQ handler have completed before returning.
  11.411 + *
  11.412 + *	This function may be called from IRQ context.
  11.413 + */
  11.414 +
  11.415 +inline void disable_irq_nosync(unsigned int irq)
  11.416 +{
  11.417 +	irq_desc_t *desc = irq_descp(irq);
  11.418 +	unsigned long flags;
  11.419 +
  11.420 +	spin_lock_irqsave(&desc->lock, flags);
  11.421 +	if (!desc->depth++) {
  11.422 +		desc->status |= IRQ_DISABLED;
  11.423 +		desc->handler->disable(irq);
  11.424 +	}
  11.425 +	spin_unlock_irqrestore(&desc->lock, flags);
  11.426 +}
  11.427 +EXPORT_SYMBOL(disable_irq_nosync);
  11.428 +
  11.429 +/**
  11.430 + *	disable_irq - disable an irq and wait for completion
  11.431 + *	@irq: Interrupt to disable
  11.432 + *
  11.433 + *	Disable the selected interrupt line.  Enables and Disables are
  11.434 + *	nested.
  11.435 + *	This function waits for any pending IRQ handlers for this interrupt
  11.436 + *	to complete before returning. If you use this function while
  11.437 + *	holding a resource the IRQ handler may need you will deadlock.
  11.438 + *
  11.439 + *	This function may be called - with care - from IRQ context.
  11.440 + */
  11.441 +
  11.442 +void disable_irq(unsigned int irq)
  11.443 +{
  11.444 +	irq_desc_t *desc = irq_descp(irq);
  11.445 +
  11.446 +	disable_irq_nosync(irq);
  11.447 +	if (desc->action)
  11.448 +		synchronize_irq(irq);
  11.449 +}
  11.450 +EXPORT_SYMBOL(disable_irq);
  11.451 +
  11.452 +/**
  11.453 + *	enable_irq - enable handling of an irq
  11.454 + *	@irq: Interrupt to enable
  11.455 + *
  11.456 + *	Undoes the effect of one call to disable_irq().  If this
  11.457 + *	matches the last disable, processing of interrupts on this
  11.458 + *	IRQ line is re-enabled.
  11.459 + *
  11.460 + *	This function may be called from IRQ context.
  11.461 + */
  11.462 +
  11.463 +void enable_irq(unsigned int irq)
  11.464 +{
  11.465 +	irq_desc_t *desc = irq_descp(irq);
  11.466 +	unsigned long flags;
  11.467 +
  11.468 +	spin_lock_irqsave(&desc->lock, flags);
  11.469 +	switch (desc->depth) {
  11.470 +	case 1: {
  11.471 +		unsigned int status = desc->status & ~IRQ_DISABLED;
  11.472 +		desc->status = status;
  11.473 +#ifndef XEN
  11.474 +		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
  11.475 +			desc->status = status | IRQ_REPLAY;
  11.476 +			hw_resend_irq(desc->handler,irq);
  11.477 +		}
  11.478 +#endif
  11.479 +		desc->handler->enable(irq);
  11.480 +		/* fall-through */
  11.481 +	}
  11.482 +	default:
  11.483 +		desc->depth--;
  11.484 +		break;
  11.485 +	case 0:
  11.486 +		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
  11.487 +		       irq, (void *) __builtin_return_address(0));
  11.488 +	}
  11.489 +	spin_unlock_irqrestore(&desc->lock, flags);
  11.490 +}
  11.491 +EXPORT_SYMBOL(enable_irq);
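          +
          +/*
          + * Illustrative sketch of the nesting semantics above ("dev_irq" is a
          + * hypothetical IRQ number):
          + *
          + *	disable_irq(dev_irq);	// depth 0 -> 1, line masked
          + *	disable_irq(dev_irq);	// depth 1 -> 2, still masked
          + *	enable_irq(dev_irq);	// depth 2 -> 1, still masked
          + *	enable_irq(dev_irq);	// depth 1 -> 0, line enabled again
          + *
          + * As noted above, do not call disable_irq() while holding a resource
          + * the IRQ handler may need, or the wait for completion can deadlock.
          + */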
  11.492 +
  11.493 +/*
  11.494 + * do_IRQ handles all normal device IRQ's (the special
  11.495 + * SMP cross-CPU interrupts have their own specific
  11.496 + * handlers).
  11.497 + */
  11.498 +unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
  11.499 +{
  11.500 +	/*
  11.501 +	 * We ack quickly, we don't want the irq controller
  11.502 +	 * thinking we're snobs just because some other CPU has
  11.503 +	 * disabled global interrupts (we have already done the
  11.504 +	 * INT_ACK cycles, it's too late to try to pretend to the
  11.505 +	 * controller that we aren't taking the interrupt).
  11.506 +	 *
  11.507 +	 * 0 return value means that this irq is already being
  11.508 +	 * handled by some other CPU. (or is disabled)
  11.509 +	 */
  11.510 +	irq_desc_t *desc = irq_descp(irq);
  11.511 +	struct irqaction * action;
  11.512 +	irqreturn_t action_ret;
  11.513 +	unsigned int status;
  11.514 +	int cpu;
  11.515 +
  11.516 +	cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */
  11.517 +
  11.518 +#ifndef XEN
  11.519 +	kstat_cpu(cpu).irqs[irq]++;
  11.520 +#endif
  11.521 +
  11.522 +	if (desc->status & IRQ_PER_CPU) {
  11.523 +		/* no locking required for CPU-local interrupts: */
  11.524 +		desc->handler->ack(irq);
  11.525 +		action_ret = handle_IRQ_event(irq, regs, desc->action);
  11.526 +		desc->handler->end(irq);
  11.527 +	} else {
  11.528 +		spin_lock(&desc->lock);
  11.529 +		desc->handler->ack(irq);
  11.530 +		/*
  11.531 +		 * REPLAY is when Linux resends an IRQ that was dropped earlier
  11.532 +		 * WAITING is used by probe to mark irqs that are being tested
  11.533 +		 */
  11.534 +#ifdef XEN
  11.535 +		status = desc->status & ~IRQ_REPLAY;
  11.536 +#else
  11.537 +		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
  11.538 +#endif
  11.539 +		status |= IRQ_PENDING; /* we _want_ to handle it */
  11.540 +
  11.541 +		/*
  11.542 +		 * If the IRQ is disabled for whatever reason, we cannot
  11.543 +		 * use the action we have.
  11.544 +		 */
  11.545 +		action = NULL;
  11.546 +		if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
  11.547 +			action = desc->action;
  11.548 +			status &= ~IRQ_PENDING; /* we commit to handling */
  11.549 +			status |= IRQ_INPROGRESS; /* we are handling it */
  11.550 +		}
  11.551 +		desc->status = status;
  11.552 +
  11.553 +		/*
  11.554 +		 * If there is no IRQ handler or it was disabled, exit early.
  11.555 +		 * Since we set PENDING, if another processor is handling
  11.556 +		 * a different instance of this same irq, the other processor
  11.557 +		 * will take care of it.
  11.558 +		 */
  11.559 +		if (unlikely(!action))
  11.560 +			goto out;
  11.561 +
  11.562 +		/*
  11.563 +		 * Edge triggered interrupts need to remember
  11.564 +		 * pending events.
  11.565 +		 * This applies to any hw interrupts that allow a second
  11.566 +		 * instance of the same irq to arrive while we are in do_IRQ
  11.567 +		 * or in the handler. But the code here only handles the _second_
  11.568 +		 * instance of the irq, not the third or fourth. So it is mostly
  11.569 +		 * useful for irq hardware that does not mask cleanly in an
  11.570 +		 * SMP environment.
  11.571 +		 */
  11.572 +		for (;;) {
  11.573 +			spin_unlock(&desc->lock);
  11.574 +			action_ret = handle_IRQ_event(irq, regs, action);
  11.575 +			spin_lock(&desc->lock);
  11.576 +#ifndef XEN
  11.577 +			if (!noirqdebug)
  11.578 +				note_interrupt(irq, desc, action_ret);
  11.579 +#endif
  11.580 +			if (!(desc->status & IRQ_PENDING))
  11.581 +				break;
  11.582 +			desc->status &= ~IRQ_PENDING;
  11.583 +		}
  11.584 +		desc->status &= ~IRQ_INPROGRESS;
  11.585 +	  out:
  11.586 +		/*
  11.587 +		 * The ->end() handler has to deal with interrupts which got
  11.588 +		 * disabled while the handler was running.
  11.589 +		 */
  11.590 +		desc->handler->end(irq);
  11.591 +		spin_unlock(&desc->lock);
  11.592 +	}
  11.593 +	return 1;
  11.594 +}
  11.595 +
  11.596 +/**
  11.597 + *	request_irq - allocate an interrupt line
  11.598 + *	@irq: Interrupt line to allocate
  11.599 + *	@handler: Function to be called when the IRQ occurs
  11.600 + *	@irqflags: Interrupt type flags
  11.601 + *	@devname: An ascii name for the claiming device
  11.602 + *	@dev_id: A cookie passed back to the handler function
  11.603 + *
  11.604 + *	This call allocates interrupt resources and enables the
  11.605 + *	interrupt line and IRQ handling. From the point this
  11.606 + *	call is made your handler function may be invoked. Since
  11.607 + *	your handler function must clear any interrupt the board 
  11.608 + *	raises, you must take care both to initialise your hardware
  11.609 + *	and to set up the interrupt handler in the right order.
  11.610 + *
  11.611 + *	Dev_id must be globally unique. Normally the address of the
  11.612 + *	device data structure is used as the cookie. Since the handler
  11.613 + *	receives this value it makes sense to use it.
  11.614 + *
  11.615 + *	If your interrupt is shared you must pass a non NULL dev_id
  11.616 + *	as this is required when freeing the interrupt.
  11.617 + *
  11.618 + *	Flags:
  11.619 + *
  11.620 + *	SA_SHIRQ		Interrupt is shared
  11.621 + *
  11.622 + *	SA_INTERRUPT		Disable local interrupts while processing
  11.623 + *
  11.624 + *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
  11.625 + *
  11.626 + */
  11.627 +
  11.628 +int request_irq(unsigned int irq,
  11.629 +		irqreturn_t (*handler)(int, void *, struct pt_regs *),
  11.630 +		unsigned long irqflags,
  11.631 +		const char * devname,
  11.632 +		void *dev_id)
  11.633 +{
  11.634 +	int retval;
  11.635 +	struct irqaction * action;
  11.636 +
  11.637 +#if 1
  11.638 +	/*
  11.639 +	 * Sanity-check: shared interrupts should REALLY pass in
  11.640 +	 * a real dev-ID, otherwise we'll have trouble later trying
  11.641 +	 * to figure out which interrupt is which (messes up the
  11.642 +	 * interrupt freeing logic etc).
  11.643 +	 */
  11.644 +	if (irqflags & SA_SHIRQ) {
  11.645 +		if (!dev_id)
  11.646 +			printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
  11.647 +	}
  11.648 +#endif
  11.649 +
  11.650 +	if (irq >= NR_IRQS)
  11.651 +		return -EINVAL;
  11.652 +	if (!handler)
  11.653 +		return -EINVAL;
  11.654 +
  11.655 +	action = (struct irqaction *)
  11.656 +			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
  11.657 +	if (!action)
  11.658 +		return -ENOMEM;
  11.659 +
  11.660 +	action->handler = handler;
  11.661 +#ifndef XEN
  11.662 +	action->flags = irqflags;
  11.663 +	action->mask = 0;
  11.664 +#endif
  11.665 +	action->name = devname;
  11.666 +#ifndef XEN
  11.667 +	action->next = NULL;
  11.668 +#endif
  11.669 +	action->dev_id = dev_id;
  11.670 +
  11.671 +	retval = setup_irq(irq, action);
  11.672 +	if (retval)
  11.673 +		kfree(action);
  11.674 +	return retval;
  11.675 +}
  11.676 +
  11.677 +EXPORT_SYMBOL(request_irq);
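          +
          +/*
          + * Illustrative sketch of a caller (all names here -- mydev, mydev_intr,
          + * MYDEV_IRQ -- are hypothetical):
          + *
          + *	static irqreturn_t mydev_intr(int irq, void *dev_id,
          + *	                              struct pt_regs *regs)
          + *	{
          + *		// clear the interrupt condition on the board first
          + *		return IRQ_HANDLED;
          + *	}
          + *
          + *	if (request_irq(MYDEV_IRQ, mydev_intr, SA_SHIRQ, "mydev", &mydev))
          + *		// handle -EINVAL/-EBUSY/-ENOMEM here
          + *
          + * The same dev_id cookie (&mydev) must be passed to the non-XEN
          + * free_irq() later so the right shared handler is removed.
          + */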
  11.678 +
  11.679 +/**
  11.680 + *	free_irq - free an interrupt
  11.681 + *	@irq: Interrupt line to free
  11.682 + *	@dev_id: Device identity to free
  11.683 + *
  11.684 + *	Remove an interrupt handler. The handler is removed and if the
  11.685 + *	interrupt line is no longer in use by any driver it is disabled.
  11.686 + *	On a shared IRQ the caller must ensure the interrupt is disabled
  11.687 + *	on the card it drives before calling this function. The function
  11.688 + *	does not return until any executing interrupts for this IRQ
  11.689 + *	have completed.
  11.690 + *
  11.691 + *	This function must not be called from interrupt context.
  11.692 + */
  11.693 +
  11.694 +#ifdef XEN
  11.695 +void free_irq(unsigned int irq)
  11.696 +#else
  11.697 +void free_irq(unsigned int irq, void *dev_id)
  11.698 +#endif
  11.699 +{
  11.700 +	irq_desc_t *desc;
  11.701 +	struct irqaction **p;
  11.702 +	unsigned long flags;
  11.703 +
  11.704 +	if (irq >= NR_IRQS)
  11.705 +		return;
  11.706 +
  11.707 +	desc = irq_descp(irq);
  11.708 +	spin_lock_irqsave(&desc->lock,flags);
  11.709 +#ifdef XEN
  11.710 +	if (desc->action) {
  11.711 +		struct irqaction * action = desc->action;
  11.712 +		desc->action = NULL;
  11.713 +#else
  11.714 +	p = &desc->action;
  11.715 +	for (;;) {
  11.716 +		struct irqaction * action = *p;
  11.717 +		if (action) {
  11.718 +			struct irqaction **pp = p;
  11.719 +			p = &action->next;
  11.720 +			if (action->dev_id != dev_id)
  11.721 +				continue;
  11.722 +
  11.723 +			/* Found it - now remove it from the list of entries */
  11.724 +			*pp = action->next;
  11.725 +			if (!desc->action) {
  11.726 +#endif
  11.727 +				desc->status |= IRQ_DISABLED;
  11.728 +				desc->handler->shutdown(irq);
  11.729 +#ifndef XEN
  11.730 +			}
  11.731 +#endif
  11.732 +			spin_unlock_irqrestore(&desc->lock,flags);
  11.733 +
  11.734 +			/* Wait to make sure it's not being used on another CPU */
  11.735 +			synchronize_irq(irq);
  11.736 +			kfree(action);
  11.737 +			return;
  11.738 +		}
   11.739 +		printk(KERN_ERR "Trying to free already-free IRQ%d\n",irq);
  11.740 +		spin_unlock_irqrestore(&desc->lock,flags);
  11.741 +#ifndef XEN
  11.742 +		return;
  11.743 +	}
  11.744 +#endif
  11.745 +}
  11.746 +
  11.747 +EXPORT_SYMBOL(free_irq);
  11.748 +
  11.749 +/*
  11.750 + * IRQ autodetection code..
  11.751 + *
  11.752 + * This depends on the fact that any interrupt that
   11.753 + * comes in on a line with no assigned handler will get stuck
  11.754 + * with "IRQ_WAITING" cleared and the interrupt
  11.755 + * disabled.
  11.756 + */
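          +
          +/*
          + * Illustrative probing sequence (a sketch; trigger_device_irq() is a
          + * hypothetical stand-in for making the device raise its line):
          + *
          + *	unsigned long mask = probe_irq_on();
          + *	trigger_device_irq();		// device asserts its interrupt
          + *	irq = probe_irq_off(mask);	// >0 found, 0 none, <0 ambiguous
          + */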
  11.757 +
  11.758 +static DECLARE_MUTEX(probe_sem);
  11.759 +
  11.760 +/**
  11.761 + *	probe_irq_on	- begin an interrupt autodetect
  11.762 + *
  11.763 + *	Commence probing for an interrupt. The interrupts are scanned
  11.764 + *	and a mask of potential interrupt lines is returned.
  11.765 + *
  11.766 + */
  11.767 +
  11.768 +#ifndef XEN
  11.769 +unsigned long probe_irq_on(void)
  11.770 +{
  11.771 +	unsigned int i;
  11.772 +	irq_desc_t *desc;
  11.773 +	unsigned long val;
  11.774 +	unsigned long delay;
  11.775 +
  11.776 +	down(&probe_sem);
  11.777 +	/*
  11.778 +	 * something may have generated an irq long ago and we want to
  11.779 +	 * flush such a longstanding irq before considering it as spurious.
  11.780 +	 */
  11.781 +	for (i = NR_IRQS-1; i > 0; i--)  {
  11.782 +		desc = irq_descp(i);
  11.783 +
  11.784 +		spin_lock_irq(&desc->lock);
  11.785 +		if (!desc->action)
  11.786 +			desc->handler->startup(i);
  11.787 +		spin_unlock_irq(&desc->lock);
  11.788 +	}
  11.789 +
  11.790 +	/* Wait for longstanding interrupts to trigger. */
  11.791 +	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
  11.792 +		/* about 20ms delay */ barrier();
  11.793 +
  11.794 +	/*
  11.795 +	 * enable any unassigned irqs
  11.796 +	 * (we must startup again here because if a longstanding irq
  11.797 +	 * happened in the previous stage, it may have masked itself)
  11.798 +	 */
  11.799 +	for (i = NR_IRQS-1; i > 0; i--) {
  11.800 +		desc = irq_descp(i);
  11.801 +
  11.802 +		spin_lock_irq(&desc->lock);
  11.803 +		if (!desc->action) {
  11.804 +			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
  11.805 +			if (desc->handler->startup(i))
  11.806 +				desc->status |= IRQ_PENDING;
  11.807 +		}
  11.808 +		spin_unlock_irq(&desc->lock);
  11.809 +	}
  11.810 +
  11.811 +	/*
  11.812 +	 * Wait for spurious interrupts to trigger
  11.813 +	 */
  11.814 +	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
  11.815 +		/* about 100ms delay */ barrier();
  11.816 +
  11.817 +	/*
  11.818 +	 * Now filter out any obviously spurious interrupts
  11.819 +	 */
  11.820 +	val = 0;
  11.821 +	for (i = 0; i < NR_IRQS; i++) {
  11.822 +		irq_desc_t *desc = irq_descp(i);
  11.823 +		unsigned int status;
  11.824 +
  11.825 +		spin_lock_irq(&desc->lock);
  11.826 +		status = desc->status;
  11.827 +
  11.828 +		if (status & IRQ_AUTODETECT) {
  11.829 +			/* It triggered already - consider it spurious. */
  11.830 +			if (!(status & IRQ_WAITING)) {
  11.831 +				desc->status = status & ~IRQ_AUTODETECT;
  11.832 +				desc->handler->shutdown(i);
  11.833 +			} else
  11.834 +				if (i < 32)
  11.835 +					val |= 1 << i;
  11.836 +		}
  11.837 +		spin_unlock_irq(&desc->lock);
  11.838 +	}
  11.839 +
  11.840 +	return val;
  11.841 +}
  11.842 +
  11.843 +EXPORT_SYMBOL(probe_irq_on);
  11.844 +
  11.845 +/**
  11.846 + *	probe_irq_mask - scan a bitmap of interrupt lines
  11.847 + *	@val:	mask of interrupts to consider
  11.848 + *
  11.849 + *	Scan the ISA bus interrupt lines and return a bitmap of
  11.850 + *	active interrupts. The interrupt probe logic state is then
  11.851 + *	returned to its previous value.
  11.852 + *
  11.853 + *	Note: we need to scan all the irq's even though we will
  11.854 + *	only return ISA irq numbers - just so that we reset them
  11.855 + *	all to a known state.
  11.856 + */
  11.857 +unsigned int probe_irq_mask(unsigned long val)
  11.858 +{
  11.859 +	int i;
  11.860 +	unsigned int mask;
  11.861 +
  11.862 +	mask = 0;
  11.863 +	for (i = 0; i < 16; i++) {
  11.864 +		irq_desc_t *desc = irq_descp(i);
  11.865 +		unsigned int status;
  11.866 +
  11.867 +		spin_lock_irq(&desc->lock);
  11.868 +		status = desc->status;
  11.869 +
  11.870 +		if (status & IRQ_AUTODETECT) {
  11.871 +			if (!(status & IRQ_WAITING))
  11.872 +				mask |= 1 << i;
  11.873 +
  11.874 +			desc->status = status & ~IRQ_AUTODETECT;
  11.875 +			desc->handler->shutdown(i);
  11.876 +		}
  11.877 +		spin_unlock_irq(&desc->lock);
  11.878 +	}
  11.879 +	up(&probe_sem);
  11.880 +
  11.881 +	return mask & val;
  11.882 +}
  11.883 +EXPORT_SYMBOL(probe_irq_mask);
  11.884 +
  11.885 +/**
  11.886 + *	probe_irq_off	- end an interrupt autodetect
  11.887 + *	@val: mask of potential interrupts (unused)
  11.888 + *
  11.889 + *	Scans the unused interrupt lines and returns the line which
  11.890 + *	appears to have triggered the interrupt. If no interrupt was
  11.891 + *	found then zero is returned. If more than one interrupt is
  11.892 + *	found then minus the first candidate is returned to indicate
   11.893 + * there is doubt.
  11.894 + *
  11.895 + *	The interrupt probe logic state is returned to its previous
  11.896 + *	value.
  11.897 + *
  11.898 + *	BUGS: When used in a module (which arguably shouldn't happen)
  11.899 + *	nothing prevents two IRQ probe callers from overlapping. The
  11.900 + *	results of this are non-optimal.
  11.901 + */
  11.902 +
  11.903 +int probe_irq_off(unsigned long val)
  11.904 +{
  11.905 +	int i, irq_found, nr_irqs;
  11.906 +
  11.907 +	nr_irqs = 0;
  11.908 +	irq_found = 0;
  11.909 +	for (i = 0; i < NR_IRQS; i++) {
  11.910 +		irq_desc_t *desc = irq_descp(i);
  11.911 +		unsigned int status;
  11.912 +
  11.913 +		spin_lock_irq(&desc->lock);
  11.914 +		status = desc->status;
  11.915 +
  11.916 +		if (status & IRQ_AUTODETECT) {
  11.917 +			if (!(status & IRQ_WAITING)) {
  11.918 +				if (!nr_irqs)
  11.919 +					irq_found = i;
  11.920 +				nr_irqs++;
  11.921 +			}
  11.922 +			desc->status = status & ~IRQ_AUTODETECT;
  11.923 +			desc->handler->shutdown(i);
  11.924 +		}
  11.925 +		spin_unlock_irq(&desc->lock);
  11.926 +	}
  11.927 +	up(&probe_sem);
  11.928 +
  11.929 +	if (nr_irqs > 1)
  11.930 +		irq_found = -irq_found;
  11.931 +	return irq_found;
  11.932 +}
  11.933 +
  11.934 +EXPORT_SYMBOL(probe_irq_off);
  11.935 +#endif
  11.936 +
  11.937 +int setup_irq(unsigned int irq, struct irqaction * new)
  11.938 +{
  11.939 +	int shared = 0;
  11.940 +	unsigned long flags;
  11.941 +	struct irqaction *old, **p;
  11.942 +	irq_desc_t *desc = irq_descp(irq);
  11.943 +
  11.944 +#ifndef XEN
  11.945 +	if (desc->handler == &no_irq_type)
  11.946 +		return -ENOSYS;
  11.947 +	/*
  11.948 +	 * Some drivers like serial.c use request_irq() heavily,
  11.949 +	 * so we have to be careful not to interfere with a
  11.950 +	 * running system.
  11.951 +	 */
  11.952 +	if (new->flags & SA_SAMPLE_RANDOM) {
  11.953 +		/*
  11.954 +		 * This function might sleep, we want to call it first,
  11.955 +		 * outside of the atomic block.
   11.956 +		 * Yes, this might clear the entropy pool if someone
   11.957 +		 * attempts to load the wrong driver without actually
   11.958 +		 * installing a new handler, but that is hardly a problem:
  11.959 +		 * only the sysadmin is able to do this.
  11.960 +		 */
  11.961 +		rand_initialize_irq(irq);
  11.962 +	}
  11.963 +
  11.964 +	if (new->flags & SA_PERCPU_IRQ) {
  11.965 +		desc->status |= IRQ_PER_CPU;
  11.966 +		desc->handler = &irq_type_ia64_lsapic;
  11.967 +	}
  11.968 +#endif
  11.969 +
  11.970 +	/*
  11.971 +	 * The following block of code has to be executed atomically
  11.972 +	 */
  11.973 +	spin_lock_irqsave(&desc->lock,flags);
  11.974 +	p = &desc->action;
  11.975 +	if ((old = *p) != NULL) {
  11.976 +#ifdef XEN
  11.977 +		if (1) {
  11.978 +		/* Can't share interrupts unless both agree to */
  11.979 +#else
  11.980 +		if (!(old->flags & new->flags & SA_SHIRQ)) {
  11.981 +#endif
  11.982 +			spin_unlock_irqrestore(&desc->lock,flags);
  11.983 +			return -EBUSY;
  11.984 +		}
  11.985 +
  11.986 +#ifndef XEN
  11.987 +		/* add new interrupt at end of irq queue */
  11.988 +		do {
  11.989 +			p = &old->next;
  11.990 +			old = *p;
  11.991 +		} while (old);
  11.992 +		shared = 1;
  11.993 +#endif
  11.994 +	}
  11.995 +
  11.996 +	*p = new;
  11.997 +
  11.998 +#ifndef XEN
  11.999 +	if (!shared) {
 11.1000 +#else
 11.1001 +	{
 11.1002 +#endif
 11.1003 +		desc->depth = 0;
 11.1004 +#ifdef XEN
 11.1005 +		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
 11.1006 +#else
 11.1007 +		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
 11.1008 +#endif
 11.1009 +		desc->handler->startup(irq);
 11.1010 +	}
 11.1011 +	spin_unlock_irqrestore(&desc->lock,flags);
 11.1012 +
 11.1013 +#ifndef XEN
 11.1014 +	register_irq_proc(irq);
 11.1015 +#endif
 11.1016 +	return 0;
 11.1017 +}
 11.1018 +
 11.1019 +static struct proc_dir_entry * root_irq_dir;
 11.1020 +static struct proc_dir_entry * irq_dir [NR_IRQS];
 11.1021 +
 11.1022 +#ifdef CONFIG_SMP
 11.1023 +
 11.1024 +static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 11.1025 +
 11.1026 +static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 11.1027 +
 11.1028 +static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 11.1029 +
 11.1030 +void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 11.1031 +{
 11.1032 +	cpumask_t mask = CPU_MASK_NONE;
 11.1033 +
 11.1034 +	cpu_set(cpu_logical_id(hwid), mask);
 11.1035 +
 11.1036 +	if (irq < NR_IRQS) {
 11.1037 +		irq_affinity[irq] = mask;
 11.1038 +		irq_redir[irq] = (char) (redir & 0xff);
 11.1039 +	}
 11.1040 +}
 11.1041 +
 11.1042 +static int irq_affinity_read_proc (char *page, char **start, off_t off,
 11.1043 +			int count, int *eof, void *data)
 11.1044 +{
 11.1045 +	int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");
 11.1046 +
 11.1047 +	len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
 11.1048 +	if (count - len < 2)
 11.1049 +		return -EINVAL;
 11.1050 +	len += sprintf(page + len, "\n");
 11.1051 +	return len;
 11.1052 +}
 11.1053 +
 11.1054 +static int irq_affinity_write_proc (struct file *file, const char *buffer,
 11.1055 +				    unsigned long count, void *data)
 11.1056 +{
 11.1057 +	unsigned int irq = (unsigned long) data;
 11.1058 +	int full_count = count, err;
 11.1059 +	cpumask_t new_value, tmp;
 11.1060 +#	define R_PREFIX_LEN 16
 11.1061 +	char rbuf[R_PREFIX_LEN];
 11.1062 +	int rlen;
 11.1063 +	int prelen;
 11.1064 +	irq_desc_t *desc = irq_descp(irq);
 11.1065 +	unsigned long flags;
 11.1066 +
 11.1067 +	if (!desc->handler->set_affinity)
 11.1068 +		return -EIO;
 11.1069 +
 11.1070 +	/*
 11.1071 +	 * If string being written starts with a prefix of 'r' or 'R'
 11.1072 +	 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
 11.1073 +	 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
 11.1074 +	 * all be trimmed as part of prelen, the untrimmed spaces will
 11.1075 +	 * cause the hex parsing to fail, and this write() syscall will
 11.1076 +	 * fail with EINVAL.
 11.1077 +	 */
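          +	/*
          +	 * Illustrative example (hypothetical irq number): writing
          +	 * "r 3" to /proc/irq/18/smp_affinity requests redirectable
          +	 * delivery to the CPUs in mask 0x3, while a plain "3" sets a
          +	 * fixed affinity to the same CPUs.
          +	 */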
 11.1078 +
 11.1079 +	if (!count)
 11.1080 +		return -EINVAL;
 11.1081 +	rlen = min(sizeof(rbuf)-1, count);
 11.1082 +	if (copy_from_user(rbuf, buffer, rlen))
 11.1083 +		return -EFAULT;
 11.1084 +	rbuf[rlen] = 0;
 11.1085 +	prelen = 0;
 11.1086 +	if (tolower(*rbuf) == 'r') {
 11.1087 +		prelen = strspn(rbuf, "Rr ");
 11.1088 +		irq |= IA64_IRQ_REDIRECTED;
 11.1089 +	}
 11.1090 +
 11.1091 +	err = cpumask_parse(buffer+prelen, count-prelen, new_value);
 11.1092 +	if (err)
 11.1093 +		return err;
 11.1094 +
 11.1095 +	/*
 11.1096 +	 * Do not allow disabling IRQs completely - it's a too easy
 11.1097 +	 * way to make the system unusable accidentally :-) At least
 11.1098 +	 * one online CPU still has to be targeted.
 11.1099 +	 */
 11.1100 +	cpus_and(tmp, new_value, cpu_online_map);
 11.1101 +	if (cpus_empty(tmp))
 11.1102 +		return -EINVAL;
 11.1103 +
 11.1104 +	spin_lock_irqsave(&desc->lock, flags);
 11.1105 +	pending_irq_cpumask[irq] = new_value;
 11.1106 +	spin_unlock_irqrestore(&desc->lock, flags);
 11.1107 +
 11.1108 +	return full_count;
 11.1109 +}
 11.1110 +
 11.1111 +void move_irq(int irq)
 11.1112 +{
 11.1113 +	/* note - we hold desc->lock */
 11.1114 +	cpumask_t tmp;
 11.1115 +	irq_desc_t *desc = irq_descp(irq);
 11.1116 +
 11.1117 +	if (!cpus_empty(pending_irq_cpumask[irq])) {
 11.1118 +		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
 11.1119 +		if (unlikely(!cpus_empty(tmp))) {
 11.1120 +			desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
 11.1121 +		}
 11.1122 +		cpus_clear(pending_irq_cpumask[irq]);
 11.1123 +	}
 11.1124 +}
 11.1125 +
 11.1126 +
 11.1127 +#endif /* CONFIG_SMP */
 11.1128 +
 11.1129 +#ifdef CONFIG_HOTPLUG_CPU
 11.1130 +unsigned int vectors_in_migration[NR_IRQS];
 11.1131 +
 11.1132 +/*
 11.1133 + * Since cpu_online_map is already updated, we just need to check for
  11.1134 + * affinity masks that no longer include any online CPU
 11.1135 + */
 11.1136 +static void migrate_irqs(void)
 11.1137 +{
 11.1138 +	cpumask_t	mask;
 11.1139 +	irq_desc_t *desc;
 11.1140 +	int 		irq, new_cpu;
 11.1141 +
 11.1142 +	for (irq=0; irq < NR_IRQS; irq++) {
 11.1143 +		desc = irq_descp(irq);
 11.1144 +
 11.1145 +		/*
 11.1146 +		 * No handling for now.
  11.1147 +		 * TBD: Implement a disable function so we can tell the
  11.1148 +		 * CPU not to respond to these local interrupt sources,
  11.1149 +		 * such as ITV, CPEI, MCA, etc.
 11.1150 +		 */
  11.1151 +		if (desc->status & IRQ_PER_CPU)	/* bitmask test, not equality */
 11.1152 +			continue;
 11.1153 +
 11.1154 +		cpus_and(mask, irq_affinity[irq], cpu_online_map);
 11.1155 +		if (any_online_cpu(mask) == NR_CPUS) {
 11.1156 +			/*
 11.1157 +			 * Save it for phase 2 processing
 11.1158 +			 */
 11.1159 +			vectors_in_migration[irq] = irq;
 11.1160 +
 11.1161 +			new_cpu = any_online_cpu(cpu_online_map);
 11.1162 +			mask = cpumask_of_cpu(new_cpu);
 11.1163 +
 11.1164 +			/*
  11.1165 +			 * All three are essential; currently WARN_ON.. maybe panic?
 11.1166 +			 */
 11.1167 +			if (desc->handler && desc->handler->disable &&
 11.1168 +				desc->handler->enable && desc->handler->set_affinity) {
 11.1169 +				desc->handler->disable(irq);
 11.1170 +				desc->handler->set_affinity(irq, mask);
 11.1171 +				desc->handler->enable(irq);
 11.1172 +			} else {
 11.1173 +				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
 11.1174 +						!(desc->handler->enable) ||
 11.1175 +						!(desc->handler->set_affinity)));
 11.1176 +			}
 11.1177 +		}
 11.1178 +	}
 11.1179 +}
 11.1180 +
 11.1181 +void fixup_irqs(void)
 11.1182 +{
 11.1183 +	unsigned int irq;
 11.1184 +	extern void ia64_process_pending_intr(void);
 11.1185 +
 11.1186 +	ia64_set_itv(1<<16);
 11.1187 +	/*
 11.1188 +	 * Phase 1: Locate irq's bound to this cpu and
 11.1189 +	 * relocate them for cpu removal.
 11.1190 +	 */
 11.1191 +	migrate_irqs();
 11.1192 +
 11.1193 +	/*
 11.1194 +	 * Phase 2: Perform interrupt processing for all entries reported in
 11.1195 +	 * local APIC.
 11.1196 +	 */
 11.1197 +	ia64_process_pending_intr();
 11.1198 +
 11.1199 +	/*
 11.1200 +	 * Phase 3: Now handle any interrupts not captured in local APIC.
  11.1201 + * This accounts for cases where a device interrupted while its
  11.1202 + * RTE was being disabled and re-programmed.
 11.1203 +	 */
 11.1204 +	for (irq=0; irq < NR_IRQS; irq++) {
 11.1205 +		if (vectors_in_migration[irq]) {
 11.1206 +			vectors_in_migration[irq]=0;
 11.1207 +			do_IRQ(irq, NULL);
 11.1208 +		}
 11.1209 +	}
 11.1210 +
 11.1211 +	/*
  11.1212 + * Now let the processor die. We disable irqs and call max_xtp()
  11.1213 + * to ensure no more interrupts are routed to this processor.
  11.1214 + * The local timer interrupt may still have one pending, which
  11.1215 + * timer_interrupt() takes care of.
 11.1216 +	 */
 11.1217 +	max_xtp();
 11.1218 +	local_irq_disable();
 11.1219 +}
 11.1220 +#endif
 11.1221 +
 11.1222 +#ifndef XEN
 11.1223 +static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 11.1224 +			int count, int *eof, void *data)
 11.1225 +{
 11.1226 +	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
 11.1227 +	if (count - len < 2)
 11.1228 +		return -EINVAL;
 11.1229 +	len += sprintf(page + len, "\n");
 11.1230 +	return len;
 11.1231 +}
 11.1232 +
 11.1233 +static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 11.1234 +					unsigned long count, void *data)
 11.1235 +{
 11.1236 +	cpumask_t *mask = (cpumask_t *)data;
 11.1237 +	unsigned long full_count = count, err;
 11.1238 +	cpumask_t new_value;
 11.1239 +
 11.1240 +	err = cpumask_parse(buffer, count, new_value);
 11.1241 +	if (err)
 11.1242 +		return err;
 11.1243 +
 11.1244 +	*mask = new_value;
 11.1245 +	return full_count;
 11.1246 +}
 11.1247 +
 11.1248 +#define MAX_NAMELEN 10
 11.1249 +
 11.1250 +static void register_irq_proc (unsigned int irq)
 11.1251 +{
 11.1252 +	char name [MAX_NAMELEN];
 11.1253 +
 11.1254 +	if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
 11.1255 +		return;
 11.1256 +
 11.1257 +	memset(name, 0, MAX_NAMELEN);
 11.1258 +	sprintf(name, "%d", irq);
 11.1259 +
 11.1260 +	/* create /proc/irq/1234 */
 11.1261 +	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
 11.1262 +
 11.1263 +#ifdef CONFIG_SMP
 11.1264 +	{
 11.1265 +		struct proc_dir_entry *entry;
 11.1266 +
 11.1267 +		/* create /proc/irq/1234/smp_affinity */
 11.1268 +		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
 11.1269 +
 11.1270 +		if (entry) {
 11.1271 +			entry->nlink = 1;
 11.1272 +			entry->data = (void *)(long)irq;
 11.1273 +			entry->read_proc = irq_affinity_read_proc;
 11.1274 +			entry->write_proc = irq_affinity_write_proc;
 11.1275 +		}
 11.1276 +
 11.1277 +		smp_affinity_entry[irq] = entry;
 11.1278 +	}
 11.1279 +#endif
 11.1280 +}
 11.1281 +
 11.1282 +cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 11.1283 +
 11.1284 +void init_irq_proc (void)
 11.1285 +{
 11.1286 +	struct proc_dir_entry *entry;
 11.1287 +	int i;
 11.1288 +
 11.1289 +	/* create /proc/irq */
 11.1290 +	root_irq_dir = proc_mkdir("irq", 0);
 11.1291 +
 11.1292 +	/* create /proc/irq/prof_cpu_mask */
 11.1293 +	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
 11.1294 +
 11.1295 +	if (!entry)
 11.1296 +		return;
 11.1297 +
 11.1298 +	entry->nlink = 1;
 11.1299 +	entry->data = (void *)&prof_cpu_mask;
 11.1300 +	entry->read_proc = prof_cpu_mask_read_proc;
 11.1301 +	entry->write_proc = prof_cpu_mask_write_proc;
 11.1302 +
 11.1303 +	/*
 11.1304 +	 * Create entries for all existing IRQs.
 11.1305 +	 */
 11.1306 +	for (i = 0; i < NR_IRQS; i++) {
 11.1307 +		if (irq_descp(i)->handler == &no_irq_type)
 11.1308 +			continue;
 11.1309 +		register_irq_proc(i);
 11.1310 +	}
 11.1311 +}
 11.1312 +#endif
 11.1313 +
 11.1314 +
 11.1315 +#ifdef XEN
 11.1316 +/*
 11.1317 + * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 11.1318 + */
 11.1319 +
 11.1320 +#define IRQ_MAX_GUESTS 7
 11.1321 +typedef struct {
 11.1322 +    u8 nr_guests;
 11.1323 +    u8 in_flight;
 11.1324 +    u8 shareable;
 11.1325 +    struct domain *guest[IRQ_MAX_GUESTS];
 11.1326 +} irq_guest_action_t;
 11.1327 +
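          +/*
          + * Summary of the accounting used below: __do_IRQ_guest() sets each
          + * bound guest's bit in d->pirq_mask and bumps in_flight before
          + * sending the pirq event; pirq_guest_unmask() clears a guest's bit
          + * once its event channel is unmasked, and only when in_flight drops
          + * back to zero is desc->handler->end() run for the line.
          + */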
 11.1328 +static void __do_IRQ_guest(int irq)
 11.1329 +{
 11.1330 +    irq_desc_t         *desc = &irq_desc[irq];
 11.1331 +    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
 11.1332 +    struct domain      *d;
 11.1333 +    int                 i;
 11.1334 +
 11.1335 +    for ( i = 0; i < action->nr_guests; i++ )
 11.1336 +    {
 11.1337 +        d = action->guest[i];
 11.1338 +        if ( !test_and_set_bit(irq, &d->pirq_mask) )
 11.1339 +            action->in_flight++;
 11.1340 +        send_guest_pirq(d, irq);
 11.1341 +    }
 11.1342 +}
 11.1343 +
 11.1344 +int pirq_guest_unmask(struct domain *d)
 11.1345 +{
 11.1346 +    irq_desc_t    *desc;
 11.1347 +    int            i, j, pirq;
 11.1348 +    u32            m;
 11.1349 +    shared_info_t *s = d->shared_info;
 11.1350 +
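          +    /*
          +     * Each 32-bit word of pirq_mask covers pirqs (i<<5)..(i<<5)+31;
          +     * ffs() pulls the pending pirqs out one set bit at a time.
          +     */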
 11.1351 +    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
 11.1352 +    {
 11.1353 +        m = d->pirq_mask[i];
 11.1354 +        while ( (j = ffs(m)) != 0 )
 11.1355 +        {
 11.1356 +            m &= ~(1 << --j);
 11.1357 +            pirq = (i << 5) + j;
 11.1358 +            desc = &irq_desc[pirq];
 11.1359 +            spin_lock_irq(&desc->lock);
 11.1360 +            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
 11.1361 +                 test_and_clear_bit(pirq, &d->pirq_mask) &&
 11.1362 +                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
 11.1363 +                desc->handler->end(pirq);
 11.1364 +            spin_unlock_irq(&desc->lock);
 11.1365 +        }
 11.1366 +    }
 11.1367 +
 11.1368 +    return 0;
 11.1369 +}
 11.1370 +
 11.1371 +int pirq_guest_bind(struct exec_domain *d, int irq, int will_share)
 11.1372 +{
 11.1373 +    irq_desc_t         *desc = &irq_desc[irq];
 11.1374 +    irq_guest_action_t *action;
 11.1375 +    unsigned long       flags;
 11.1376 +    int                 rc = 0;
 11.1377 +
 11.1378 +    if ( !IS_CAPABLE_PHYSDEV(d->domain) )
 11.1379 +        return -EPERM;
 11.1380 +
 11.1381 +    spin_lock_irqsave(&desc->lock, flags);
 11.1382 +
 11.1383 +    action = (irq_guest_action_t *)desc->action;
 11.1384 +
 11.1385 +    if ( !(desc->status & IRQ_GUEST) )
 11.1386 +    {
 11.1387 +        if ( desc->action != NULL )
 11.1388 +        {
 11.1389 +            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
 11.1390 +                    irq, desc->action->name);
 11.1391 +            rc = -EBUSY;
 11.1392 +            goto out;
 11.1393 +        }
 11.1394 +
 11.1395 +        action = xmalloc(sizeof(irq_guest_action_t));
 11.1396 +        if ( (desc->action = (struct irqaction *)action) == NULL )
 11.1397 +        {
 11.1398 +            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
 11.1399 +            rc = -ENOMEM;
 11.1400 +            goto out;
 11.1401 +        }
 11.1402 +
 11.1403 +        action->nr_guests = 0;
 11.1404 +        action->in_flight = 0;
 11.1405 +        action->shareable = will_share;
 11.1406 +        
 11.1407 +        desc->depth = 0;
 11.1408 +        desc->status |= IRQ_GUEST;
 11.1409 +        desc->status &= ~IRQ_DISABLED;
 11.1410 +        desc->handler->startup(irq);
 11.1411 +
 11.1412 +        /* Attempt to bind the interrupt target to the correct CPU. */
 11.1413 +        if ( desc->handler->set_affinity != NULL )
 11.1414 +            desc->handler->set_affinity(
 11.1415 +                irq, apicid_to_phys_cpu_present(d->processor));
 11.1416 +    }
 11.1417 +    else if ( !will_share || !action->shareable )
 11.1418 +    {
 11.1419 +        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
 11.1420 +                irq);
 11.1421 +        rc = -EBUSY;
 11.1422 +        goto out;
 11.1423 +    }
 11.1424 +
 11.1425 +    if ( action->nr_guests == IRQ_MAX_GUESTS )
 11.1426 +    {
 11.1427 +        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
 11.1428 +        rc = -EBUSY;
 11.1429 +        goto out;
 11.1430 +    }
 11.1431 +
 11.1432 +    action->guest[action->nr_guests++] = d;
 11.1433 +
 11.1434 + out:
 11.1435 +    spin_unlock_irqrestore(&desc->lock, flags);
 11.1436 +    return rc;
 11.1437 +}
 11.1438 +
 11.1439 +int pirq_guest_unbind(struct domain *d, int irq)
 11.1440 +{
 11.1441 +    irq_desc_t         *desc = &irq_desc[irq];
 11.1442 +    irq_guest_action_t *action;
 11.1443 +    unsigned long       flags;
 11.1444 +    int                 i;
 11.1445 +
 11.1446 +    spin_lock_irqsave(&desc->lock, flags);
 11.1447 +
 11.1448 +    action = (irq_guest_action_t *)desc->action;
 11.1449 +
 11.1450 +    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
 11.1451 +         (--action->in_flight == 0) )
 11.1452 +        desc->handler->end(irq);
 11.1453 +
 11.1454 +    if ( action->nr_guests == 1 )
 11.1455 +    {
 11.1456 +        desc->action = NULL;
 11.1457 +        xfree(action);
 11.1458 +        desc->depth   = 1;
 11.1459 +        desc->status |= IRQ_DISABLED;
 11.1460 +        desc->status &= ~IRQ_GUEST;
 11.1461 +        desc->handler->shutdown(irq);
 11.1462 +    }
 11.1463 +    else
 11.1464 +    {
 11.1465 +        i = 0;
 11.1466 +        while ( action->guest[i] != d )
 11.1467 +            i++;
  11.1468 +        memmove(&action->guest[i], &action->guest[i+1],
          +                (action->nr_guests-i-1) * sizeof(action->guest[0]));
 11.1469 +        action->nr_guests--;
 11.1470 +    }
 11.1471 +
 11.1472 +    spin_unlock_irqrestore(&desc->lock, flags);    
 11.1473 +    return 0;
 11.1474 +}
 11.1475 +
 11.1476 +int pirq_guest_bindable(int irq, int will_share)
 11.1477 +{
 11.1478 +    irq_desc_t         *desc = &irq_desc[irq];
 11.1479 +    irq_guest_action_t *action;
 11.1480 +    unsigned long       flags;
 11.1481 +    int                 okay;
 11.1482 +
 11.1483 +    spin_lock_irqsave(&desc->lock, flags);
 11.1484 +
 11.1485 +    action = (irq_guest_action_t *)desc->action;
 11.1486 +
 11.1487 +    /*
 11.1488 +     * To be bindable the IRQ must either be not currently bound (1), or
 11.1489 +     * it must be shareable (2) and not at its share limit (3).
 11.1490 +     */
 11.1491 +    okay = ((!(desc->status & IRQ_GUEST) && (action == NULL)) || /* 1 */
 11.1492 +            (action->shareable && will_share &&                  /* 2 */
 11.1493 +             (action->nr_guests != IRQ_MAX_GUESTS)));            /* 3 */
 11.1494 +
 11.1495 +    spin_unlock_irqrestore(&desc->lock, flags);
 11.1496 +    return okay;
 11.1497 +}
 11.1498 +#endif
 11.1499 +
 11.1500 +#ifdef XEN
 11.1501 +#ifdef IA64
 11.1502 +// this is a temporary hack until real console input is implemented
 11.1503 +irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
 11.1504 +{
  11.1505 +	domain_pend_keyboard_interrupt(irq);
          +	return IRQ_HANDLED;	/* function is declared irqreturn_t */
  11.1506 +}
 11.1507 +
 11.1508 +void serial_input_init(void)
 11.1509 +{
 11.1510 +	int retval;
 11.1511 +	int irq = 0x30;	// FIXME
 11.1512 +
 11.1513 +	retval = request_irq(irq,guest_forward_keyboard_input,SA_INTERRUPT,"siminput",NULL);
 11.1514 +	if (retval) {
 11.1515 +		printk("serial_input_init: broken request_irq call\n");
 11.1516 +		while(1);
 11.1517 +	}
 11.1518 +}
 11.1519 +#endif
 11.1520 +#endif
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/ia64/lib/Makefile	Mon Feb 14 12:25:30 2005 +0000
    12.3 @@ -0,0 +1,44 @@
    12.4 +#
    12.5 +# Makefile for ia64-specific library routines..
    12.6 +#
    12.7 +
    12.8 +include $(BASEDIR)/Rules.mk
    12.9 +
   12.10 +OBJS := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
   12.11 +	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o			\
   12.12 +	bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o	\
   12.13 +	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
   12.14 +	flush.o ip_fast_csum.o do_csum.o copy_user.o			\
   12.15 +	memset.o strlen.o memcpy.o swiotlb.o
   12.16 +
   12.17 +default: $(OBJS)
   12.18 +	$(LD) -r -o ia64lib.o $(OBJS)
   12.19 +
   12.20 +AFLAGS += -I$(BASEDIR)/include -D__ASSEMBLY__
   12.21 +
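          +# Each idiv*.S source is assembled several times below; -DUNSIGNED
          +# and -DMODULO select which division/modulo helper it produces.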
   12.22 +__divdi3.o: idiv64.S
   12.23 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
   12.24 +
   12.25 +__udivdi3.o: idiv64.S
    12.26 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DUNSIGNED -c -o $@ $<
   12.27 +
   12.28 +__moddi3.o: idiv64.S
    12.29 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -c -o $@ $<
   12.30 +
   12.31 +__umoddi3.o: idiv64.S
    12.32 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -DUNSIGNED -c -o $@ $<
   12.33 +
   12.34 +__divsi3.o: idiv32.S
   12.35 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
   12.36 +
   12.37 +__udivsi3.o: idiv32.S
    12.38 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DUNSIGNED -c -o $@ $<
   12.39 +
   12.40 +__modsi3.o: idiv32.S
    12.41 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -c -o $@ $<
   12.42 +
   12.43 +__umodsi3.o: idiv32.S
    12.44 +	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -DUNSIGNED -c -o $@ $<
   12.45 +
   12.46 +clean:
   12.47 +	rm -f *.o *~
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen/arch/ia64/mm_init.c	Mon Feb 14 12:25:30 2005 +0000
    13.3 @@ -0,0 +1,724 @@
    13.4 +/*
    13.5 + * Initialize MMU support.
    13.6 + *
    13.7 + * Copyright (C) 1998-2003 Hewlett-Packard Co
    13.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    13.9 + */
   13.10 +#include <linux/config.h>
   13.11 +#include <linux/kernel.h>
   13.12 +#include <linux/init.h>
   13.13 +
   13.14 +#ifdef XEN
   13.15 +#include <xen/sched.h>
   13.16 +#endif
   13.17 +#include <linux/bootmem.h>
   13.18 +#include <linux/efi.h>
   13.19 +#include <linux/elf.h>
   13.20 +#include <linux/mm.h>
   13.21 +#include <linux/mmzone.h>
   13.22 +#include <linux/module.h>
   13.23 +#ifndef XEN
   13.24 +#include <linux/personality.h>
   13.25 +#endif
   13.26 +#include <linux/reboot.h>
   13.27 +#include <linux/slab.h>
   13.28 +#include <linux/swap.h>
   13.29 +#ifndef XEN
   13.30 +#include <linux/proc_fs.h>
   13.31 +#endif
   13.32 +
   13.33 +#ifndef XEN
   13.34 +#include <asm/a.out.h>
   13.35 +#endif
   13.36 +#include <asm/bitops.h>
   13.37 +#include <asm/dma.h>
   13.38 +#ifndef XEN
   13.39 +#include <asm/ia32.h>
   13.40 +#endif
   13.41 +#include <asm/io.h>
   13.42 +#include <asm/machvec.h>
   13.43 +#include <asm/numa.h>
   13.44 +#include <asm/patch.h>
   13.45 +#include <asm/pgalloc.h>
   13.46 +#include <asm/sal.h>
   13.47 +#include <asm/sections.h>
   13.48 +#include <asm/system.h>
   13.49 +#include <asm/tlb.h>
   13.50 +#include <asm/uaccess.h>
   13.51 +#include <asm/unistd.h>
   13.52 +#include <asm/mca.h>
   13.53 +
   13.54 +#ifndef XEN
   13.55 +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
   13.56 +#endif
   13.57 +
   13.58 +extern void ia64_tlb_init (void);
   13.59 +
   13.60 +unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
   13.61 +
   13.62 +#ifdef CONFIG_VIRTUAL_MEM_MAP
   13.63 +unsigned long vmalloc_end = VMALLOC_END_INIT;
   13.64 +EXPORT_SYMBOL(vmalloc_end);
   13.65 +struct page *vmem_map;
   13.66 +EXPORT_SYMBOL(vmem_map);
   13.67 +#endif
   13.68 +
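          +/*
          + * Low/high water marks (in pages) for the pgd/pmd quicklists:
          + * check_pgt_cache() trims the cache back down to the low mark
          + * whenever it has grown past the high mark.
          + */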
   13.69 +static int pgt_cache_water[2] = { 25, 50 };
   13.70 +
   13.71 +struct page *zero_page_memmap_ptr;		/* map entry for zero page */
   13.72 +EXPORT_SYMBOL(zero_page_memmap_ptr);
   13.73 +
   13.74 +#ifdef XEN
   13.75 +void *high_memory;
   13.76 +EXPORT_SYMBOL(high_memory);
   13.77 +
   13.78 +/////////////////////////////////////////////
   13.79 +// following from linux-2.6.7/mm/mmap.c
   13.80 +/* description of effects of mapping type and prot in current implementation.
   13.81 + * this is due to the limited x86 page protection hardware.  The expected
   13.82 + * behavior is in parens:
   13.83 + *
   13.84 + * map_type	prot
   13.85 + *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
   13.86 + * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
   13.87 + *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
   13.88 + *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
   13.89 + *		
   13.90 + * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
   13.91 + *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
   13.92 + *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
   13.93 + *
   13.94 + */
   13.95 +pgprot_t protection_map[16] = {
   13.96 +	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
   13.97 +	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
   13.98 +};
   13.99 +
  13.100 +void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  13.101 +{
  13.102 +	printf("insert_vm_struct: called, not implemented yet\n");
  13.103 +}
  13.104 +
  13.105 +/////////////////////////////////////////////
  13.106 +//following from linux/mm/memory.c
  13.107 +
  13.108 +/*
  13.109 + * Allocate page middle directory.
  13.110 + *
  13.111 + * We've already handled the fast-path in-line, and we own the
  13.112 + * page table lock.
  13.113 + *
  13.114 + * On a two-level page table, this ends up actually being entirely
  13.115 + * optimized away.
  13.116 + */
  13.117 +pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  13.118 +{
  13.119 +	pmd_t *new;
  13.120 +
  13.121 +	spin_unlock(&mm->page_table_lock);
  13.122 +	new = pmd_alloc_one(mm, address);
  13.123 +	spin_lock(&mm->page_table_lock);
  13.124 +	if (!new)
  13.125 +		return NULL;
  13.126 +
  13.127 +	/*
  13.128 +	 * Because we dropped the lock, we should re-check the
  13.129 +	 * entry, as somebody else could have populated it..
  13.130 +	 */
  13.131 +	if (pgd_present(*pgd)) {
  13.132 +		pmd_free(new);
  13.133 +		goto out;
  13.134 +	}
  13.135 +	pgd_populate(mm, pgd, new);
  13.136 +out:
  13.137 +	return pmd_offset(pgd, address);
  13.138 +}
  13.139 +
  13.140 +pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
  13.141 +{
  13.142 +	if (!pmd_present(*pmd)) {
  13.143 +		struct page *new;
  13.144 +
  13.145 +		spin_unlock(&mm->page_table_lock);
  13.146 +		new = pte_alloc_one(mm, address);
  13.147 +		spin_lock(&mm->page_table_lock);
  13.148 +		if (!new)
  13.149 +			return NULL;
  13.150 +
  13.151 +		/*
  13.152 +		 * Because we dropped the lock, we should re-check the
  13.153 +		 * entry, as somebody else could have populated it..
  13.154 +		 */
  13.155 +		if (pmd_present(*pmd)) {
  13.156 +			pte_free(new);
  13.157 +			goto out;
  13.158 +		}
  13.159 +		inc_page_state(nr_page_table_pages);
  13.160 +		pmd_populate(mm, pmd, new);
  13.161 +	}
  13.162 +out:
  13.163 +	return pte_offset_map(pmd, address);
  13.164 +}
  13.165 +/////////////////////////////////////////////
  13.166 +#endif /* XEN */
  13.167 +
  13.168 +void
  13.169 +check_pgt_cache (void)
  13.170 +{
  13.171 +	int low, high;
  13.172 +
  13.173 +	low = pgt_cache_water[0];
  13.174 +	high = pgt_cache_water[1];
  13.175 +
  13.176 +	if (pgtable_cache_size > (u64) high) {
  13.177 +		do {
  13.178 +			if (pgd_quicklist)
  13.179 +				free_page((unsigned long)pgd_alloc_one_fast(0));
  13.180 +			if (pmd_quicklist)
  13.181 +				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
  13.182 +		} while (pgtable_cache_size > (u64) low);
  13.183 +	}
  13.184 +}
  13.185 +
  13.186 +void
  13.187 +update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
  13.188 +{
  13.189 +	unsigned long addr;
  13.190 +	struct page *page;
  13.191 +
  13.192 +	if (!pte_exec(pte))
  13.193 +		return;				/* not an executable page... */
  13.194 +
  13.195 +	page = pte_page(pte);
  13.196 +	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
  13.197 +	addr = (unsigned long) page_address(page);
  13.198 +
  13.199 +	if (test_bit(PG_arch_1, &page->flags))
  13.200 +		return;				/* i-cache is already coherent with d-cache */
  13.201 +
  13.202 +	flush_icache_range(addr, addr + PAGE_SIZE);
  13.203 +	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
  13.204 +}
  13.205 +
  13.206 +inline void
  13.207 +ia64_set_rbs_bot (void)
  13.208 +{
  13.209 +#ifdef XEN
  13.210 +	unsigned stack_size = MAX_USER_STACK_SIZE;
  13.211 +#else
  13.212 +	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
  13.213 +#endif
  13.214 +
  13.215 +	if (stack_size > MAX_USER_STACK_SIZE)
  13.216 +		stack_size = MAX_USER_STACK_SIZE;
  13.217 +	current->thread.rbs_bot = STACK_TOP - stack_size;
  13.218 +}
  13.219 +
  13.220 +/*
  13.221 + * This performs some platform-dependent address space initialization.
  13.222 + * On IA-64, we want to setup the VM area for the register backing
  13.223 + * store (which grows upwards) and install the gateway page which is
  13.224 + * used for signal trampolines, etc.
  13.225 + */
  13.226 +void
  13.227 +ia64_init_addr_space (void)
  13.228 +{
  13.229 +#ifdef XEN
   13.230 +	printf("ia64_init_addr_space: called, not implemented\n");
  13.231 +#else
  13.232 +	struct vm_area_struct *vma;
  13.233 +
  13.234 +	ia64_set_rbs_bot();
  13.235 +
  13.236 +	/*
  13.237 +	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
  13.238 +	 * the problem.  When the process attempts to write to the register backing store
  13.239 +	 * for the first time, it will get a SEGFAULT in this case.
  13.240 +	 */
  13.241 +	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  13.242 +	if (vma) {
  13.243 +		memset(vma, 0, sizeof(*vma));
  13.244 +		vma->vm_mm = current->mm;
  13.245 +		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
  13.246 +		vma->vm_end = vma->vm_start + PAGE_SIZE;
  13.247 +		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
  13.248 +		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
  13.249 +		insert_vm_struct(current->mm, vma);
  13.250 +	}
  13.251 +
  13.252 +	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
  13.253 +	if (!(current->personality & MMAP_PAGE_ZERO)) {
  13.254 +		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  13.255 +		if (vma) {
  13.256 +			memset(vma, 0, sizeof(*vma));
  13.257 +			vma->vm_mm = current->mm;
  13.258 +			vma->vm_end = PAGE_SIZE;
  13.259 +			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
  13.260 +			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
  13.261 +			insert_vm_struct(current->mm, vma);
  13.262 +		}
  13.263 +	}
  13.264 +#endif
  13.265 +}
  13.266 +
  13.267 +void
  13.268 +free_initmem (void)
  13.269 +{
  13.270 +	unsigned long addr, eaddr;
  13.271 +
  13.272 +	addr = (unsigned long) ia64_imva(__init_begin);
  13.273 +	eaddr = (unsigned long) ia64_imva(__init_end);
  13.274 +	while (addr < eaddr) {
  13.275 +		ClearPageReserved(virt_to_page(addr));
  13.276 +		set_page_count(virt_to_page(addr), 1);
  13.277 +		free_page(addr);
  13.278 +		++totalram_pages;
  13.279 +		addr += PAGE_SIZE;
  13.280 +	}
  13.281 +	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
  13.282 +	       (__init_end - __init_begin) >> 10);
  13.283 +}
  13.284 +
  13.285 +void
  13.286 +free_initrd_mem (unsigned long start, unsigned long end)
  13.287 +{
  13.288 +	struct page *page;
  13.289 +	/*
  13.290 +	 * EFI uses 4KB pages while the kernel can use 4KB  or bigger.
  13.291 +	 * Thus EFI and the kernel may have different page sizes. It is
  13.292 +	 * therefore possible to have the initrd share the same page as
  13.293 +	 * the end of the kernel (given current setup).
  13.294 +	 *
  13.295 +	 * To avoid freeing/using the wrong page (kernel sized) we:
  13.296 +	 *	- align up the beginning of initrd
  13.297 +	 *	- align down the end of initrd
  13.298 +	 *
  13.299 +	 *  |             |
  13.300 +	 *  |=============| a000
  13.301 +	 *  |             |
  13.302 +	 *  |             |
  13.303 +	 *  |             | 9000
  13.304 +	 *  |/////////////|
  13.305 +	 *  |/////////////|
  13.306 +	 *  |=============| 8000
  13.307 +	 *  |///INITRD////|
  13.308 +	 *  |/////////////|
  13.309 +	 *  |/////////////| 7000
  13.310 +	 *  |             |
  13.311 +	 *  |KKKKKKKKKKKKK|
  13.312 +	 *  |=============| 6000
  13.313 +	 *  |KKKKKKKKKKKKK|
  13.314 +	 *  |KKKKKKKKKKKKK|
  13.315 +	 *  K=kernel using 8KB pages
  13.316 +	 *
  13.317 +	 * In this example, we must free page 8000 ONLY. So we must align up
  13.318 +	 * initrd_start and keep initrd_end as is.
  13.319 +	 */
  13.320 +	start = PAGE_ALIGN(start);
  13.321 +	end = end & PAGE_MASK;
  13.322 +
  13.323 +	if (start < end)
  13.324 +		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
  13.325 +
  13.326 +	for (; start < end; start += PAGE_SIZE) {
  13.327 +		if (!virt_addr_valid(start))
  13.328 +			continue;
  13.329 +		page = virt_to_page(start);
  13.330 +		ClearPageReserved(page);
  13.331 +		set_page_count(page, 1);
  13.332 +		free_page(start);
  13.333 +		++totalram_pages;
  13.334 +	}
  13.335 +}
  13.336 +
  13.337 +/*
  13.338 + * This installs a clean page in the kernel's page table.
  13.339 + */
  13.340 +struct page *
  13.341 +put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
  13.342 +{
  13.343 +	pgd_t *pgd;
  13.344 +	pmd_t *pmd;
  13.345 +	pte_t *pte;
  13.346 +
  13.347 +	if (!PageReserved(page))
  13.348 +		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
  13.349 +		       page_address(page));
  13.350 +
  13.351 +	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */
  13.352 +
  13.353 +	spin_lock(&init_mm.page_table_lock);
  13.354 +	{
  13.355 +		pmd = pmd_alloc(&init_mm, pgd, address);
  13.356 +		if (!pmd)
  13.357 +			goto out;
  13.358 +		pte = pte_alloc_map(&init_mm, pmd, address);
  13.359 +		if (!pte)
  13.360 +			goto out;
  13.361 +		if (!pte_none(*pte)) {
  13.362 +			pte_unmap(pte);
  13.363 +			goto out;
  13.364 +		}
  13.365 +		set_pte(pte, mk_pte(page, pgprot));
  13.366 +		pte_unmap(pte);
  13.367 +	}
  13.368 +  out:	spin_unlock(&init_mm.page_table_lock);
  13.369 +	/* no need for flush_tlb */
  13.370 +	return page;
  13.371 +}
  13.372 +
  13.373 +static void
  13.374 +setup_gate (void)
  13.375 +{
  13.376 +#ifndef XEN
  13.377 +	struct page *page;
  13.378 +
  13.379 +	/*
  13.380 +	 * Map the gate page twice: once read-only to export the ELF headers etc., and
  13.381 +	 * once as an execute-only page to enable privilege promotion via "epc":
  13.382 +	 */
  13.383 +	page = virt_to_page(ia64_imva(__start_gate_section));
  13.384 +	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
  13.385 +#ifdef HAVE_BUGGY_SEGREL
  13.386 +	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
  13.387 +	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
  13.388 +#else
  13.389 +	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
  13.390 +#endif
  13.391 +	ia64_patch_gate();
  13.392 +#endif
  13.393 +}
  13.394 +
  13.395 +void __devinit
  13.396 +ia64_mmu_init (void *my_cpu_data)
  13.397 +{
  13.398 +	unsigned long psr, pta, impl_va_bits;
  13.399 +	extern void __devinit tlb_init (void);
  13.400 +	int cpu;
  13.401 +
  13.402 +#ifdef CONFIG_DISABLE_VHPT
  13.403 +#	define VHPT_ENABLE_BIT	0
  13.404 +#else
  13.405 +#	define VHPT_ENABLE_BIT	1
  13.406 +#endif
  13.407 +
  13.408 +	/* Pin mapping for percpu area into TLB */
  13.409 +	psr = ia64_clear_ic();
  13.410 +	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
  13.411 +		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
  13.412 +		 PERCPU_PAGE_SHIFT);
  13.413 +
  13.414 +	ia64_set_psr(psr);
  13.415 +	ia64_srlz_i();
  13.416 +
  13.417 +	/*
  13.418 +	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
  13.419 +	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
  13.420 +	 * virtual address space are implemented but if we pick a large enough page size
  13.421 +	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
  13.422 +	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
  13.423 +	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
  13.424 +	 * problem in practice.  Alternatively, we could truncate the top of the mapped
  13.425 +	 * address space to not permit mappings that would overlap with the VMLPT.
  13.426 +	 * --davidm 00/12/06
  13.427 +	 */
  13.428 +#	define pte_bits			3
  13.429 +#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
  13.430 +	/*
  13.431 +	 * The virtual page table has to cover the entire implemented address space within
  13.432 +	 * a region even though not all of this space may be mappable.  The reason for
  13.433 +	 * this is that the Access bit and Dirty bit fault handlers perform
  13.434 +	 * non-speculative accesses to the virtual page table, so the address range of the
  13.435 +	 * virtual page table itself needs to be covered by the virtual page table.
  13.436 +	 */
  13.437 +#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
  13.438 +#	define POW2(n)			(1ULL << (n))
  13.439 +
  13.440 +	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));
  13.441 +
  13.442 +	if (impl_va_bits < 51 || impl_va_bits > 61)
  13.443 +		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
  13.444 +
  13.445 +#ifdef XEN
  13.446 +	vhpt_init();
  13.447 +	alloc_dom0();
  13.448 +#else
  13.449 +	/* place the VMLPT at the end of each page-table mapped region: */
  13.450 +	pta = POW2(61) - POW2(vmlpt_bits);
  13.451 +
  13.452 +	if (POW2(mapped_space_bits) >= pta)
  13.453 +		panic("mm/init: overlap between virtually mapped linear page table and "
  13.454 +		      "mapped kernel space!");
  13.455 +	/*
  13.456 +	 * Set the (virtually mapped linear) page table address.  Bit
  13.457 +	 * 8 selects between the short and long format, bits 2-7 the
  13.458 +	 * size of the table, and bit 0 whether the VHPT walker is
  13.459 +	 * enabled.
  13.460 +	 */
  13.461 +	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
  13.462 +#endif
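
For concreteness, a worked instance of the VMLPT sizing and cr.pta encoding in the non-Xen branch above (assuming 16KB pages, i.e. PAGE_SHIFT = 14, and 51 implemented virtual-address bits; both values are plausible examples, not fixed by the code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 14, pte_bits = 3, impl_va_bits = 51;
		unsigned long mapped_space_bits = 3 * (page_shift - pte_bits) + page_shift;
		unsigned long vmlpt_bits = impl_va_bits - page_shift + pte_bits;
		unsigned long pta = (1UL << 61) - (1UL << vmlpt_bits);

		printf("mapped space: 2^%lu, vmlpt: 2^%lu at %#lx\n",
		       mapped_space_bits, vmlpt_bits, pta);	/* 2^47, 2^40 at 0x1fffff0000000000 */
		/* bit 0 = VHPT walker enable, bits 2-7 = log2 of table size, bit 8 = 0 (short format) */
		printf("cr.pta = %#lx\n", pta | (vmlpt_bits << 2) | 1);
		return 0;
	}

With these numbers 2^47 is comfortably below the VMLPT base, so the overlap panic is not triggered.
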
  13.463 +
  13.464 +	ia64_tlb_init();
  13.465 +
  13.466 +#ifdef	CONFIG_HUGETLB_PAGE
  13.467 +	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
  13.468 +	ia64_srlz_d();
  13.469 +#endif
  13.470 +
  13.471 +	cpu = smp_processor_id();
  13.472 +
  13.473 +#ifndef XEN
  13.474 +	/* mca handler uses cr.lid as key to pick the right entry */
  13.475 +	ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
  13.476 +
  13.477 +	/* insert this percpu data information into our list for MCA recovery purposes */
  13.478 +	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
  13.479 +	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
  13.480 +	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
  13.481 +	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
  13.482 +	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
  13.483 +	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
  13.484 +	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
  13.485 +#endif
  13.486 +}
  13.487 +
  13.488 +#ifdef CONFIG_VIRTUAL_MEM_MAP
  13.489 +
  13.490 +int
  13.491 +create_mem_map_page_table (u64 start, u64 end, void *arg)
  13.492 +{
  13.493 +	unsigned long address, start_page, end_page;
  13.494 +	struct page *map_start, *map_end;
  13.495 +	int node;
  13.496 +	pgd_t *pgd;
  13.497 +	pmd_t *pmd;
  13.498 +	pte_t *pte;
  13.499 +
  13.500 +	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
  13.501 +	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
  13.502 +
  13.503 +	start_page = (unsigned long) map_start & PAGE_MASK;
  13.504 +	end_page = PAGE_ALIGN((unsigned long) map_end);
  13.505 +	node = paddr_to_nid(__pa(start));
  13.506 +
  13.507 +	for (address = start_page; address < end_page; address += PAGE_SIZE) {
  13.508 +		pgd = pgd_offset_k(address);
  13.509 +		if (pgd_none(*pgd))
  13.510 +			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
  13.511 +		pmd = pmd_offset(pgd, address);
  13.512 +
  13.513 +		if (pmd_none(*pmd))
  13.514 +			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
  13.515 +		pte = pte_offset_kernel(pmd, address);
  13.516 +
  13.517 +		if (pte_none(*pte))
  13.518 +			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
  13.519 +					     PAGE_KERNEL));
  13.520 +	}
  13.521 +	return 0;
  13.522 +}
  13.523 +
  13.524 +struct memmap_init_callback_data {
  13.525 +	struct page *start;
  13.526 +	struct page *end;
  13.527 +	int nid;
  13.528 +	unsigned long zone;
  13.529 +};
  13.530 +
  13.531 +static int
  13.532 +virtual_memmap_init (u64 start, u64 end, void *arg)
  13.533 +{
  13.534 +	struct memmap_init_callback_data *args;
  13.535 +	struct page *map_start, *map_end;
  13.536 +
  13.537 +	args = (struct memmap_init_callback_data *) arg;
  13.538 +
  13.539 +	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
  13.540 +	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
  13.541 +
  13.542 +	if (map_start < args->start)
  13.543 +		map_start = args->start;
  13.544 +	if (map_end > args->end)
  13.545 +		map_end = args->end;
  13.546 +
  13.547 +	/*
  13.548 +	 * We have to initialize "out of bounds" struct page elements that fit completely
  13.549 +	 * on the same pages that were allocated for the "in bounds" elements because they
  13.550 +	 * may be referenced later (and found to be "reserved").
  13.551 +	 */
  13.552 +	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
  13.553 +	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
  13.554 +		    / sizeof(struct page));
  13.555 +
  13.556 +	if (map_start < map_end)
  13.557 +		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
  13.558 +				 args->nid, args->zone, page_to_pfn(map_start));
  13.559 +	return 0;
  13.560 +}
  13.561 +
  13.562 +void
  13.563 +memmap_init (struct page *start, unsigned long size, int nid,
  13.564 +	     unsigned long zone, unsigned long start_pfn)
  13.565 +{
  13.566 +	if (!vmem_map)
  13.567 +		memmap_init_zone(start, size, nid, zone, start_pfn);
  13.568 +	else {
  13.569 +		struct memmap_init_callback_data args;
  13.570 +
  13.571 +		args.start = start;
  13.572 +		args.end = start + size;
  13.573 +		args.nid = nid;
  13.574 +		args.zone = zone;
  13.575 +
  13.576 +		efi_memmap_walk(virtual_memmap_init, &args);
  13.577 +	}
  13.578 +}
  13.579 +
  13.580 +int
  13.581 +ia64_pfn_valid (unsigned long pfn)
  13.582 +{
  13.583 +	char byte;
  13.584 +	struct page *pg = pfn_to_page(pfn);
  13.585 +
  13.586 +	return     (__get_user(byte, (char *) pg) == 0)
  13.587 +		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
  13.588 +			|| (__get_user(byte, (char *) (pg + 1) - 1) == 0));
  13.589 +}
  13.590 +EXPORT_SYMBOL(ia64_pfn_valid);
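
ia64_pfn_valid() probes the struct page itself with __get_user(), which returns 0 only if the byte is readable; a hole in the virtual mem_map then faults cleanly instead of crashing. The middle condition decides whether the struct fits on one page, so only one probe is needed. A sketch of that straddle check (assuming 16KB pages and a 56-byte struct page, both purely illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_mask = ~(16384UL - 1), sz = 56;
		unsigned long pg = 16384 - 40;	/* a struct page straddling a page boundary */

		int same_page = (pg & page_mask) == ((pg + sz - 1) & page_mask);
		printf("fits on one page: %s -> probe %s\n",
		       same_page ? "yes" : "no",
		       same_page ? "first byte only" : "first and last byte");
		return 0;
	}
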
  13.591 +
  13.592 +int
  13.593 +find_largest_hole (u64 start, u64 end, void *arg)
  13.594 +{
  13.595 +	u64 *max_gap = arg;
  13.596 +
  13.597 +	static u64 last_end = PAGE_OFFSET;
  13.598 +
  13.599 +	/* NOTE: this algorithm assumes the EFI memmap table is ordered */
  13.600 +
  13.601 +#ifdef XEN
  13.602 +//printf("find_largest_hole: start=%lx,end=%lx,max_gap=%lx\n",start,end,*(unsigned long *)arg);
  13.603 +#endif
  13.604 +	if (*max_gap < (start - last_end))
  13.605 +		*max_gap = start - last_end;
  13.606 +	last_end = end;
  13.607 +#ifdef XEN
  13.608 +//printf("find_largest_hole2: max_gap=%lx,last_end=%lx\n",*max_gap,last_end);
  13.609 +#endif
  13.610 +	return 0;
  13.611 +}
  13.612 +#endif /* CONFIG_VIRTUAL_MEM_MAP */
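
find_largest_hole() above leans on efi_memmap_walk() presenting the WB ranges in ascending order; here is a self-contained sketch of the same scan over a made-up (purely illustrative) memory map:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical ordered (start, end) ranges, as the EFI walk would deliver */
		unsigned long ranges[][2] = {
			{ 0x0000000, 0x00a0000 },
			{ 0x0100000, 0x0400000 },
			{ 0x0800000, 0x0c00000 },
		};
		unsigned long last_end = 0, max_gap = 0;
		unsigned int i;

		for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
			if (ranges[i][0] - last_end > max_gap)
				max_gap = ranges[i][0] - last_end;
			last_end = ranges[i][1];
		}
		printf("largest hole: %#lx bytes\n", max_gap);	/* 0x400000 */
		return 0;
	}
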
  13.613 +
  13.614 +static int
  13.615 +count_reserved_pages (u64 start, u64 end, void *arg)
  13.616 +{
  13.617 +	unsigned long num_reserved = 0;
  13.618 +	unsigned long *count = arg;
  13.619 +
  13.620 +	for (; start < end; start += PAGE_SIZE)
  13.621 +		if (PageReserved(virt_to_page(start)))
  13.622 +			++num_reserved;
  13.623 +	*count += num_reserved;
  13.624 +	return 0;
  13.625 +}
  13.626 +
  13.627 +/*
  13.628 + * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
  13.629 + * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
  13.630 + * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
  13.631 + * useful for performance testing, but conceivably could also come in handy for debugging
  13.632 + * purposes.
  13.633 + */
  13.634 +
  13.635 +static int nolwsys;
  13.636 +
  13.637 +static int __init
  13.638 +nolwsys_setup (char *s)
  13.639 +{
  13.640 +	nolwsys = 1;
  13.641 +	return 1;
  13.642 +}
  13.643 +
  13.644 +__setup("nolwsys", nolwsys_setup);
  13.645 +
  13.646 +void
  13.647 +mem_init (void)
  13.648 +{
  13.649 +	long reserved_pages, codesize, datasize, initsize;
  13.650 +	unsigned long num_pgt_pages;
  13.651 +	pg_data_t *pgdat;
  13.652 +	int i;
  13.653 +#ifndef XEN
  13.654 +	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
  13.655 +#endif
  13.656 +
  13.657 +#ifdef CONFIG_PCI
  13.658 +	/*
  13.659 +	 * This needs to be called _after_ the command line has been parsed but _before_
  13.660 +	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
  13.661 +	 * been freed.
  13.662 +	 */
  13.663 +	platform_dma_init();
  13.664 +#endif
  13.665 +
  13.666 +#ifndef CONFIG_DISCONTIGMEM
  13.667 +	if (!mem_map)
  13.668 +		BUG();
  13.669 +	max_mapnr = max_low_pfn;
  13.670 +#endif
  13.671 +
  13.672 +	high_memory = __va(max_low_pfn * PAGE_SIZE);
  13.673 +
  13.674 +#ifndef XEN
  13.675 +	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
  13.676 +	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
  13.677 +	kclist_add(&kcore_kernel, _stext, _end - _stext);
  13.678 +#endif
  13.679 +
  13.680 +	for_each_pgdat(pgdat)
  13.681 +		totalram_pages += free_all_bootmem_node(pgdat);
  13.682 +
  13.683 +	reserved_pages = 0;
  13.684 +	efi_memmap_walk(count_reserved_pages, &reserved_pages);
  13.685 +
  13.686 +	codesize =  (unsigned long) _etext - (unsigned long) _stext;
  13.687 +	datasize =  (unsigned long) _edata - (unsigned long) _etext;
  13.688 +	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
  13.689 +
  13.690 +	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
  13.691 +	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
  13.692 +	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
  13.693 +	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
  13.694 +
  13.695 +	/*
  13.696 +	 * Allow for enough (cached) page table pages so that we can map the entire memory
  13.697 +	 * at least once.  Each task also needs a couple of page table pages, so add in a
  13.698 +	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
  13.699 +	 * Don't allow the cache to be more than 10% of total memory, though.
  13.700 +	 */
  13.701 +#	define NUM_TASKS	500	/* typical number of tasks */
  13.702 +	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
  13.703 +	if (num_pgt_pages > nr_free_pages() / 10)
  13.704 +		num_pgt_pages = nr_free_pages() / 10;
  13.705 +	if (num_pgt_pages > (u64) pgt_cache_water[1])
  13.706 +		pgt_cache_water[1] = num_pgt_pages;
  13.707 +
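
As a worked instance of the sizing rule above (assuming 16KB pages, hence PTRS_PER_PGD = 2048, and 65536 free pages, i.e. 1GB free; illustrative numbers only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long nr_free_pages = 65536, ptrs_per_pgd = 2048, num_tasks = 500;
		unsigned long num_pgt_pages = nr_free_pages / ptrs_per_pgd + num_tasks;

		if (num_pgt_pages > nr_free_pages / 10)		/* cap the cache at 10% of memory */
			num_pgt_pages = nr_free_pages / 10;
		printf("pgt cache high-water mark: %lu pages\n", num_pgt_pages);	/* 532 */
		return 0;
	}
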
  13.708 +#ifndef XEN
  13.709 +	/*
  13.710 +	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
  13.711 +	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
  13.712 +	 * code can tell them apart.
  13.713 +	 */
  13.714 +	for (i = 0; i < NR_syscalls; ++i) {
  13.715 +		extern unsigned long fsyscall_table[NR_syscalls];
  13.716 +		extern unsigned long sys_call_table[NR_syscalls];
  13.717 +
  13.718 +		if (!fsyscall_table[i] || nolwsys)
  13.719 +			fsyscall_table[i] = sys_call_table[i] | 1;
  13.720 +	}
  13.721 +#endif
  13.722 +	setup_gate();	/* setup gate pages before we free up boot memory... */
  13.723 +
  13.724 +#ifdef CONFIG_IA32_SUPPORT
  13.725 +	ia32_boot_gdt_init();
  13.726 +#endif
  13.727 +}
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/bootmem.h	Mon Feb 14 12:25:30 2005 +0000
    14.3 @@ -0,0 +1,12 @@
    14.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/bootmem.h	2004-06-15 23:19:52.000000000 -0600
    14.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/bootmem.h	2004-08-25 19:28:13.000000000 -0600
    14.6 +@@ -41,7 +41,9 @@
    14.7 + extern void __init free_bootmem (unsigned long addr, unsigned long size);
    14.8 + extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
    14.9 + #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
   14.10 ++#ifndef XEN
   14.11 + extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
   14.12 ++#endif
   14.13 + #define alloc_bootmem(x) \
   14.14 + 	__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
   14.15 + #define alloc_bootmem_low(x) \
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/current.h	Mon Feb 14 12:25:30 2005 +0000
    15.3 @@ -0,0 +1,17 @@
    15.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/current.h	2004-06-15 23:19:52.000000000 -0600
    15.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/current.h	2004-08-25 19:28:12.000000000 -0600
    15.6 +@@ -12,6 +12,14 @@
    15.7 +  * In kernel mode, thread pointer (r13) is used to point to the current task
    15.8 +  * structure.
    15.9 +  */
   15.10 ++#ifdef XEN
   15.11 ++struct domain;
   15.12 ++#define get_current()	((struct exec_domain *) ia64_getreg(_IA64_REG_TP))
   15.13 ++#define current get_current()
   15.14 ++//#define set_current(d)	ia64_setreg(_IA64_REG_TP,(void *)d);
   15.15 ++#define set_current(d)		(ia64_r13 = (void *)d)
   15.16 ++#else
   15.17 + #define current	((struct task_struct *) ia64_getreg(_IA64_REG_TP))
   15.18 ++#endif
   15.19 + 
   15.20 + #endif /* _ASM_IA64_CURRENT_H */
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/efi.c	Mon Feb 14 12:25:30 2005 +0000
    16.3 @@ -0,0 +1,85 @@
    16.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/efi.c	2004-06-15 23:18:55.000000000 -0600
    16.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/efi.c	2004-12-17 13:47:03.000000000 -0700
    16.6 +@@ -25,6 +25,9 @@
    16.7 + #include <linux/types.h>
    16.8 + #include <linux/time.h>
    16.9 + #include <linux/efi.h>
   16.10 ++#ifdef XEN
   16.11 ++#include <xen/sched.h>
   16.12 ++#endif
   16.13 + 
   16.14 + #include <asm/io.h>
   16.15 + #include <asm/kregs.h>
   16.16 +@@ -49,7 +52,10 @@
   16.17 + {												\
   16.18 + 	struct ia64_fpreg fr[6];								\
   16.19 + 	efi_status_t ret;									\
   16.20 ++	efi_time_cap_t *atc = NULL;								\
   16.21 + 												\
   16.22 ++	if (tc)											\
   16.23 ++		atc = adjust_arg(tc);								\
   16.24 + 	ia64_save_scratch_fpregs(fr);								\
   16.25 + 	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm),	\
   16.26 + 				adjust_arg(tc));						\
   16.27 +@@ -201,6 +207,7 @@
   16.28 + 	if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
   16.29 + 		return;
   16.30 + 
   16.31 ++	dummy();
   16.32 + 	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
   16.33 + 	ts->tv_nsec = tm.nanosecond;
   16.34 + }
   16.35 +@@ -303,6 +310,10 @@
   16.36 + 		if (!(md->attribute & EFI_MEMORY_WB))
   16.37 + 			continue;
   16.38 + 
   16.39 ++#ifdef XEN
   16.40 ++// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
   16.41 ++		if (md->phys_addr >= 0x100000000) continue;
   16.42 ++#endif
   16.43 + 		/*
   16.44 + 		 * granule_addr is the base of md's first granule.
   16.45 + 		 * [granule_addr - first_non_wb_addr) is guaranteed to
   16.46 +@@ -456,9 +467,11 @@
   16.47 + 
   16.48 + 		cpu = smp_processor_id();
   16.49 + 
   16.50 ++#ifndef XEN
   16.51 + 		/* insert this TR into our list for MCA recovery purposes */
   16.52 + 		ia64_mca_tlb_list[cpu].pal_base = vaddr & mask;
   16.53 + 		ia64_mca_tlb_list[cpu].pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
   16.54 ++#endif
   16.55 + 	}
   16.56 + }
   16.57 + 
   16.58 +@@ -680,6 +693,30 @@
   16.59 + 	return 0;
   16.60 + }
   16.61 + 
   16.62 ++#ifdef XEN
   16.63 ++// variation of efi_get_iobase which returns entire memory descriptor
   16.64 ++efi_memory_desc_t *
   16.65 ++efi_get_io_md (void)
   16.66 ++{
   16.67 ++	void *efi_map_start, *efi_map_end, *p;
   16.68 ++	efi_memory_desc_t *md;
   16.69 ++	u64 efi_desc_size;
   16.70 ++
   16.71 ++	efi_map_start = __va(ia64_boot_param->efi_memmap);
   16.72 ++	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
   16.73 ++	efi_desc_size = ia64_boot_param->efi_memdesc_size;
   16.74 ++
   16.75 ++	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
   16.76 ++		md = p;
   16.77 ++		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
   16.78 ++			if (md->attribute & EFI_MEMORY_UC)
   16.79 ++				return md;
   16.80 ++		}
   16.81 ++	}
   16.82 ++	return 0;
   16.83 ++}
   16.84 ++#endif
   16.85 ++
   16.86 + u32
   16.87 + efi_mem_type (unsigned long phys_addr)
   16.88 + {
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/efi.h	Mon Feb 14 12:25:30 2005 +0000
    17.3 @@ -0,0 +1,13 @@
    17.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/efi.h	2004-06-15 23:20:03.000000000 -0600
    17.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/efi.h	2004-08-25 19:28:13.000000000 -0600
    17.6 +@@ -15,8 +15,10 @@
    17.7 + #include <linux/string.h>
    17.8 + #include <linux/time.h>
    17.9 + #include <linux/types.h>
   17.10 ++#ifndef XEN
   17.11 + #include <linux/proc_fs.h>
   17.12 + #include <linux/rtc.h>
   17.13 ++#endif
   17.14 + #include <linux/ioport.h>
   17.15 + 
   17.16 + #include <asm/page.h>
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/entry.S	Mon Feb 14 12:25:30 2005 +0000
    18.3 @@ -0,0 +1,195 @@
    18.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/entry.S	2005-01-23 13:23:36.000000000 -0700
    18.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/entry.S	2004-12-17 13:47:03.000000000 -0700
    18.6 +@@ -35,7 +35,9 @@
    18.7 + 
    18.8 + #include <asm/asmmacro.h>
    18.9 + #include <asm/cache.h>
   18.10 ++#ifndef XEN
   18.11 + #include <asm/errno.h>
   18.12 ++#endif
   18.13 + #include <asm/kregs.h>
   18.14 + #include <asm/offsets.h>
   18.15 + #include <asm/pgtable.h>
   18.16 +@@ -46,6 +48,23 @@
   18.17 + 
   18.18 + #include "minstate.h"
   18.19 + 
   18.20 ++#ifdef XEN
   18.21 ++#define	sys_execve 0
   18.22 ++#define do_fork 0
   18.23 ++#define	syscall_trace 0
   18.24 ++#define schedule 0
   18.25 ++#define do_notify_resume_user 0
   18.26 ++#define ia64_rt_sigsuspend 0
   18.27 ++#define ia64_rt_sigreturn 0
   18.28 ++#define	ia64_handle_unaligned 0
   18.29 ++#define	errno 0
   18.30 ++#define	sys_ni_syscall 0
   18.31 ++#define unw_init_frame_info 0
   18.32 ++#define sys_call_table 0
   18.33 ++#endif
   18.34 ++
   18.35 ++
   18.36 ++
   18.37 + 	/*
   18.38 + 	 * execve() is special because in case of success, we need to
   18.39 + 	 * setup a null register window frame.
   18.40 +@@ -178,11 +197,14 @@
   18.41 + 	DO_SAVE_SWITCH_STACK
   18.42 + 	.body
   18.43 + 
   18.44 ++#ifdef XEN
   18.45 ++//#undef IA64_TASK_THREAD_KSP_OFFSET
   18.46 ++//#define	IA64_TASK_THREAD_KSP_OFFSET	0x38
   18.47 + 	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
   18.48 + 	movl r25=init_task
   18.49 + 	mov r27=IA64_KR(CURRENT_STACK)
   18.50 + 	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
   18.51 +-	dep r20=0,in0,61,3		// physical address of "current"
   18.52 ++	dep r20=0,in0,50,14		// physical address of "current"
   18.53 + 	;;
   18.54 + 	st8 [r22]=sp			// save kernel stack pointer of old task
   18.55 + 	shr.u r26=r20,IA64_GRANULE_SHIFT
   18.56 +@@ -194,6 +216,22 @@
   18.57 + (p6)	cmp.eq p7,p6=r26,r27
   18.58 + (p6)	br.cond.dpnt .map
   18.59 + 	;;
   18.60 ++#else
   18.61 ++	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
   18.62 ++	mov r27=IA64_KR(CURRENT_STACK)
   18.63 ++	dep r20=0,in0,61,3		// physical address of "current"
   18.64 ++	;;
   18.65 ++	st8 [r22]=sp			// save kernel stack pointer of old task
   18.66 ++	shr.u r26=r20,IA64_GRANULE_SHIFT
   18.67 ++	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
   18.68 ++	;;
   18.69 ++	/*
   18.70 ++	 * If we've already mapped this task's page, we can skip doing it again.
   18.71 ++	 */
   18.72 ++	cmp.eq p7,p6=r26,r27
   18.73 ++(p6)	br.cond.dpnt .map
   18.74 ++	;;
   18.75 ++#endif
   18.76 + .done:
   18.77 + (p6)	ssm psr.ic			// if we had to map, re-enable the psr.ic bit FIRST!!!
   18.78 + 	;;
   18.79 +@@ -211,6 +249,16 @@
   18.80 + 	br.ret.sptk.many rp		// boogie on out in new context
   18.81 + 
   18.82 + .map:
   18.83 ++#ifdef XEN
   18.84 ++	// avoid overlapping with kernel TR
   18.85 ++	movl r25=KERNEL_START
   18.86 ++	dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
   18.87 ++	;;
   18.88 ++	cmp.eq p7,p0=r25,r23
   18.89 ++	;;
   18.90 ++(p7)	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
   18.91 ++(p7)	br.cond.sptk .done
   18.92 ++#endif
   18.93 + 	rsm psr.ic			// interrupts (psr.i) are already disabled here
   18.94 + 	movl r25=PAGE_KERNEL
   18.95 + 	;;
   18.96 +@@ -367,7 +415,11 @@
   18.97 +  *	- b7 holds address to return to
   18.98 +  *	- must not touch r8-r11
   18.99 +  */
  18.100 ++#ifdef XEN
  18.101 ++GLOBAL_ENTRY(load_switch_stack)
  18.102 ++#else
  18.103 + ENTRY(load_switch_stack)
  18.104 ++#endif
  18.105 + 	.prologue
  18.106 + 	.altrp b7
  18.107 + 
  18.108 +@@ -595,6 +647,11 @@
  18.109 + 	 */
  18.110 + 	br.call.sptk.many rp=ia64_invoke_schedule_tail
  18.111 + }
  18.112 ++#ifdef XEN
  18.113 ++	// new domains are cloned but not exec'ed so switch to user mode here
  18.114 ++	cmp.ne pKStk,pUStk=r0,r0
  18.115 ++	br.cond.spnt ia64_leave_kernel
  18.116 ++#else
  18.117 + .ret8:
  18.118 + 	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
  18.119 + 	;;
  18.120 +@@ -603,6 +660,7 @@
  18.121 + 	mov r8=0
  18.122 + 	tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
  18.123 + (p6)	br.cond.spnt .strace_check_retval
  18.124 ++#endif
  18.125 + 	;;					// added stop bits to prevent r8 dependency
  18.126 + END(ia64_ret_from_clone)
  18.127 + 	// fall through
  18.128 +@@ -684,9 +742,14 @@
  18.129 + #endif /* CONFIG_PREEMPT */
  18.130 + 	adds r16=PT(LOADRS)+16,r12
  18.131 + 	adds r17=PT(AR_BSPSTORE)+16,r12
  18.132 ++#ifdef XEN
  18.133 ++	mov r31=r0
  18.134 ++	;;
  18.135 ++#else
  18.136 + 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
  18.137 + 	;;
  18.138 + (p6)	ld4 r31=[r18]				// load current_thread_info()->flags
  18.139 ++#endif
  18.140 + 	ld8 r19=[r16],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
  18.141 + 	nop.i 0
  18.142 + 	;;
  18.143 +@@ -745,7 +808,11 @@
  18.144 + 	mov b7=r0		// clear b7
  18.145 + 	;;
  18.146 + (pUStk) st1 [r14]=r3
  18.147 ++#ifdef XEN
  18.148 ++	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
  18.149 ++#else
  18.150 + 	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
  18.151 ++#endif
  18.152 + 	;;
  18.153 + 	mov r16=ar.bsp		// get existing backing store pointer
  18.154 + 	srlz.i			// ensure interruption collection is off
  18.155 +@@ -796,9 +863,18 @@
  18.156 + 	;;
  18.157 + (p6)	cmp.eq.unc p6,p0=r21,r0		// p6 <- p6 && (r21 == 0)
  18.158 + #endif /* CONFIG_PREEMPT */
  18.159 ++#ifdef XEN
  18.160 ++	alloc loc0=ar.pfs,0,1,1,0
  18.161 ++	adds out0=16,r12
  18.162 ++	;;
  18.163 ++(p6)	br.call.sptk.many b0=deliver_pending_interrupt
  18.164 ++	mov ar.pfs=loc0
  18.165 ++	mov r31=r0
  18.166 ++#else
  18.167 + 	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
  18.168 + 	;;
  18.169 + (p6)	ld4 r31=[r17]				// load current_thread_info()->flags
  18.170 ++#endif
  18.171 + 	adds r21=PT(PR)+16,r12
  18.172 + 	;;
  18.173 + 
  18.174 +@@ -912,7 +988,11 @@
  18.175 + 	shr.u r18=r19,16	// get byte size of existing "dirty" partition
  18.176 + 	;;
  18.177 + 	mov r16=ar.bsp		// get existing backing store pointer
  18.178 ++#ifdef XEN
  18.179 ++	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
  18.180 ++#else
  18.181 + 	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
  18.182 ++#endif
  18.183 + 	;;
  18.184 + 	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
  18.185 + (pKStk)	br.cond.dpnt skip_rbs_switch
  18.186 +@@ -1264,6 +1344,7 @@
  18.187 + 	br.ret.sptk.many rp
  18.188 + END(unw_init_running)
  18.189 + 
  18.190 ++#ifndef XEN
  18.191 + 	.rodata
  18.192 + 	.align 8
  18.193 + 	.globl sys_call_table
  18.194 +@@ -1526,3 +1607,4 @@
  18.195 + 	data8 sys_ni_syscall
  18.196 + 
  18.197 + 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
  18.198 ++#endif
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h	Mon Feb 14 12:25:30 2005 +0000
    19.3 @@ -0,0 +1,20 @@
    19.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/gcc_intrin.h	2005-01-23 13:23:36.000000000 -0700
    19.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/gcc_intrin.h	2004-08-25 19:28:13.000000000 -0600
    19.6 +@@ -92,6 +92,9 @@
    19.7 + 
    19.8 + #define ia64_hint_pause 0
    19.9 + 
   19.10 ++#ifdef XEN
   19.11 ++#define ia64_hint(mode)	0
   19.12 ++#else
   19.13 + #define ia64_hint(mode)						\
   19.14 + ({								\
   19.15 + 	switch (mode) {						\
   19.16 +@@ -100,6 +103,7 @@
   19.17 + 		break;						\
   19.18 + 	}							\
   19.19 + })
   19.20 ++#endif
   19.21 + 
   19.22 + 
   19.23 + /* Integer values for mux1 instruction */
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/hardirq.h	Mon Feb 14 12:25:30 2005 +0000
    20.3 @@ -0,0 +1,22 @@
    20.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hardirq.h	2004-06-15 23:19:02.000000000 -0600
    20.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hardirq.h	2004-12-17 13:47:03.000000000 -0700
    20.6 +@@ -81,10 +81,19 @@
    20.7 +  */
    20.8 + #define in_irq()		(hardirq_count())
    20.9 + #define in_softirq()		(softirq_count())
   20.10 ++#ifdef XEN
   20.11 + #define in_interrupt()		(irq_count())
   20.12 ++#else
   20.13 ++#define in_interrupt()		0		// FIXME LATER
   20.14 ++#endif
   20.15 + 
   20.16 ++#ifdef XEN
   20.17 ++#define hardirq_trylock(cpu)	(!in_interrupt())
   20.18 ++#define hardirq_endlock(cpu)	do { } while (0)
   20.19 ++#else
   20.20 + #define hardirq_trylock()	(!in_interrupt())
   20.21 + #define hardirq_endlock()	do { } while (0)
   20.22 ++#endif
   20.23 + 
   20.24 + #ifdef CONFIG_PREEMPT
   20.25 + # include <linux/smp_lock.h>
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/head.S	Mon Feb 14 12:25:30 2005 +0000
    21.3 @@ -0,0 +1,93 @@
    21.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/head.S	2005-01-23 13:23:36.000000000 -0700
    21.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/head.S	2004-12-17 13:47:03.000000000 -0700
    21.6 +@@ -1,3 +1,8 @@
    21.7 ++#ifdef XEN
    21.8 ++#define	console_print	printf
    21.9 ++#define kernel_thread_helper 0
   21.10 ++#define sys_exit 0
   21.11 ++#endif
   21.12 + /*
   21.13 +  * Here is where the ball gets rolling as far as the kernel is concerned.
   21.14 +  * When control is transferred to _start, the bootload has already
   21.15 +@@ -166,7 +171,11 @@
   21.16 + 	dep r18=0,r3,0,12
   21.17 + 	;;
   21.18 + 	or r18=r17,r18
   21.19 ++#ifdef XEN
   21.20 ++	dep r2=-1,r3,50,14	// IMVA of task
   21.21 ++#else
   21.22 + 	dep r2=-1,r3,61,3	// IMVA of task
   21.23 ++#endif
   21.24 + 	;;
   21.25 + 	mov r17=rr[r2]
   21.26 + 	;;
   21.27 +@@ -205,7 +214,11 @@
   21.28 + 	;;
   21.29 + 	mov ar.rsc=0x3		// place RSE in eager mode
   21.30 + 
   21.31 ++#ifdef XEN
   21.32 ++(isBP)	dep r28=-1,r28,50,14	// make address virtual
   21.33 ++#else
   21.34 + (isBP)	dep r28=-1,r28,61,3	// make address virtual
   21.35 ++#endif
   21.36 + (isBP)	movl r2=ia64_boot_param
   21.37 + 	;;
   21.38 + (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
   21.39 +@@ -238,14 +251,30 @@
   21.40 + 	br.call.sptk.many rp=sys_fw_init
   21.41 + .ret1:
   21.42 + #endif
   21.43 ++#ifdef XEN
   21.44 ++	alloc r2=ar.pfs,8,0,2,0
   21.45 ++	;;
   21.46 ++#define fake_mbi_magic 0
   21.47 ++#define MULTIBOOT_INFO_SIZE	1024
   21.48 ++	.rodata
   21.49 ++fake_mbi:
   21.50 ++	.skip MULTIBOOT_INFO_SIZE
   21.51 ++	.previous
   21.52 ++	movl out0=fake_mbi
   21.53 ++	;;
   21.54 ++	br.call.sptk.many rp=cmain
   21.55 ++#else
   21.56 + 	br.call.sptk.many rp=start_kernel
   21.57 ++#endif
   21.58 + .ret2:	addl r3=@ltoff(halt_msg),gp
   21.59 + 	;;
   21.60 + 	alloc r2=ar.pfs,8,0,2,0
   21.61 + 	;;
   21.62 + 	ld8 out0=[r3]
   21.63 + 	br.call.sptk.many b0=console_print
   21.64 ++	;;
   21.65 + self:	br.sptk.many self		// endless loop
   21.66 ++	;;
   21.67 + END(_start)
   21.68 + 
   21.69 + GLOBAL_ENTRY(ia64_save_debug_regs)
   21.70 +@@ -781,8 +810,13 @@
   21.71 + 	movl r18=KERNEL_START
   21.72 + 	dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
   21.73 + 	dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
   21.74 ++#ifdef XEN
   21.75 ++	dep r17=-1,r17,50,14
   21.76 ++	dep sp=-1,sp,50,14
   21.77 ++#else
   21.78 + 	dep r17=-1,r17,61,3
   21.79 + 	dep sp=-1,sp,61,3
   21.80 ++#endif
   21.81 + 	;;
   21.82 + 	or r3=r3,r18
   21.83 + 	or r14=r14,r18
   21.84 +@@ -838,7 +872,12 @@
   21.85 +  * intermediate precision so that we can produce a full 64-bit result.
   21.86 +  */
   21.87 + GLOBAL_ENTRY(sched_clock)
   21.88 ++#ifdef XEN
   21.89 ++	break 0;;	// FIX IA64_CPUINFO_NSEC_PER_CYC_OFFSET
   21.90 ++	//movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
   21.91 ++#else
   21.92 + 	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
   21.93 ++#endif
   21.94 + 	mov.m r9=ar.itc		// fetch cycle-counter				(35 cyc)
   21.95 + 	;;
   21.96 + 	ldf8 f8=[r8]
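
The recurring 61,3 -> 50,14 change in this patch swaps Linux's "set the top three bits" (region-7 kernel addresses) for Xen's "set the top fourteen bits", yielding the 0xfffc... addresses the ivt.S patch tests for. A sketch of what the dep r=-1,r,pos,len deposits compute (ORing in an all-ones field is equivalent when the deposited value is all ones):

	#include <stdio.h>

	static unsigned long dep_ones(unsigned long v, int pos, int len)
	{
		return v | (((1UL << len) - 1) << pos);	/* deposit `len' one-bits at bit `pos' */
	}

	int main(void)
	{
		unsigned long phys = 0x4000000;		/* hypothetical physical address */

		printf("linux virt: %#lx\n", dep_ones(phys, 61, 3));	/* 0xe000000004000000 */
		printf("xen   virt: %#lx\n", dep_ones(phys, 50, 14));	/* 0xfffc000004000000 */
		return 0;
	}
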
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c	Mon Feb 14 12:25:30 2005 +0000
    22.3 @@ -0,0 +1,36 @@
    22.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_irq.c	2004-06-15 23:20:26.000000000 -0600
    22.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/hpsim_irq.c	2004-11-01 17:54:15.000000000 -0700
    22.6 +@@ -9,7 +9,17 @@
    22.7 + #include <linux/kernel.h>
    22.8 + #include <linux/sched.h>
    22.9 + #include <linux/irq.h>
   22.10 ++#ifdef XEN
   22.11 ++#include <asm/hw_irq.h>
   22.12 ++#endif
   22.13 + 
   22.14 ++#if 1
   22.15 ++void __init
   22.16 ++hpsim_irq_init (void)
   22.17 ++{
   22.18 ++	printf("*** hpsim_irq_init called: NOT NEEDED?!?!?\n");
   22.19 ++}
   22.20 ++#else
   22.21 + static unsigned int
   22.22 + hpsim_irq_startup (unsigned int irq)
   22.23 + {
   22.24 +@@ -19,6 +29,10 @@
   22.25 + static void
   22.26 + hpsim_irq_noop (unsigned int irq)
   22.27 + {
   22.28 ++#if 1
   22.29 ++printf("hpsim_irq_noop: irq=%d\n",irq);
   22.30 ++while(irq);
   22.31 ++#endif
   22.32 + }
   22.33 + 
   22.34 + static struct hw_interrupt_type irq_type_hp_sim = {
   22.35 +@@ -44,3 +58,4 @@
   22.36 + 			idesc->handler = &irq_type_hp_sim;
   22.37 + 	}
   22.38 + }
   22.39 ++#endif
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h	Mon Feb 14 12:25:30 2005 +0000
    23.3 @@ -0,0 +1,26 @@
    23.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_ssc.h	2004-06-15 23:19:43.000000000 -0600
    23.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hpsim_ssc.h	2004-08-29 01:04:23.000000000 -0600
    23.6 +@@ -33,4 +33,23 @@
    23.7 +  */
    23.8 + extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    23.9 + 
   23.10 ++#ifdef XEN
   23.11 ++/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
   23.12 ++ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
   23.13 ++#define SSC_OPEN			50
   23.14 ++#define SSC_CLOSE			51
   23.15 ++#define SSC_READ			52
   23.16 ++#define SSC_WRITE			53
   23.17 ++#define SSC_GET_COMPLETION		54
   23.18 ++#define SSC_WAIT_COMPLETION		55
   23.19 ++
   23.20 ++#define SSC_WRITE_ACCESS		2
   23.21 ++#define SSC_READ_ACCESS			1
   23.22 ++
   23.23 ++struct ssc_disk_req {
   23.24 ++	unsigned long addr;
   23.25 ++	unsigned long len;
   23.26 ++};
   23.27 ++#endif
   23.28 ++
   23.29 + #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
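
These SSC numbers drive the HP simulator's pseudo-device traps. A hedged sketch of the call shape follows; the argument order is modeled loosely on linux/arch/ia64/hp/sim/simscsi.c and is illustrative rather than authoritative, and ia64_ssc() is stubbed out so the fragment runs anywhere:

	#include <stdio.h>

	#define SSC_OPEN		50
	#define SSC_READ		52
	#define SSC_WAIT_COMPLETION	55
	#define SSC_READ_ACCESS		1

	struct ssc_disk_req { unsigned long addr; unsigned long len; };

	static long ia64_ssc(long a0, long a1, long a2, long a3, int nr)
	{
		/* stub standing in for the real simulator trap */
		printf("ssc(%ld, %ld, %#lx, %ld, nr=%d)\n", a0, a1, (unsigned long) a2, a3, nr);
		return 3;	/* pretend file descriptor */
	}

	int main(void)
	{
		static char buf[512];
		struct ssc_disk_req req = { (unsigned long) buf, sizeof(buf) };
		long fd = ia64_ssc((long) "disk0", SSC_READ_ACCESS, 0, 0, SSC_OPEN);

		ia64_ssc(fd, 1, (long) &req, 0 /* disk offset */, SSC_READ);
		ia64_ssc(fd, 0, 0, 0, SSC_WAIT_COMPLETION);
		return 0;
	}
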
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/hw_irq.h	Mon Feb 14 12:25:30 2005 +0000
    24.3 @@ -0,0 +1,24 @@
    24.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hw_irq.h	2004-06-15 23:19:22.000000000 -0600
    24.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hw_irq.h	2004-08-27 09:07:38.000000000 -0600
    24.6 +@@ -9,7 +9,9 @@
    24.7 + #include <linux/interrupt.h>
    24.8 + #include <linux/sched.h>
    24.9 + #include <linux/types.h>
   24.10 ++#ifndef XEN
   24.11 + #include <linux/profile.h>
   24.12 ++#endif
   24.13 + 
   24.14 + #include <asm/machvec.h>
   24.15 + #include <asm/ptrace.h>
   24.16 +@@ -96,7 +98,11 @@
   24.17 +  * Default implementations for the irq-descriptor API:
   24.18 +  */
   24.19 + 
   24.20 ++#ifdef XEN
   24.21 ++#define _irq_desc irq_desc
   24.22 ++#else
   24.23 + extern irq_desc_t _irq_desc[NR_IRQS];
   24.24 ++#endif
   24.25 + 
   24.26 + #ifndef CONFIG_IA64_GENERIC
   24.27 + static inline irq_desc_t *
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/ide.h	Mon Feb 14 12:25:30 2005 +0000
    25.3 @@ -0,0 +1,35 @@
    25.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/ide.h	2004-06-15 23:19:36.000000000 -0600
    25.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/ide.h	2004-08-25 19:28:13.000000000 -0600
    25.6 +@@ -64,6 +64,32 @@
    25.7 + #define ide_init_default_irq(base)	ide_default_irq(base)
    25.8 + #endif
    25.9 + 
   25.10 ++#ifdef XEN
   25.11 ++// this is moved to linux/ide.h in newer versions of linux
   25.12 ++typedef union {
   25.13 ++	unsigned all			: 8;	/* all of the bits together */
   25.14 ++	struct {
   25.15 ++		unsigned head		: 4;	/* always zeros here */
   25.16 ++		unsigned unit		: 1;	/* drive select number, 0 or 1 */
   25.17 ++		unsigned bit5		: 1;	/* always 1 */
   25.18 ++		unsigned lba		: 1;	/* using LBA instead of CHS */
   25.19 ++		unsigned bit7		: 1;	/* always 1 */
   25.20 ++	} b;
   25.21 ++} select_t;
   25.22 ++
   25.23 ++typedef union {
   25.24 ++	unsigned all			: 8;	/* all of the bits together */
   25.25 ++	struct {
   25.26 ++		unsigned bit0		: 1;
   25.27 ++		unsigned nIEN		: 1;	/* device INTRQ to host */
   25.28 ++		unsigned SRST		: 1;	/* host soft reset bit */
   25.29 ++		unsigned bit3		: 1;	/* ATA-2 thingy */
   25.30 ++		unsigned reserved456	: 3;
   25.31 ++		unsigned HOB		: 1;	/* 48-bit address ordering */
   25.32 ++	} b;
   25.33 ++} control_t;
   25.34 ++#endif
   25.35 ++
   25.36 + #include <asm-generic/ide_iops.h>
   25.37 + 
   25.38 + #endif /* __KERNEL__ */
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/init_task.c	Mon Feb 14 12:25:30 2005 +0000
    26.3 @@ -0,0 +1,35 @@
    26.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/init_task.c	2004-06-15 23:20:26.000000000 -0600
    26.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/init_task.c	2004-08-27 00:06:35.000000000 -0600
    26.6 +@@ -15,10 +15,12 @@
    26.7 + #include <asm/uaccess.h>
    26.8 + #include <asm/pgtable.h>
    26.9 + 
   26.10 ++#ifndef XEN
   26.11 + static struct fs_struct init_fs = INIT_FS;
   26.12 + static struct files_struct init_files = INIT_FILES;
   26.13 + static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
   26.14 + static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
   26.15 ++#endif
   26.16 + struct mm_struct init_mm = INIT_MM(init_mm);
   26.17 + 
   26.18 + EXPORT_SYMBOL(init_mm);
   26.19 +@@ -33,13 +35,19 @@
   26.20 + 
   26.21 + union {
   26.22 + 	struct {
   26.23 ++#ifdef XEN
   26.24 ++		struct domain task;
   26.25 ++#else
   26.26 + 		struct task_struct task;
   26.27 + 		struct thread_info thread_info;
   26.28 ++#endif
   26.29 + 	} s;
   26.30 + 	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
   26.31 + } init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
   26.32 + 	.task =		INIT_TASK(init_task_mem.s.task),
   26.33 ++#ifndef XEN
   26.34 + 	.thread_info =	INIT_THREAD_INFO(init_task_mem.s.task)
   26.35 ++#endif
   26.36 + }};
   26.37 + 
   26.38 + EXPORT_SYMBOL(init_task);
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/init_task.h	Mon Feb 14 12:25:30 2005 +0000
    27.3 @@ -0,0 +1,53 @@
    27.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/init_task.h	2004-06-15 23:18:57.000000000 -0600
    27.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/init_task.h	2004-11-15 17:06:20.000000000 -0700
    27.6 +@@ -31,6 +31,18 @@
    27.7 + 	.max_reqs	= ~0U,				\
    27.8 + }
    27.9 + 
   27.10 ++#ifdef XEN
   27.11 ++#define INIT_MM(name) \
   27.12 ++{			 					\
   27.13 ++	.mm_rb		= RB_ROOT,				\
   27.14 ++	.pgd		= swapper_pg_dir, 			\
   27.15 ++	.mm_users	= ATOMIC_INIT(2), 			\
   27.16 ++	.mm_count	= ATOMIC_INIT(1), 			\
   27.17 ++	.page_table_lock =  SPIN_LOCK_UNLOCKED, 		\
   27.18 ++	.mmlist		= LIST_HEAD_INIT(name.mmlist),		\
   27.19 ++	.cpu_vm_mask	= CPU_MASK_ALL,				\
   27.20 ++}
   27.21 ++#else
   27.22 + #define INIT_MM(name) \
   27.23 + {			 					\
   27.24 + 	.mm_rb		= RB_ROOT,				\
   27.25 +@@ -43,6 +55,7 @@
   27.26 + 	.cpu_vm_mask	= CPU_MASK_ALL,				\
   27.27 + 	.default_kioctx = INIT_KIOCTX(name.default_kioctx, name),	\
   27.28 + }
   27.29 ++#endif
   27.30 + 
   27.31 + #define INIT_SIGNALS(sig) {	\
   27.32 + 	.count		= ATOMIC_INIT(1), 		\
   27.33 +@@ -64,6 +77,15 @@
   27.34 +  *  INIT_TASK is used to set up the first task table, touch at
   27.35 +  * your own risk!. Base=0, limit=0x1fffff (=2MB)
   27.36 +  */
   27.37 ++#ifdef XEN
   27.38 ++#define INIT_TASK(tsk) \
   27.39 ++{							\
   27.40 ++	/*processor:	0,*/				\
   27.41 ++	/*id:		IDLE_DOMAIN_ID,*/		\
   27.42 ++	/*flags:	1<<DF_IDLETASK,*/		\
   27.43 ++	refcnt:		ATOMIC_INIT(1)			\
   27.44 ++}
   27.45 ++#else
   27.46 + #define INIT_TASK(tsk)	\
   27.47 + {									\
   27.48 + 	.state		= 0,						\
   27.49 +@@ -113,6 +135,7 @@
   27.50 + 	.switch_lock	= SPIN_LOCK_UNLOCKED,				\
   27.51 + 	.journal_info	= NULL,						\
   27.52 + }
   27.53 ++#endif
   27.54 + 
   27.55 + 
   27.56 + 
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/interrupt.h	Mon Feb 14 12:25:30 2005 +0000
    28.3 @@ -0,0 +1,18 @@
    28.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/interrupt.h	2004-06-15 23:19:29.000000000 -0600
    28.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/interrupt.h	2004-08-25 19:28:13.000000000 -0600
    28.6 +@@ -32,6 +32,7 @@
    28.7 + #define IRQ_HANDLED	(1)
    28.8 + #define IRQ_RETVAL(x)	((x) != 0)
    28.9 + 
   28.10 ++#ifndef XEN
   28.11 + struct irqaction {
   28.12 + 	irqreturn_t (*handler)(int, void *, struct pt_regs *);
   28.13 + 	unsigned long flags;
   28.14 +@@ -46,6 +47,7 @@
   28.15 + 		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
   28.16 + 		       unsigned long, const char *, void *);
   28.17 + extern void free_irq(unsigned int, void *);
   28.18 ++#endif
   28.19 + 
   28.20 + /*
   28.21 +  * Temporary defines for UP kernels, until all code gets fixed.
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/io.h	Mon Feb 14 12:25:30 2005 +0000
    29.3 @@ -0,0 +1,14 @@
    29.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/io.h	2004-06-15 23:18:57.000000000 -0600
    29.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/io.h	2004-11-05 16:53:36.000000000 -0700
    29.6 +@@ -23,7 +23,11 @@
    29.7 + #define __SLOW_DOWN_IO	do { } while (0)
    29.8 + #define SLOW_DOWN_IO	do { } while (0)
    29.9 + 
   29.10 ++#ifdef XEN
   29.11 ++#define __IA64_UNCACHED_OFFSET	0xdffc000000000000	/* region 6 */
   29.12 ++#else
   29.13 + #define __IA64_UNCACHED_OFFSET	0xc000000000000000	/* region 6 */
   29.14 ++#endif
   29.15 + 
   29.16 + /*
   29.17 +  * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
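
The region number of an ia64 virtual address is simply its top three bits, which is why both uncached offsets still land in region 6 while Xen's cached window (the 0xfffc... addresses seen elsewhere in this changeset) sits in region 7:

	#include <stdio.h>

	static unsigned int region(unsigned long va) { return (unsigned int) (va >> 61); }

	int main(void)
	{
		printf("linux uncached: region %u\n", region(0xc000000000000000UL));	/* 6 */
		printf("xen   uncached: region %u\n", region(0xdffc000000000000UL));	/* 6 */
		printf("xen   cached:   region %u\n", region(0xfffc000000000000UL));	/* 7 */
		return 0;
	}
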
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/irq.h	Mon Feb 14 12:25:30 2005 +0000
    30.3 @@ -0,0 +1,18 @@
    30.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/irq.h	2005-01-23 13:23:36.000000000 -0700
    30.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/irq.h	2004-08-25 19:28:13.000000000 -0600
    30.6 +@@ -30,6 +30,15 @@
    30.7 + extern void enable_irq (unsigned int);
    30.8 + extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
    30.9 + 
   30.10 ++#ifdef XEN
   30.11 ++// dup'ed from signal.h to avoid changes to includes
   30.12 ++#define	SA_NOPROFILE	0x02000000
   30.13 ++#define	SA_SHIRQ	0x04000000
   30.14 ++#define	SA_RESTART	0x10000000
   30.15 ++#define	SA_INTERRUPT	0x20000000
   30.16 ++#define	SA_SAMPLE_RANDOM	SA_RESTART
   30.17 ++#endif
   30.18 ++
   30.19 + #ifdef CONFIG_SMP
   30.20 + extern void move_irq(int irq);
   30.21 + #else
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c	Mon Feb 14 12:25:30 2005 +0000
    31.3 @@ -0,0 +1,82 @@
    31.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/irq_ia64.c	2004-06-15 23:19:13.000000000 -0600
    31.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/irq_ia64.c	2004-11-22 17:41:59.000000000 -0700
    31.6 +@@ -17,18 +17,26 @@
    31.7 + #include <linux/config.h>
    31.8 + #include <linux/module.h>
    31.9 + 
   31.10 ++#ifndef XEN
   31.11 + #include <linux/jiffies.h>
   31.12 ++#endif
   31.13 + #include <linux/errno.h>
   31.14 + #include <linux/init.h>
   31.15 + #include <linux/interrupt.h>
   31.16 + #include <linux/ioport.h>
   31.17 ++#ifndef XEN
   31.18 + #include <linux/kernel_stat.h>
   31.19 ++#endif
   31.20 + #include <linux/slab.h>
   31.21 ++#ifndef XEN
   31.22 + #include <linux/ptrace.h>
   31.23 + #include <linux/random.h>	/* for rand_initialize_irq() */
   31.24 + #include <linux/signal.h>
   31.25 ++#endif
   31.26 + #include <linux/smp.h>
   31.27 ++#ifndef XEN
   31.28 + #include <linux/smp_lock.h>
   31.29 ++#endif
   31.30 + #include <linux/threads.h>
   31.31 + 
   31.32 + #include <asm/bitops.h>
   31.33 +@@ -101,6 +109,24 @@
   31.34 + ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
   31.35 + {
   31.36 + 	unsigned long saved_tpr;
   31.37 ++#if 0
   31.38 ++//FIXME: For debug only, can be removed
   31.39 ++	static char firstirq = 1;
   31.40 ++	static char firsttime[256];
   31.41 ++	static char firstpend[256];
   31.42 ++	if (firstirq) {
   31.43 ++		int i;
   31.44 ++		for (i=0;i<256;i++) firsttime[i] = 1;
   31.45 ++		for (i=0;i<256;i++) firstpend[i] = 1;
   31.46 ++		firstirq = 0;
   31.47 ++	}
   31.48 ++	if (firsttime[vector]) {
   31.49 ++		printf("**** (entry) First received int on vector=%d,itc=%lx\n",
   31.50 ++			(unsigned long) vector, ia64_get_itc());
   31.51 ++		firsttime[vector] = 0;
   31.52 ++	}
   31.53 ++#endif
   31.54 ++
   31.55 + 
   31.56 + #if IRQ_DEBUG
   31.57 + 	{
   31.58 +@@ -145,6 +171,27 @@
   31.59 + 			ia64_setreg(_IA64_REG_CR_TPR, vector);
   31.60 + 			ia64_srlz_d();
   31.61 + 
   31.62 ++#ifdef XEN
   31.63 ++	if (vector != 0xef) {
   31.64 ++		extern void vcpu_pend_interrupt(void *, int);
   31.65 ++#if 0
   31.66 ++		if (firsttime[vector]) {
   31.67 ++			printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
   31.68 ++			(unsigned long) vector, ia64_get_itc());
   31.69 ++			firsttime[vector] = 0;
   31.70 ++		}
   31.71 ++		if (firstpend[vector]) {
   31.72 ++			printf("**** First pended int on vector=%d,itc=%lx\n",
   31.73 ++				(unsigned long) vector,ia64_get_itc());
   31.74 ++			firstpend[vector] = 0;
   31.75 ++		}
   31.76 ++#endif
   31.77 ++		//FIXME: TEMPORARY HACK!!!!
   31.78 ++		vcpu_pend_interrupt(dom0,vector);
   31.79 ++		domain_wake(dom0);
   31.80 ++	}
   31.81 ++	else
   31.82 ++#endif
   31.83 + 			do_IRQ(local_vector_to_irq(vector), regs);
   31.84 + 
   31.85 + 			/*
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/ivt.S	Mon Feb 14 12:25:30 2005 +0000
    32.3 @@ -0,0 +1,514 @@
    32.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/ivt.S	2004-06-15 23:18:59.000000000 -0600
    32.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/ivt.S	2004-12-17 13:47:03.000000000 -0700
    32.6 +@@ -1,3 +1,21 @@
    32.7 ++
    32.8 ++#ifdef XEN
    32.9 ++//#define CONFIG_DISABLE_VHPT	// FIXME: change when VHPT is enabled??
   32.10 ++// these are all hacked out for now as the entire IVT
   32.11 ++// will eventually be replaced... just want to use it
   32.12 ++// for startup code to handle TLB misses
   32.13 ++//#define ia64_leave_kernel 0
   32.14 ++//#define ia64_ret_from_syscall 0
   32.15 ++//#define ia64_handle_irq 0
   32.16 ++//#define ia64_fault 0
   32.17 ++#define ia64_illegal_op_fault 0
   32.18 ++#define ia64_prepare_handle_unaligned 0
   32.19 ++#define ia64_bad_break 0
   32.20 ++#define ia64_trace_syscall 0
   32.21 ++#define sys_call_table 0
   32.22 ++#define sys_ni_syscall 0
   32.23 ++#include <asm/vhpt.h>
   32.24 ++#endif
   32.25 + /*
   32.26 +  * arch/ia64/kernel/ivt.S
   32.27 +  *
   32.28 +@@ -76,6 +94,13 @@
   32.29 + 	mov r19=n;;			/* prepare to save predicates */		\
   32.30 + 	br.sptk.many dispatch_to_fault_handler
   32.31 + 
   32.32 ++#ifdef XEN
   32.33 ++#define REFLECT(n)									\
   32.34 ++	mov r31=pr;									\
   32.35 ++	mov r19=n;;			/* prepare to save predicates */		\
   32.36 ++	br.sptk.many dispatch_reflection
   32.37 ++#endif
   32.38 ++
   32.39 + 	.section .text.ivt,"ax"
   32.40 + 
   32.41 + 	.align 32768	// align on 32KB boundary
   32.42 +@@ -213,6 +238,9 @@
   32.43 + // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
   32.44 + ENTRY(itlb_miss)
   32.45 + 	DBG_FAULT(1)
   32.46 ++#ifdef XEN
   32.47 ++	VHPT_CCHAIN_LOOKUP(itlb_miss,i)
   32.48 ++#endif
   32.49 + 	/*
   32.50 + 	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
   32.51 + 	 * page table.  If a nested TLB miss occurs, we switch into physical
   32.52 +@@ -257,6 +285,9 @@
   32.53 + // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
   32.54 + ENTRY(dtlb_miss)
   32.55 + 	DBG_FAULT(2)
   32.56 ++#ifdef XEN
   32.57 ++	VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
   32.58 ++#endif
   32.59 + 	/*
   32.60 + 	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
   32.61 + 	 * page table.  If a nested TLB miss occurs, we switch into physical
   32.62 +@@ -301,6 +332,10 @@
   32.63 + // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
   32.64 + ENTRY(alt_itlb_miss)
   32.65 + 	DBG_FAULT(3)
   32.66 ++#ifdef XEN
   32.67 ++// I think this is superfluous, once all regions have VHPT enabled
   32.68 ++//	VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
   32.69 ++#endif
   32.70 + 	mov r16=cr.ifa		// get address that caused the TLB miss
   32.71 + 	movl r17=PAGE_KERNEL
   32.72 + 	mov r21=cr.ipsr
   32.73 +@@ -339,6 +374,10 @@
   32.74 + // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
   32.75 + ENTRY(alt_dtlb_miss)
   32.76 + 	DBG_FAULT(4)
   32.77 ++#ifdef XEN
   32.78 ++// I think this is superfluous, once all regions have VHPT enabled
   32.79 ++//	VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
   32.80 ++#endif
   32.81 + 	mov r16=cr.ifa		// get address that caused the TLB miss
   32.82 + 	movl r17=PAGE_KERNEL
   32.83 + 	mov r20=cr.isr
   32.84 +@@ -368,6 +407,17 @@
   32.85 + 	cmp.ne p8,p0=r0,r23
   32.86 + (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
   32.87 + (p8)	br.cond.spnt page_fault
   32.88 ++#ifdef XEN
   32.89 ++	;;
   32.90 ++	// FIXME: inadequate test, this is where we test for Xen address
   32.91 ++	// note that 0xfffc (cached) and 0xdffc (uncached) addresses
   32.92 ++	// should be OK.  (Though no I/O is done in Xen, EFI needs uncached
   32.93 ++	// addresses and some domain EFI calls are passed through)
   32.94 ++	tbit.nz p0,p8=r16,50
   32.95 ++(p8)	br.cond.spnt page_fault
   32.96 ++//(p8)	br.cond.spnt 0
   32.97 ++	;;
   32.98 ++#endif
   32.99 + 
  32.100 + 	dep r21=-1,r21,IA64_PSR_ED_BIT,1
  32.101 + 	or r19=r19,r17		// insert PTE control bits into r19
  32.102 +@@ -448,6 +498,9 @@
  32.103 + /////////////////////////////////////////////////////////////////////////////////////////
  32.104 + // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
  32.105 + ENTRY(ikey_miss)
  32.106 ++#ifdef XEN
  32.107 ++	REFLECT(6)
  32.108 ++#endif
  32.109 + 	DBG_FAULT(6)
  32.110 + 	FAULT(6)
  32.111 + END(ikey_miss)
  32.112 +@@ -460,9 +513,16 @@
  32.113 + 	srlz.i
  32.114 + 	;;
  32.115 + 	SAVE_MIN_WITH_COVER
  32.116 ++#ifdef XEN
  32.117 ++	alloc r15=ar.pfs,0,0,4,0
  32.118 ++	mov out0=cr.ifa
  32.119 ++	mov out1=cr.isr
  32.120 ++	mov out3=cr.itir
  32.121 ++#else
  32.122 + 	alloc r15=ar.pfs,0,0,3,0
  32.123 + 	mov out0=cr.ifa
  32.124 + 	mov out1=cr.isr
  32.125 ++#endif
  32.126 + 	adds r3=8,r2				// set up second base pointer
  32.127 + 	;;
  32.128 + 	ssm psr.ic | PSR_DEFAULT_BITS
  32.129 +@@ -483,6 +543,9 @@
  32.130 + /////////////////////////////////////////////////////////////////////////////////////////
  32.131 + // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
  32.132 + ENTRY(dkey_miss)
  32.133 ++#ifdef XEN
  32.134 ++	REFLECT(7)
  32.135 ++#endif
  32.136 + 	DBG_FAULT(7)
  32.137 + 	FAULT(7)
  32.138 + END(dkey_miss)
  32.139 +@@ -491,6 +554,9 @@
  32.140 + /////////////////////////////////////////////////////////////////////////////////////////
  32.141 + // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
  32.142 + ENTRY(dirty_bit)
  32.143 ++#ifdef XEN
  32.144 ++	REFLECT(8)
  32.145 ++#endif
  32.146 + 	DBG_FAULT(8)
  32.147 + 	/*
  32.148 + 	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
  32.149 +@@ -553,6 +619,9 @@
  32.150 + /////////////////////////////////////////////////////////////////////////////////////////
  32.151 + // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
  32.152 + ENTRY(iaccess_bit)
  32.153 ++#ifdef XEN
  32.154 ++	REFLECT(9)
  32.155 ++#endif
  32.156 + 	DBG_FAULT(9)
  32.157 + 	// Like Entry 8, except for instruction access
  32.158 + 	mov r16=cr.ifa				// get the address that caused the fault
  32.159 +@@ -618,6 +687,9 @@
  32.160 + /////////////////////////////////////////////////////////////////////////////////////////
  32.161 + // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
  32.162 + ENTRY(daccess_bit)
  32.163 ++#ifdef XEN
  32.164 ++	REFLECT(10)
  32.165 ++#endif
  32.166 + 	DBG_FAULT(10)
  32.167 + 	// Like Entry 8, except for data access
  32.168 + 	mov r16=cr.ifa				// get the address that caused the fault
  32.169 +@@ -686,6 +758,16 @@
  32.170 + 	 * to prevent leaking bits from kernel to user level.
  32.171 + 	 */
  32.172 + 	DBG_FAULT(11)
  32.173 ++#ifdef XEN
  32.174 ++	mov r16=cr.isr
  32.175 ++	mov r17=cr.iim
  32.176 ++	mov r31=pr
  32.177 ++	;;
  32.178 ++	cmp.eq p7,p0=r0,r17			// is this a pseudo-cover?
  32.179 ++	// FIXME: may also need to check slot==2?
  32.180 ++(p7)	br.sptk.many dispatch_privop_fault
  32.181 ++	br.sptk.many dispatch_break_fault
  32.182 ++#endif
  32.183 + 	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
  32.184 + 	mov r17=cr.iim
  32.185 + 	mov r18=__IA64_BREAK_SYSCALL
  32.186 +@@ -696,7 +778,9 @@
  32.187 + 	mov r27=ar.rsc
  32.188 + 	mov r26=ar.pfs
  32.189 + 	mov r28=cr.iip
  32.190 ++#ifndef XEN
  32.191 + 	mov r31=pr				// prepare to save predicates
  32.192 ++#endif
  32.193 + 	mov r20=r1
  32.194 + 	;;
  32.195 + 	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
  32.196 +@@ -792,6 +876,36 @@
  32.197 + 	DBG_FAULT(13)
  32.198 + 	FAULT(13)
  32.199 + 
  32.200 ++#ifdef XEN
  32.201 ++	// There is no particular reason for this code to be here, other than that
  32.202 ++	// there happens to be space here that would go unused otherwise.  If this
  32.203 ++	// fault ever gets "unreserved", simply move the following code to a more
  32.204 ++	// suitable spot...
  32.205 ++
  32.206 ++ENTRY(dispatch_break_fault)
  32.207 ++	SAVE_MIN_WITH_COVER
  32.208 ++	;;
  32.209 ++	alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
  32.210 ++	mov out0=cr.ifa
  32.211 ++	adds out1=16,sp
  32.212 ++	mov out2=cr.isr		// FIXME: pity to make this slow access twice
  32.213 ++	mov out3=cr.iim		// FIXME: pity to make this slow access twice
  32.214 ++
  32.215 ++	ssm psr.ic | PSR_DEFAULT_BITS
  32.216 ++	;;
  32.217 ++	srlz.i					// guarantee that interruption collection is on
  32.218 ++	;;
  32.219 ++(p15)	ssm psr.i				// restore psr.i
  32.220 ++	adds r3=8,r2				// set up second base pointer
  32.221 ++	;;
  32.222 ++	SAVE_REST
  32.223 ++	movl r14=ia64_leave_kernel
  32.224 ++	;;
  32.225 ++	mov rp=r14
  32.226 ++	br.sptk.many ia64_prepare_handle_break
  32.227 ++END(dispatch_break_fault)
  32.228 ++#endif
  32.229 ++
  32.230 + 	.org ia64_ivt+0x3800
  32.231 + /////////////////////////////////////////////////////////////////////////////////////////
  32.232 + // 0x3800 Entry 14 (size 64 bundles) Reserved
  32.233 +@@ -842,9 +956,11 @@
  32.234 + 	 *	- ar.fpsr: set to kernel settings
  32.235 + 	 */
  32.236 + GLOBAL_ENTRY(ia64_syscall_setup)
  32.237 ++#ifndef XEN
  32.238 + #if PT(B6) != 0
  32.239 + # error This code assumes that b6 is the first field in pt_regs.
  32.240 + #endif
  32.241 ++#endif
  32.242 + 	st8 [r1]=r19				// save b6
  32.243 + 	add r16=PT(CR_IPSR),r1			// initialize first base pointer
  32.244 + 	add r17=PT(R11),r1			// initialize second base pointer
  32.245 +@@ -974,6 +1090,37 @@
  32.246 + 	DBG_FAULT(16)
  32.247 + 	FAULT(16)
  32.248 + 
  32.249 ++#ifdef XEN
  32.250 ++	// There is no particular reason for this code to be here, other than that
  32.251 ++	// there happens to be space here that would go unused otherwise.  If this
  32.252 ++	// fault ever gets "unreserved", simply move the following code to a more
  32.253 ++	// suitable spot...
  32.254 ++
  32.255 ++ENTRY(dispatch_privop_fault)
  32.256 ++	SAVE_MIN_WITH_COVER
  32.257 ++	;;
  32.258 ++	alloc r14=ar.pfs,0,0,4,0		// now it's safe (must be first in insn group!)
  32.259 ++	mov out0=cr.ifa
  32.260 ++	adds out1=16,sp
  32.261 ++	mov out2=cr.isr		// FIXME: pity to make this slow access twice
  32.262 ++	mov out3=cr.itir
  32.263 ++
  32.264 ++	ssm psr.ic | PSR_DEFAULT_BITS
  32.265 ++	;;
  32.266 ++	srlz.i					// guarantee that interruption collection is on
  32.267 ++	;;
  32.268 ++(p15)	ssm psr.i				// restore psr.i
  32.269 ++	adds r3=8,r2				// set up second base pointer
  32.270 ++	;;
  32.271 ++	SAVE_REST
  32.272 ++	movl r14=ia64_leave_kernel
  32.273 ++	;;
  32.274 ++	mov rp=r14
  32.275 ++	br.sptk.many ia64_prepare_handle_privop
  32.276 ++END(dispatch_privop_fault)
  32.277 ++#endif
  32.278 ++
  32.279 ++
  32.280 + 	.org ia64_ivt+0x4400
  32.281 + /////////////////////////////////////////////////////////////////////////////////////////
  32.282 + // 0x4400 Entry 17 (size 64 bundles) Reserved
  32.283 +@@ -1090,6 +1237,9 @@
  32.284 + /////////////////////////////////////////////////////////////////////////////////////////
  32.285 + // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
  32.286 + ENTRY(page_not_present)
  32.287 ++#ifdef XEN
  32.288 ++	REFLECT(20)
  32.289 ++#endif
  32.290 + 	DBG_FAULT(20)
  32.291 + 	mov r16=cr.ifa
  32.292 + 	rsm psr.dt
  32.293 +@@ -1110,6 +1260,9 @@
  32.294 + /////////////////////////////////////////////////////////////////////////////////////////
  32.295 + // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
  32.296 + ENTRY(key_permission)
  32.297 ++#ifdef XEN
  32.298 ++	REFLECT(21)
  32.299 ++#endif
  32.300 + 	DBG_FAULT(21)
  32.301 + 	mov r16=cr.ifa
  32.302 + 	rsm psr.dt
  32.303 +@@ -1123,6 +1276,9 @@
  32.304 + /////////////////////////////////////////////////////////////////////////////////////////
  32.305 + // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
  32.306 + ENTRY(iaccess_rights)
  32.307 ++#ifdef XEN
  32.308 ++	REFLECT(22)
  32.309 ++#endif
  32.310 + 	DBG_FAULT(22)
  32.311 + 	mov r16=cr.ifa
  32.312 + 	rsm psr.dt
  32.313 +@@ -1136,6 +1292,9 @@
  32.314 + /////////////////////////////////////////////////////////////////////////////////////////
  32.315 + // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
  32.316 + ENTRY(daccess_rights)
  32.317 ++#ifdef XEN
  32.318 ++	REFLECT(23)
  32.319 ++#endif
  32.320 + 	DBG_FAULT(23)
  32.321 + 	mov r16=cr.ifa
  32.322 + 	rsm psr.dt
  32.323 +@@ -1153,8 +1312,13 @@
  32.324 + 	mov r16=cr.isr
  32.325 + 	mov r31=pr
  32.326 + 	;;
  32.327 ++#ifdef XEN
  32.328 ++	cmp4.ge p6,p0=0x20,r16
  32.329 ++(p6)	br.sptk.many dispatch_privop_fault
  32.330 ++#else
  32.331 + 	cmp4.eq p6,p0=0,r16
  32.332 + (p6)	br.sptk.many dispatch_illegal_op_fault
  32.333 ++#endif
  32.334 + 	;;
  32.335 + 	mov r19=24		// fault number
  32.336 + 	br.sptk.many dispatch_to_fault_handler
  32.337 +@@ -1164,6 +1328,9 @@
  32.338 + /////////////////////////////////////////////////////////////////////////////////////////
  32.339 + // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
  32.340 + ENTRY(disabled_fp_reg)
  32.341 ++#ifdef XEN
  32.342 ++	REFLECT(25)
  32.343 ++#endif
  32.344 + 	DBG_FAULT(25)
  32.345 + 	rsm psr.dfh		// ensure we can access fph
  32.346 + 	;;
  32.347 +@@ -1177,6 +1344,9 @@
  32.348 + /////////////////////////////////////////////////////////////////////////////////////////
  32.349 + // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
  32.350 + ENTRY(nat_consumption)
  32.351 ++#ifdef XEN
  32.352 ++	REFLECT(26)
  32.353 ++#endif
  32.354 + 	DBG_FAULT(26)
  32.355 + 	FAULT(26)
  32.356 + END(nat_consumption)
  32.357 +@@ -1185,6 +1355,10 @@
  32.358 + /////////////////////////////////////////////////////////////////////////////////////////
  32.359 + // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
  32.360 + ENTRY(speculation_vector)
  32.361 ++#ifdef XEN
  32.362 ++	// this probably need not be reflected...
  32.363 ++	REFLECT(27)
  32.364 ++#endif
  32.365 + 	DBG_FAULT(27)
  32.366 + 	/*
  32.367 + 	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
  32.368 +@@ -1228,6 +1402,9 @@
  32.369 + /////////////////////////////////////////////////////////////////////////////////////////
  32.370 + // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
  32.371 + ENTRY(debug_vector)
  32.372 ++#ifdef XEN
  32.373 ++	REFLECT(29)
  32.374 ++#endif
  32.375 + 	DBG_FAULT(29)
  32.376 + 	FAULT(29)
  32.377 + END(debug_vector)
  32.378 +@@ -1236,6 +1413,9 @@
  32.379 + /////////////////////////////////////////////////////////////////////////////////////////
  32.380 + // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
  32.381 + ENTRY(unaligned_access)
  32.382 ++#ifdef XEN
  32.383 ++	REFLECT(30)
  32.384 ++#endif
  32.385 + 	DBG_FAULT(30)
  32.386 + 	mov r16=cr.ipsr
  32.387 + 	mov r31=pr		// prepare to save predicates
  32.388 +@@ -1247,6 +1427,9 @@
  32.389 + /////////////////////////////////////////////////////////////////////////////////////////
  32.390 + // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
  32.391 + ENTRY(unsupported_data_reference)
  32.392 ++#ifdef XEN
  32.393 ++	REFLECT(31)
  32.394 ++#endif
  32.395 + 	DBG_FAULT(31)
  32.396 + 	FAULT(31)
  32.397 + END(unsupported_data_reference)
  32.398 +@@ -1255,6 +1438,9 @@
  32.399 + /////////////////////////////////////////////////////////////////////////////////////////
  32.400 + // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
  32.401 + ENTRY(floating_point_fault)
  32.402 ++#ifdef XEN
  32.403 ++	REFLECT(32)
  32.404 ++#endif
  32.405 + 	DBG_FAULT(32)
  32.406 + 	FAULT(32)
  32.407 + END(floating_point_fault)
  32.408 +@@ -1263,6 +1449,9 @@
  32.409 + /////////////////////////////////////////////////////////////////////////////////////////
  32.410 + // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
  32.411 + ENTRY(floating_point_trap)
  32.412 ++#ifdef XEN
  32.413 ++	REFLECT(33)
  32.414 ++#endif
  32.415 + 	DBG_FAULT(33)
  32.416 + 	FAULT(33)
  32.417 + END(floating_point_trap)
  32.418 +@@ -1271,6 +1460,9 @@
  32.419 + /////////////////////////////////////////////////////////////////////////////////////////
  32.420 + // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
  32.421 + ENTRY(lower_privilege_trap)
  32.422 ++#ifdef XEN
  32.423 ++	REFLECT(34)
  32.424 ++#endif
  32.425 + 	DBG_FAULT(34)
  32.426 + 	FAULT(34)
  32.427 + END(lower_privilege_trap)
  32.428 +@@ -1279,6 +1471,9 @@
  32.429 + /////////////////////////////////////////////////////////////////////////////////////////
  32.430 + // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
  32.431 + ENTRY(taken_branch_trap)
  32.432 ++#ifdef XEN
  32.433 ++	REFLECT(35)
  32.434 ++#endif
  32.435 + 	DBG_FAULT(35)
  32.436 + 	FAULT(35)
  32.437 + END(taken_branch_trap)
  32.438 +@@ -1287,6 +1482,9 @@
  32.439 + /////////////////////////////////////////////////////////////////////////////////////////
  32.440 + // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
  32.441 + ENTRY(single_step_trap)
  32.442 ++#ifdef XEN
  32.443 ++	REFLECT(36)
  32.444 ++#endif
  32.445 + 	DBG_FAULT(36)
  32.446 + 	FAULT(36)
  32.447 + END(single_step_trap)
  32.448 +@@ -1343,6 +1541,9 @@
  32.449 + /////////////////////////////////////////////////////////////////////////////////////////
  32.450 + // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
  32.451 + ENTRY(ia32_exception)
  32.452 ++#ifdef XEN
  32.453 ++	REFLECT(45)
  32.454 ++#endif
  32.455 + 	DBG_FAULT(45)
  32.456 + 	FAULT(45)
  32.457 + END(ia32_exception)
  32.458 +@@ -1351,6 +1552,9 @@
  32.459 + /////////////////////////////////////////////////////////////////////////////////////////
  32.460 + // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
  32.461 + ENTRY(ia32_intercept)
  32.462 ++#ifdef XEN
  32.463 ++	REFLECT(46)
  32.464 ++#endif
  32.465 + 	DBG_FAULT(46)
  32.466 + #ifdef	CONFIG_IA32_SUPPORT
  32.467 + 	mov r31=pr
  32.468 +@@ -1381,6 +1585,9 @@
  32.469 + /////////////////////////////////////////////////////////////////////////////////////////
  32.470 + // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
  32.471 + ENTRY(ia32_interrupt)
  32.472 ++#ifdef XEN
  32.473 ++	REFLECT(47)
  32.474 ++#endif
  32.475 + 	DBG_FAULT(47)
  32.476 + #ifdef CONFIG_IA32_SUPPORT
  32.477 + 	mov r31=pr
  32.478 +@@ -1510,6 +1717,39 @@
  32.479 + 	DBG_FAULT(67)
  32.480 + 	FAULT(67)
  32.481 + 
  32.482 ++#ifdef XEN
  32.483 ++	.org ia64_ivt+0x8000
  32.484 ++ENTRY(dispatch_reflection)
  32.485 ++	/*
  32.486 ++	 * Input:
  32.487 ++	 *	psr.ic:	off
  32.488 ++	 *	r19:	intr type (offset into ivt, see ia64_int.h)
  32.489 ++	 *	r31:	contains saved predicates (pr)
  32.490 ++	 */
  32.491 ++	SAVE_MIN_WITH_COVER_R19
  32.492 ++	alloc r14=ar.pfs,0,0,5,0
  32.493 ++	mov out4=r15
  32.494 ++	mov out0=cr.ifa
  32.495 ++	adds out1=16,sp
  32.496 ++	mov out2=cr.isr
  32.497 ++	mov out3=cr.iim
  32.498 ++//	mov out3=cr.itir
  32.499 ++
  32.500 ++	ssm psr.ic | PSR_DEFAULT_BITS
  32.501 ++	;;
  32.502 ++	srlz.i					// guarantee that interruption collection is on
  32.503 ++	;;
  32.504 ++(p15)	ssm psr.i				// restore psr.i
  32.505 ++	adds r3=8,r2				// set up second base pointer
  32.506 ++	;;
  32.507 ++	SAVE_REST
  32.508 ++	movl r14=ia64_leave_kernel
  32.509 ++	;;
  32.510 ++	mov rp=r14
  32.511 ++	br.sptk.many ia64_prepare_handle_reflection
  32.512 ++END(dispatch_reflection)
  32.513 ++#endif
  32.514 ++
  32.515 + #ifdef CONFIG_IA32_SUPPORT
  32.516 + 
  32.517 + 	/*
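
The ivt.S patch above is the heart of this changeset: under XEN most vectors are wrapped in REFLECT(n), and break/privop faults get dispatchers that SAVE_MIN, turn interruption collection (psr.ic) back on, SAVE_REST, and branch to a C handler. A minimal C-level sketch of what "reflection" amounts to follows; every name in it (reflect_fault, set_guest_resume, struct guest_cr) is illustrative only — the changeset's real handler is reached via ia64_prepare_handle_reflection.

/* Sketch: reflect a fault into the guest instead of servicing it in Xen.
 * The hypervisor copies the interruption state into the guest's virtual
 * control registers and resumes the guest inside its *own* IVT, at the
 * same vector offset that fired in the real IVT (r19 above).
 */
struct guest_cr {			/* illustrative shadow of cr.* */
	unsigned long iip, ipsr, isr, ifa, iim, iva;
};

extern void set_guest_resume(unsigned long iip);	/* hypothetical helper */

static void reflect_fault(struct guest_cr *vcr,
			  unsigned long iip, unsigned long ipsr,
			  unsigned long isr, unsigned long ifa,
			  unsigned long vector_offset)	/* e.g. 0x1c00 for dkey_miss */
{
	vcr->iip  = iip;	/* where the guest was interrupted */
	vcr->ipsr = ipsr;	/* the psr snapshot the guest will see */
	vcr->isr  = isr;
	vcr->ifa  = ifa;
	/* restart the guest at its own copy of the faulting vector */
	set_guest_resume(vcr->iva + vector_offset);
}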
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/kregs.h	Mon Feb 14 12:25:30 2005 +0000
    33.3 @@ -0,0 +1,13 @@
    33.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/kregs.h	2004-06-15 23:19:01.000000000 -0600
    33.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/kregs.h	2004-09-17 18:27:22.000000000 -0600
    33.6 +@@ -30,6 +30,10 @@
    33.7 + #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
    33.8 + #define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
    33.9 + #define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
   33.10 ++#ifdef XEN
   33.11 ++#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   33.12 ++#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   33.13 ++#endif
   33.14 + 
   33.15 + /* Processor status register bits: */
   33.16 + #define IA64_PSR_BE_BIT		1
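
The two new TR slot numbers reserve pinned translations for the per-domain shared-info page and the VHPT. A sketch of how such a slot is consumed, assuming the stock Linux/ia64 ia64_itr() inline and the SHAREDINFO_ADDR constant introduced by the system.h patch further down; the pte bits shown are typical kernel-mapping bits, not taken from this changeset:

#include <asm/kregs.h>		/* IA64_TR_SHARED_INFO */
#include <asm/page.h>		/* PAGE_SHIFT */
#include <asm/pgtable.h>	/* _PAGE_* bits */
#include <asm/processor.h>	/* ia64_itr() */

/* Pin the shared-info page with data TR slot 3 (illustrative). */
static void pin_shared_info(unsigned long paddr)
{
	unsigned long pte = paddr | _PAGE_P | _PAGE_A | _PAGE_D |
			    _PAGE_PL_0 | _PAGE_AR_RW | _PAGE_MA_WB;

	ia64_itr(0x2 /* data TR */, IA64_TR_SHARED_INFO,
		 SHAREDINFO_ADDR, pte, PAGE_SHIFT);
	ia64_srlz_d();		/* serialize data references */
}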
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/lds.S	Mon Feb 14 12:25:30 2005 +0000
    34.3 @@ -0,0 +1,17 @@
    34.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/vmlinux.lds.S	2004-06-15 23:19:52.000000000 -0600
    34.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/xen.lds.S	2004-08-25 19:28:12.000000000 -0600
    34.6 +@@ -11,12 +11,14 @@
    34.7 + OUTPUT_FORMAT("elf64-ia64-little")
    34.8 + OUTPUT_ARCH(ia64)
    34.9 + ENTRY(phys_start)
   34.10 ++#ifndef XEN
   34.11 + jiffies = jiffies_64;
   34.12 + PHDRS {
   34.13 +   code   PT_LOAD;
   34.14 +   percpu PT_LOAD;
   34.15 +   data   PT_LOAD;
   34.16 + }
   34.17 ++#endif
   34.18 + SECTIONS
   34.19 + {
   34.20 +   /* Sections to be discarded */
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/linuxtime.h	Mon Feb 14 12:25:30 2005 +0000
    35.3 @@ -0,0 +1,34 @@
    35.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/time.h	2004-06-15 23:19:37.000000000 -0600
    35.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/xen/linuxtime.h	2004-11-15 17:42:04.000000000 -0700
    35.6 +@@ -1,6 +1,11 @@
    35.7 + #ifndef _LINUX_TIME_H
    35.8 + #define _LINUX_TIME_H
    35.9 + 
   35.10 ++#ifdef XEN
   35.11 ++typedef	s64 time_t;
   35.12 ++typedef	s64 suseconds_t;
   35.13 ++#endif
   35.14 ++
   35.15 + #include <asm/param.h>
   35.16 + #include <linux/types.h>
   35.17 + 
   35.18 +@@ -25,7 +30,9 @@
   35.19 + #ifdef __KERNEL__
   35.20 + 
   35.21 + #include <linux/spinlock.h>
   35.22 ++#ifndef XEN
   35.23 + #include <linux/seqlock.h>
   35.24 ++#endif
   35.25 + #include <linux/timex.h>
   35.26 + #include <asm/div64.h>
   35.27 + #ifndef div_long_long_rem
   35.28 +@@ -322,7 +329,9 @@
   35.29 + 
   35.30 + extern struct timespec xtime;
   35.31 + extern struct timespec wall_to_monotonic;
   35.32 ++#ifndef XEN
   35.33 + extern seqlock_t xtime_lock;
   35.34 ++#endif
   35.35 + 
   35.36 + static inline unsigned long get_seconds(void)
   35.37 + { 
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/minstate.h	Mon Feb 14 12:25:30 2005 +0000
    36.3 @@ -0,0 +1,14 @@
    36.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/minstate.h	2004-06-15 23:19:52.000000000 -0600
    36.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/minstate.h	2004-12-15 16:36:00.000000000 -0700
    36.6 +@@ -3,6 +3,11 @@
    36.7 + #include <asm/cache.h>
    36.8 + 
    36.9 + #include "entry.h"
   36.10 ++#ifdef XEN
   36.11 ++// this can be removed when offsets.h is properly generated
   36.12 ++#undef IA64_TASK_THREAD_ON_USTACK_OFFSET
   36.13 ++#define IA64_TASK_THREAD_ON_USTACK_OFFSET 0x34
   36.14 ++#endif
   36.15 + 
   36.16 + /*
   36.17 +  * For ivt.s we want to access the stack virtually so we don't have to disable translation
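
The hardcoded 0x34 above papers over a missing generated offsets.h; the changeset's new xen/arch/ia64/asm-offsets.c is the eventual fix. For reference, the standard trick such a file uses (linux-2.6.7's arch/ia64/kernel/asm-offsets.c generates exactly this constant): compile a C file to assembly and harvest the "->" marker lines into #defines, so the offsets can never drift from the structs. A condensed, illustrative version:

#include <stddef.h>		/* offsetof */
#include <linux/sched.h>	/* struct task_struct */

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	/* field name as in the 2.6 ia64 thread_struct */
	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET,
	       offsetof(struct task_struct, thread.on_ustack));
}

A sed pass over the generated .s file then turns each "->SYM N" line into "#define SYM N" in offsets.h.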
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c	Mon Feb 14 12:25:30 2005 +0000
    37.3 @@ -0,0 +1,92 @@
    37.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/bootmem.c	2004-06-15 23:19:09.000000000 -0600
    37.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_bootmem.c	2004-12-17 13:47:03.000000000 -0700
    37.6 +@@ -10,7 +10,9 @@
    37.7 +  */
    37.8 + 
    37.9 + #include <linux/mm.h>
   37.10 ++#ifndef XEN
   37.11 + #include <linux/kernel_stat.h>
   37.12 ++#endif
   37.13 + #include <linux/swap.h>
   37.14 + #include <linux/interrupt.h>
   37.15 + #include <linux/init.h>
   37.16 +@@ -55,6 +57,9 @@
   37.17 + 	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
   37.18 + 	bdata->node_boot_start = (start << PAGE_SHIFT);
   37.19 + 	bdata->node_low_pfn = end;
   37.20 ++#ifdef XEN
   37.21 ++//printk("init_bootmem_core: mapstart=%lx,start=%lx,end=%lx,bdata->node_bootmem_map=%lx,bdata->node_boot_start=%lx,bdata->node_low_pfn=%lx\n",mapstart,start,end,bdata->node_bootmem_map,bdata->node_boot_start,bdata->node_low_pfn);
   37.22 ++#endif
   37.23 + 
   37.24 + 	/*
   37.25 + 	 * Initially all pages are reserved - setup_arch() has to
   37.26 +@@ -146,6 +151,9 @@
   37.27 + 	unsigned long i, start = 0, incr, eidx;
   37.28 + 	void *ret;
   37.29 + 
   37.30 ++#ifdef XEN
   37.31 ++//printf("__alloc_bootmem_core(%lx,%lx,%lx,%lx) called\n",bdata,size,align,goal);
   37.32 ++#endif
   37.33 + 	if(!size) {
   37.34 + 		printk("__alloc_bootmem_core(): zero-sized request\n");
   37.35 + 		BUG();
   37.36 +@@ -153,6 +161,9 @@
   37.37 + 	BUG_ON(align & (align-1));
   37.38 + 
   37.39 + 	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
   37.40 ++#ifdef XEN
   37.41 ++//printf("__alloc_bootmem_core: eidx=%lx\n",eidx);
   37.42 ++#endif
   37.43 + 	offset = 0;
   37.44 + 	if (align &&
   37.45 + 	    (bdata->node_boot_start & (align - 1UL)) != 0)
   37.46 +@@ -182,6 +193,9 @@
   37.47 + 		unsigned long j;
   37.48 + 		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
   37.49 + 		i = ALIGN(i, incr);
   37.50 ++#ifdef XEN
   37.51 ++//if (i >= eidx) goto fail_block;
   37.52 ++#endif
   37.53 + 		if (test_bit(i, bdata->node_bootmem_map))
   37.54 + 			continue;
   37.55 + 		for (j = i + 1; j < i + areasize; ++j) {
   37.56 +@@ -203,6 +217,9 @@
   37.57 + 	return NULL;
   37.58 + 
   37.59 + found:
   37.60 ++#ifdef XEN
   37.61 ++//printf("__alloc_bootmem_core: start=%lx\n",start);
   37.62 ++#endif
   37.63 + 	bdata->last_success = start << PAGE_SHIFT;
   37.64 + 	BUG_ON(start >= eidx);
   37.65 + 
   37.66 +@@ -262,6 +279,9 @@
   37.67 + 	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
   37.68 + 	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
   37.69 + 	map = bdata->node_bootmem_map;
   37.70 ++#ifdef XEN
   37.71 ++//printk("free_all_bootmem_core: bdata=%lx, bdata->node_boot_start=%lx, bdata->node_low_pfn=%lx, bdata->node_bootmem_map=%lx\n",bdata,bdata->node_boot_start,bdata->node_low_pfn,bdata->node_bootmem_map);
   37.72 ++#endif
   37.73 + 	for (i = 0; i < idx; ) {
   37.74 + 		unsigned long v = ~map[i / BITS_PER_LONG];
   37.75 + 		if (v) {
   37.76 +@@ -285,6 +305,9 @@
   37.77 + 	 * Now free the allocator bitmap itself, it's not
   37.78 + 	 * needed anymore:
   37.79 + 	 */
   37.80 ++#ifdef XEN
   37.81 ++//printk("About to free the allocator bitmap itself\n");
   37.82 ++#endif
   37.83 + 	page = virt_to_page(bdata->node_bootmem_map);
   37.84 + 	count = 0;
   37.85 + 	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
   37.86 +@@ -327,6 +350,9 @@
   37.87 + 	return(init_bootmem_core(&contig_page_data, start, 0, pages));
   37.88 + }
   37.89 + 
   37.90 ++#ifdef XEN
   37.91 ++#undef reserve_bootmem
   37.92 ++#endif
   37.93 + #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
   37.94 + void __init reserve_bootmem (unsigned long addr, unsigned long size)
   37.95 + {
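
For orientation, the loop being instrumented in __alloc_bootmem_core above is a first-fit scan over a one-bit-per-page-frame bitmap; the commented-out `if (i >= eidx) goto fail_block' guards the case where find_next_zero_bit runs off the end of the map. A stripped-down sketch of the search (illustrative; alignment and the last-success heuristic are elided):

#include <linux/bitops.h>	/* find_next_zero_bit, test_bit */

/* First-fit scan for `areasize' consecutive clear bits (free frames). */
static long find_free_run(unsigned long *map, unsigned long eidx,
			  unsigned long areasize)
{
	unsigned long i, j;

	for (i = 0; i < eidx; i++) {
		i = find_next_zero_bit(map, eidx, i);
		if (i + areasize > eidx)	/* the guard the patch adds */
			return -1;
		for (j = i + 1; j < i + areasize; j++)
			if (test_bit(j, map)) {
				i = j;		/* resume past the used frame */
				goto next;
			}
		return i;			/* run of areasize clear bits */
	next:	;
	}
	return -1;
}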
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c	Mon Feb 14 12:25:30 2005 +0000
    38.3 @@ -0,0 +1,91 @@
    38.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/contig.c	2004-06-15 23:19:12.000000000 -0600
    38.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_contig.c	2004-10-05 18:09:45.000000000 -0600
    38.6 +@@ -15,11 +15,23 @@
    38.7 +  * memory.
    38.8 +  */
    38.9 + #include <linux/config.h>
   38.10 ++#ifdef XEN
   38.11 ++#include <xen/sched.h>
   38.12 ++#endif
   38.13 + #include <linux/bootmem.h>
   38.14 + #include <linux/efi.h>
   38.15 + #include <linux/mm.h>
   38.16 + #include <linux/swap.h>
   38.17 + 
   38.18 ++#ifdef XEN
   38.19 ++#undef reserve_bootmem
   38.20 ++unsigned long max_mapnr;
   38.21 ++unsigned long num_physpages;
   38.22 ++extern struct page *zero_page_memmap_ptr;
   38.23 ++struct page *mem_map;
   38.24 ++#define MAX_DMA_ADDRESS ~0UL	// FIXME???
   38.25 ++#endif
   38.26 ++
   38.27 + #include <asm/meminit.h>
   38.28 + #include <asm/pgalloc.h>
   38.29 + #include <asm/pgtable.h>
   38.30 +@@ -80,6 +92,9 @@
   38.31 + {
   38.32 + 	unsigned long *max_pfnp = arg, pfn;
   38.33 + 
   38.34 ++#ifdef XEN
   38.35 ++//printf("find_max_pfn: start=%lx, end=%lx, *arg=%lx\n",start,end,*(unsigned long *)arg);
   38.36 ++#endif
   38.37 + 	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
   38.38 + 	if (pfn > *max_pfnp)
   38.39 + 		*max_pfnp = pfn;
   38.40 +@@ -149,6 +164,9 @@
   38.41 + 	/* first find highest page frame number */
   38.42 + 	max_pfn = 0;
   38.43 + 	efi_memmap_walk(find_max_pfn, &max_pfn);
   38.44 ++#ifdef XEN
   38.45 ++//printf("find_memory: efi_memmap_walk returns max_pfn=%lx\n",max_pfn);
   38.46 ++#endif
   38.47 + 
   38.48 + 	/* how many bytes to cover all the pages */
   38.49 + 	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
   38.50 +@@ -242,6 +260,9 @@
   38.51 + 	efi_memmap_walk(count_pages, &num_physpages);
   38.52 + 
   38.53 + 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
   38.54 ++#ifdef XEN
   38.55 ++//printf("paging_init: num_physpages=%lx, max_dma=%lx\n",num_physpages,max_dma);
   38.56 ++#endif
   38.57 + 
   38.58 + #ifdef CONFIG_VIRTUAL_MEM_MAP
   38.59 + 	memset(zholes_size, 0, sizeof(zholes_size));
   38.60 +@@ -265,7 +286,13 @@
   38.61 + 
   38.62 + 	max_gap = 0;
   38.63 + 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
   38.64 ++#ifdef XEN
   38.65 ++//printf("paging_init: max_gap=%lx\n",max_gap);
   38.66 ++#endif
   38.67 + 	if (max_gap < LARGE_GAP) {
   38.68 ++#ifdef XEN
   38.69 ++//printf("paging_init: no large gap\n");
   38.70 ++#endif
   38.71 + 		vmem_map = (struct page *) 0;
   38.72 + 		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
   38.73 + 				    zholes_size);
   38.74 +@@ -274,6 +301,9 @@
   38.75 + 		unsigned long map_size;
   38.76 + 
   38.77 + 		/* allocate virtual_mem_map */
   38.78 ++#ifdef XEN
   38.79 ++//printf("paging_init: large gap, allocating virtual_mem_map\n");
   38.80 ++#endif
   38.81 + 
   38.82 + 		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
   38.83 + 		vmalloc_end -= map_size;
   38.84 +@@ -293,6 +323,10 @@
   38.85 + 		zones_size[ZONE_DMA] = max_dma;
   38.86 + 		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
   38.87 + 	}
   38.88 ++#ifdef XEN
   38.89 ++//printf("paging_init: zones_size[ZONE_DMA]=%lx, zones_size[ZONE_NORMAL]=%lx, max_low_pfn=%lx\n",
   38.90 ++//zones_size[ZONE_DMA],zones_size[ZONE_NORMAL],max_low_pfn);
   38.91 ++#endif
   38.92 + 	free_area_init(zones_size);
   38.93 + #endif /* !CONFIG_VIRTUAL_MEM_MAP */
   38.94 + 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/mmzone.h	Mon Feb 14 12:25:30 2005 +0000
    39.3 @@ -0,0 +1,14 @@
    39.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/mmzone.h	2004-06-15 23:19:36.000000000 -0600
    39.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/mmzone.h	2004-08-25 19:28:13.000000000 -0600
    39.6 +@@ -185,7 +185,11 @@
    39.7 + 	char			*name;
    39.8 + 	unsigned long		spanned_pages;	/* total size, including holes */
    39.9 + 	unsigned long		present_pages;	/* amount of memory (excluding holes) */
   39.10 ++#ifdef XEN
   39.11 ++};
   39.12 ++#else
   39.13 + } ____cacheline_maxaligned_in_smp;
   39.14 ++#endif
   39.15 + 
   39.16 + 
   39.17 + /*
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/page.h	Mon Feb 14 12:25:30 2005 +0000
    40.3 @@ -0,0 +1,60 @@
    40.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/page.h	2004-06-15 23:18:58.000000000 -0600
    40.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/page.h	2004-12-17 13:47:03.000000000 -0700
    40.6 +@@ -84,7 +84,11 @@
    40.7 + #endif
    40.8 + 
    40.9 + #ifndef CONFIG_DISCONTIGMEM
   40.10 ++#ifdef XEN
   40.11 ++#define pfn_valid(pfn)		(0)
   40.12 ++#else
   40.13 + #define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
   40.14 ++#endif
   40.15 + #define page_to_pfn(page)	((unsigned long) (page - mem_map))
   40.16 + #define pfn_to_page(pfn)	(mem_map + (pfn))
   40.17 + #endif /* CONFIG_DISCONTIGMEM */
   40.18 +@@ -107,8 +111,25 @@
   40.19 +  * expressed in this way to ensure they result in a single "dep"
   40.20 +  * instruction.
   40.21 +  */
   40.22 ++#ifdef XEN
   40.23 ++typedef union xen_va {
   40.24 ++	struct {
   40.25 ++		unsigned long off : 50;
   40.26 ++		unsigned long reg : 14;
   40.27 ++	} f;
   40.28 ++	unsigned long l;
   40.29 ++	void *p;
   40.30 ++} xen_va;
   40.31 ++
   40.32 ++// xen/drivers/console.c uses __va in a declaration (should be fixed!)
   40.33 ++#define __pa(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
   40.34 ++#define __va(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
   40.35 ++//# define __pa(x)	((unsigned long)(((unsigned long)x) - PAGE_OFFSET))
   40.36 ++//# define __va(x)	((void *)((char *)(x) + PAGE_OFFSET))
   40.37 ++#else
   40.38 + #define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
   40.39 + #define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
   40.40 ++#endif
   40.41 + 
   40.42 + #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
   40.43 + #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
   40.44 +@@ -180,11 +201,19 @@
   40.45 + # define __pgprot(x)	(x)
   40.46 + #endif /* !STRICT_MM_TYPECHECKS */
   40.47 + 
   40.48 ++#ifdef XEN
   40.49 ++#define PAGE_OFFSET			0xfffc000000000000
   40.50 ++#else
   40.51 + #define PAGE_OFFSET			0xe000000000000000
   40.52 ++#endif
   40.53 + 
   40.54 + #define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
   40.55 + 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
   40.56 + 					 (((current->thread.flags & IA64_THREAD_XSTACK) != 0)	\
   40.57 + 					  ? VM_EXEC : 0))
   40.58 + 
   40.59 ++#ifdef XEN
   40.60 ++#define __flush_tlb()	do {} while(0);
   40.61 ++#endif
   40.62 ++
   40.63 + #endif /* _ASM_IA64_PAGE_H */
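
The __pa/__va definitions above deserve a second look: with PAGE_OFFSET moved to 0xfffc000000000000, the identity-mapped region is selected purely by the top 14 bits, so each conversion compiles to a single ia64 `dep' (bitfield deposit). A standalone illustration, using GCC statement expressions as the original does:

#include <stdio.h>

typedef union xen_va {
	struct {
		unsigned long off : 50;	/* offset within the region */
		unsigned long reg : 14;	/* region bits; -1 == all ones */
	} f;
	unsigned long l;
	void *p;
} xen_va;

#define __pa(x)	({ xen_va _v; _v.l = (long)(x); _v.f.reg = 0;  _v.l; })
#define __va(x)	({ xen_va _v; _v.l = (long)(x); _v.f.reg = -1; _v.p; })

int main(void)
{
	unsigned long phys = 0x4000000UL;	/* arbitrary physical address */
	void *virt = __va(phys);
	printf("virt = %p\n", virt);		/* 0xfffc000004000000 */
	printf("phys = %#lx\n", __pa(virt));	/* 0x4000000 */
	return 0;
}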
    41.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/page_alloc.c	Mon Feb 14 12:25:30 2005 +0000
    41.3 @@ -0,0 +1,305 @@
    41.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/page_alloc.c	2004-06-15 23:18:57.000000000 -0600
    41.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/page_alloc.c	2004-12-17 13:47:03.000000000 -0700
    41.6 +@@ -19,20 +19,28 @@
    41.7 + #include <linux/mm.h>
    41.8 + #include <linux/swap.h>
    41.9 + #include <linux/interrupt.h>
   41.10 ++#ifndef XEN
   41.11 + #include <linux/pagemap.h>
   41.12 ++#endif
   41.13 + #include <linux/bootmem.h>
   41.14 + #include <linux/compiler.h>
   41.15 + #include <linux/module.h>
   41.16 ++#ifndef XEN
   41.17 + #include <linux/suspend.h>
   41.18 + #include <linux/pagevec.h>
   41.19 + #include <linux/blkdev.h>
   41.20 ++#endif
   41.21 + #include <linux/slab.h>
   41.22 ++#ifndef XEN
   41.23 + #include <linux/notifier.h>
   41.24 ++#endif
   41.25 + #include <linux/topology.h>
   41.26 ++#ifndef XEN
   41.27 + #include <linux/sysctl.h>
   41.28 + #include <linux/cpu.h>
   41.29 + 
   41.30 + #include <asm/tlbflush.h>
   41.31 ++#endif
   41.32 + 
   41.33 + DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
   41.34 + struct pglist_data *pgdat_list;
   41.35 +@@ -71,6 +79,9 @@
   41.36 + 
   41.37 + static void bad_page(const char *function, struct page *page)
   41.38 + {
   41.39 ++#ifdef XEN
   41.40 ++printk("bad_page: called but disabled\n");
   41.41 ++#else
   41.42 + 	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
   41.43 + 		function, current->comm, page);
   41.44 + 	printk(KERN_EMERG "flags:0x%08lx mapping:%p mapcount:%d count:%d\n",
   41.45 +@@ -91,6 +102,7 @@
   41.46 + 	set_page_count(page, 0);
   41.47 + 	page->mapping = NULL;
   41.48 + 	page->mapcount = 0;
   41.49 ++#endif
   41.50 + }
   41.51 + 
   41.52 + #ifndef CONFIG_HUGETLB_PAGE
   41.53 +@@ -218,6 +230,7 @@
   41.54 + 
   41.55 + static inline void free_pages_check(const char *function, struct page *page)
   41.56 + {
   41.57 ++#ifndef XEN
   41.58 + 	if (	page_mapped(page) ||
   41.59 + 		page->mapping != NULL ||
   41.60 + 		page_count(page) != 0 ||
   41.61 +@@ -233,6 +246,7 @@
   41.62 + 			1 << PG_swapcache |
   41.63 + 			1 << PG_writeback )))
   41.64 + 		bad_page(function, page);
   41.65 ++#endif
   41.66 + 	if (PageDirty(page))
   41.67 + 		ClearPageDirty(page);
   41.68 + }
   41.69 +@@ -276,6 +290,9 @@
   41.70 + 
   41.71 + void __free_pages_ok(struct page *page, unsigned int order)
   41.72 + {
   41.73 ++#ifdef XEN
   41.74 ++printk("__free_pages_ok: called but disabled\n");
   41.75 ++#else
   41.76 + 	LIST_HEAD(list);
   41.77 + 	int i;
   41.78 + 
   41.79 +@@ -285,6 +302,7 @@
   41.80 + 	list_add(&page->lru, &list);
   41.81 + 	kernel_map_pages(page, 1<<order, 0);
   41.82 + 	free_pages_bulk(page_zone(page), 1, &list, order);
   41.83 ++#endif
   41.84 + }
   41.85 + 
   41.86 + #define MARK_USED(index, order, area) \
   41.87 +@@ -330,6 +348,7 @@
   41.88 +  */
   41.89 + static void prep_new_page(struct page *page, int order)
   41.90 + {
   41.91 ++#ifndef XEN
   41.92 + 	if (page->mapping || page_mapped(page) ||
   41.93 + 	    (page->flags & (
   41.94 + 			1 << PG_private	|
   41.95 +@@ -343,11 +362,14 @@
   41.96 + 			1 << PG_swapcache |
   41.97 + 			1 << PG_writeback )))
   41.98 + 		bad_page(__FUNCTION__, page);
   41.99 ++#endif
  41.100 + 
  41.101 + 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
  41.102 + 			1 << PG_referenced | 1 << PG_arch_1 |
  41.103 + 			1 << PG_checked | 1 << PG_mappedtodisk);
  41.104 ++#ifndef XEN
  41.105 + 	page->private = 0;
  41.106 ++#endif
  41.107 + 	set_page_refs(page, order);
  41.108 + }
  41.109 + 
  41.110 +@@ -590,13 +612,17 @@
  41.111 + 	unsigned long min;
  41.112 + 	struct zone **zones;
  41.113 + 	struct page *page;
  41.114 ++#ifndef XEN
  41.115 + 	struct reclaim_state reclaim_state;
  41.116 ++#endif
  41.117 + 	struct task_struct *p = current;
  41.118 + 	int i;
  41.119 + 	int alloc_type;
  41.120 + 	int do_retry;
  41.121 + 
  41.122 ++#ifndef XEN
  41.123 + 	might_sleep_if(wait);
  41.124 ++#endif
  41.125 + 
  41.126 + 	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
  41.127 + 	if (zones[0] == NULL)     /* no zones in the zonelist */
  41.128 +@@ -610,12 +636,14 @@
  41.129 + 
  41.130 + 		min = (1<<order) + z->protection[alloc_type];
  41.131 + 
  41.132 ++#ifndef XEN
  41.133 + 		/*
  41.134 + 		 * We let real-time tasks dip their real-time paws a little
  41.135 + 		 * deeper into reserves.
  41.136 + 		 */
  41.137 + 		if (rt_task(p))
  41.138 + 			min -= z->pages_low >> 1;
  41.139 ++#endif
  41.140 + 
  41.141 + 		if (z->free_pages >= min ||
  41.142 + 				(!wait && z->free_pages >= z->pages_high)) {
  41.143 +@@ -627,9 +655,11 @@
  41.144 + 		}
  41.145 + 	}
  41.146 + 
  41.147 ++#ifndef XEN
  41.148 + 	/* we're somewhat low on memory, failed to find what we needed */
  41.149 + 	for (i = 0; zones[i] != NULL; i++)
  41.150 + 		wakeup_kswapd(zones[i]);
  41.151 ++#endif
  41.152 + 
  41.153 + 	/* Go through the zonelist again, taking __GFP_HIGH into account */
  41.154 + 	for (i = 0; zones[i] != NULL; i++) {
  41.155 +@@ -639,8 +669,10 @@
  41.156 + 
  41.157 + 		if (gfp_mask & __GFP_HIGH)
  41.158 + 			min -= z->pages_low >> 2;
  41.159 ++#ifndef XEN
  41.160 + 		if (rt_task(p))
  41.161 + 			min -= z->pages_low >> 1;
  41.162 ++#endif
  41.163 + 
  41.164 + 		if (z->free_pages >= min ||
  41.165 + 				(!wait && z->free_pages >= z->pages_high)) {
  41.166 +@@ -654,6 +686,7 @@
  41.167 + 
  41.168 + 	/* here we're in the low on memory slow path */
  41.169 + 
  41.170 ++#ifndef XEN
  41.171 + rebalance:
  41.172 + 	if ((p->flags & (PF_MEMALLOC | PF_MEMDIE)) && !in_interrupt()) {
  41.173 + 		/* go through the zonelist yet again, ignoring mins */
  41.174 +@@ -681,6 +714,7 @@
  41.175 + 
  41.176 + 	p->reclaim_state = NULL;
  41.177 + 	p->flags &= ~PF_MEMALLOC;
  41.178 ++#endif
  41.179 + 
  41.180 + 	/* go through the zonelist yet one more time */
  41.181 + 	for (i = 0; zones[i] != NULL; i++) {
  41.182 +@@ -698,6 +732,11 @@
  41.183 + 		}
  41.184 + 	}
  41.185 + 
  41.186 ++#ifdef XEN
  41.187 ++printk(KERN_WARNING "%s: page allocation failure."
  41.188 ++			" order:%d, mode:0x%x\n",
  41.189 ++			"(xen tasks have no comm)", order, gfp_mask);
  41.190 ++#else
  41.191 + 	/*
  41.192 + 	 * Don't let big-order allocations loop unless the caller explicitly
  41.193 + 	 * requests that.  Wait for some write requests to complete then retry.
  41.194 +@@ -724,6 +763,7 @@
  41.195 + 			p->comm, order, gfp_mask);
  41.196 + 		dump_stack();
  41.197 + 	}
  41.198 ++#endif
  41.199 + 	return NULL;
  41.200 + got_pg:
  41.201 + 	kernel_map_pages(page, 1 << order, 1);
  41.202 +@@ -808,6 +848,7 @@
  41.203 + 
  41.204 + EXPORT_SYMBOL(get_zeroed_page);
  41.205 + 
  41.206 ++#ifndef XEN
  41.207 + void __pagevec_free(struct pagevec *pvec)
  41.208 + {
  41.209 + 	int i = pagevec_count(pvec);
  41.210 +@@ -815,10 +856,15 @@
  41.211 + 	while (--i >= 0)
  41.212 + 		free_hot_cold_page(pvec->pages[i], pvec->cold);
  41.213 + }
  41.214 ++#endif
  41.215 + 
  41.216 + fastcall void __free_pages(struct page *page, unsigned int order)
  41.217 + {
  41.218 ++#ifdef XEN
  41.219 ++	if (!PageReserved(page)) {
  41.220 ++#else
  41.221 + 	if (!PageReserved(page) && put_page_testzero(page)) {
  41.222 ++#endif
  41.223 + 		if (order == 0)
  41.224 + 			free_hot_page(page);
  41.225 + 		else
  41.226 +@@ -914,6 +960,13 @@
  41.227 + 	return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
  41.228 + }
  41.229 + 
  41.230 ++#ifdef XEN
  41.231 ++unsigned int nr_free_highpages (void)
  41.232 ++{
  41.233 ++printf("nr_free_highpages: called but not implemented\n"); return 0;
  41.234 ++}
  41.235 ++#endif
  41.236 ++
  41.237 + #ifdef CONFIG_HIGHMEM
  41.238 + unsigned int nr_free_highpages (void)
  41.239 + {
  41.240 +@@ -1022,6 +1075,7 @@
  41.241 + 
  41.242 + void si_meminfo(struct sysinfo *val)
  41.243 + {
  41.244 ++#ifndef XEN
  41.245 + 	val->totalram = totalram_pages;
  41.246 + 	val->sharedram = 0;
  41.247 + 	val->freeram = nr_free_pages();
  41.248 +@@ -1034,6 +1088,7 @@
  41.249 + 	val->freehigh = 0;
  41.250 + #endif
  41.251 + 	val->mem_unit = PAGE_SIZE;
  41.252 ++#endif
  41.253 + }
  41.254 + 
  41.255 + EXPORT_SYMBOL(si_meminfo);
  41.256 +@@ -1165,7 +1220,9 @@
  41.257 + 		printk("= %lukB\n", K(total));
  41.258 + 	}
  41.259 + 
  41.260 ++#ifndef XEN
  41.261 + 	show_swap_cache_info();
  41.262 ++#endif
  41.263 + }
  41.264 + 
  41.265 + /*
  41.266 +@@ -1530,6 +1587,9 @@
  41.267 + 		zone->wait_table_size = wait_table_size(size);
  41.268 + 		zone->wait_table_bits =
  41.269 + 			wait_table_bits(zone->wait_table_size);
  41.270 ++#ifdef XEN
  41.271 ++//printf("free_area_init_core-1: calling alloc_bootmem_node(%lx,%lx)\n",pgdat,zone->wait_table_size * sizeof(wait_queue_head_t));
  41.272 ++#endif
  41.273 + 		zone->wait_table = (wait_queue_head_t *)
  41.274 + 			alloc_bootmem_node(pgdat, zone->wait_table_size
  41.275 + 						* sizeof(wait_queue_head_t));
  41.276 +@@ -1584,6 +1644,9 @@
  41.277 + 			 */
  41.278 + 			bitmap_size = (size-1) >> (i+4);
  41.279 + 			bitmap_size = LONG_ALIGN(bitmap_size+1);
  41.280 ++#ifdef XEN
  41.281 ++//printf("free_area_init_core-2: calling alloc_bootmem_node(%lx,%lx)\n",pgdat, bitmap_size);
  41.282 ++#endif
  41.283 + 			zone->free_area[i].map = 
  41.284 + 			  (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
  41.285 + 		}
  41.286 +@@ -1601,6 +1664,9 @@
  41.287 + 	calculate_zone_totalpages(pgdat, zones_size, zholes_size);
  41.288 + 	if (!node_mem_map) {
  41.289 + 		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
  41.290 ++#ifdef XEN
  41.291 ++//printf("free_area_init_node: calling alloc_bootmem_node(%lx,%lx)\n",pgdat,size);
  41.292 ++#endif
  41.293 + 		node_mem_map = alloc_bootmem_node(pgdat, size);
  41.294 + 	}
  41.295 + 	pgdat->node_mem_map = node_mem_map;
  41.296 +@@ -1784,6 +1850,7 @@
  41.297 + 
  41.298 + #endif /* CONFIG_PROC_FS */
  41.299 + 
  41.300 ++#ifndef XEN
  41.301 + #ifdef CONFIG_HOTPLUG_CPU
  41.302 + static int page_alloc_cpu_notify(struct notifier_block *self,
  41.303 + 				 unsigned long action, void *hcpu)
  41.304 +@@ -2011,3 +2078,4 @@
  41.305 + 	setup_per_zone_protection();
  41.306 + 	return 0;
  41.307 + }
  41.308 ++#endif
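
The most consequential hunk above is __free_pages: upstream frees a page only when put_page_testzero() drops the last reference, whereas the Xen port frees unconditionally because page lifetimes are managed by the hypervisor. For reference, the gate being removed is, in the 2.6 tree (modulo debug checks):

/* linux-2.6.7 include/linux/mm.h, simplified */
#define put_page_testzero(p)	atomic_dec_and_test(&(p)->count)

/* so the upstream path is effectively: */
fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (!PageReserved(page) && put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}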
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/processor.h	Mon Feb 14 12:25:30 2005 +0000
    42.3 @@ -0,0 +1,19 @@
    42.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/processor.h	2005-01-23 13:23:36.000000000 -0700
    42.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/processor.h	2004-08-25 19:28:13.000000000 -0600
    42.6 +@@ -406,12 +406,16 @@
    42.7 +  */
    42.8 + 
    42.9 + /* Return TRUE if task T owns the fph partition of the CPU we're running on. */
   42.10 ++#ifdef XEN
   42.11 ++#define ia64_is_local_fpu_owner(t) 0
   42.12 ++#else
   42.13 + #define ia64_is_local_fpu_owner(t)								\
   42.14 + ({												\
   42.15 + 	struct task_struct *__ia64_islfo_task = (t);						\
   42.16 + 	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
   42.17 + 	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
   42.18 + })
   42.19 ++#endif
   42.20 + 
   42.21 + /* Mark task T as owning the fph partition of the CPU we're running on. */
   42.22 + #define ia64_set_local_fpu_owner(t) do {						\
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/sal.h	Mon Feb 14 12:25:30 2005 +0000
    43.3 @@ -0,0 +1,26 @@
    43.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/sal.h	2004-06-15 23:20:04.000000000 -0600
    43.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/sal.h	2004-10-27 13:55:23.000000000 -0600
    43.6 +@@ -646,7 +646,23 @@
    43.7 + {
    43.8 + 	struct ia64_sal_retval isrv;
    43.9 + 
   43.10 ++//#ifdef XEN
   43.11 ++#if 0
   43.12 ++	unsigned long *x = (unsigned long *)ia64_sal;
   43.13 ++	unsigned long *inst = (unsigned long *)*x;
   43.14 ++	unsigned long __ia64_sc_flags;
   43.15 ++	struct ia64_fpreg __ia64_sc_fr[6];
   43.16 ++printf("ia64_sal_freq_base: about to save_scratch_fpregs\n");
   43.17 ++	ia64_save_scratch_fpregs(__ia64_sc_fr);
   43.18 ++	spin_lock_irqsave(&sal_lock, __ia64_sc_flags);
   43.19 ++printf("ia64_sal_freq_base: about to call, ia64_sal=%p, ia64_sal[0]=%p, ia64_sal[1]=%p\n",x,x[0],x[1]);
   43.20 ++printf("first inst=%p,%p\n",inst[0],inst[1]);
   43.21 ++	isrv = (*ia64_sal)(SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
   43.22 ++	spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);
   43.23 ++	ia64_load_scratch_fpregs(__ia64_sc_fr);
   43.24 ++#else
   43.25 + 	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
   43.26 ++#endif
   43.27 + 	*ticks_per_second = isrv.v0;
   43.28 + 	*drift_info = isrv.v1;
   43.29 + 	return isrv.status;
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/setup.c	Mon Feb 14 12:25:30 2005 +0000
    44.3 @@ -0,0 +1,100 @@
    44.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/setup.c	2004-06-15 23:18:58.000000000 -0600
    44.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/setup.c	2004-11-11 17:08:30.000000000 -0700
    44.6 +@@ -21,6 +21,9 @@
    44.7 + #include <linux/init.h>
    44.8 + 
    44.9 + #include <linux/acpi.h>
   44.10 ++#ifdef XEN
   44.11 ++#include <xen/sched.h>
   44.12 ++#endif
   44.13 + #include <linux/bootmem.h>
   44.14 + #include <linux/console.h>
   44.15 + #include <linux/delay.h>
   44.16 +@@ -30,13 +33,17 @@
   44.17 + #include <linux/seq_file.h>
   44.18 + #include <linux/string.h>
   44.19 + #include <linux/threads.h>
   44.20 ++#ifndef XEN
   44.21 + #include <linux/tty.h>
   44.22 + #include <linux/serial.h>
   44.23 + #include <linux/serial_core.h>
   44.24 ++#endif
   44.25 + #include <linux/efi.h>
   44.26 + #include <linux/initrd.h>
   44.27 + 
   44.28 ++#ifndef XEN
   44.29 + #include <asm/ia32.h>
   44.30 ++#endif
   44.31 + #include <asm/machvec.h>
   44.32 + #include <asm/mca.h>
   44.33 + #include <asm/meminit.h>
   44.34 +@@ -50,6 +57,11 @@
   44.35 + #include <asm/smp.h>
   44.36 + #include <asm/system.h>
   44.37 + #include <asm/unistd.h>
   44.38 ++#ifdef XEN
   44.39 ++#include <linux/mm.h>
   44.40 ++#include <asm/mmu_context.h>
   44.41 ++extern unsigned long loops_per_jiffy;	// from linux/init/main.c
   44.42 ++#endif
   44.43 + 
   44.44 + #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
   44.45 + # error "struct cpuinfo_ia64 too big!"
   44.46 +@@ -65,7 +77,9 @@
   44.47 + DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
   44.48 + unsigned long ia64_cycles_per_usec;
   44.49 + struct ia64_boot_param *ia64_boot_param;
   44.50 ++#ifndef XEN
   44.51 + struct screen_info screen_info;
   44.52 ++#endif
   44.53 + 
   44.54 + unsigned long ia64_max_cacheline_size;
   44.55 + unsigned long ia64_iobase;	/* virtual address for I/O accesses */
   44.56 +@@ -98,7 +112,6 @@
   44.57 + struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
   44.58 + int num_rsvd_regions;
   44.59 + 
   44.60 +-
   44.61 + /*
   44.62 +  * Filter incoming memory segments based on the primitive map created from the boot
   44.63 +  * parameters. Segments contained in the map are removed from the memory ranges. A
   44.64 +@@ -285,7 +298,9 @@
   44.65 + {
   44.66 + 	unw_init();
   44.67 + 
   44.68 ++#ifndef XEN
   44.69 + 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
   44.70 ++#endif
   44.71 + 
   44.72 + 	*cmdline_p = __va(ia64_boot_param->command_line);
   44.73 + 	strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
   44.74 +@@ -297,6 +312,10 @@
   44.75 + 	machvec_init(acpi_get_sysname());
   44.76 + #endif
   44.77 + 
   44.78 ++#ifdef XEN
   44.79 ++#undef CONFIG_ACPI_BOOT
   44.80 ++#endif
   44.81 ++
   44.82 + #ifdef CONFIG_ACPI_BOOT
   44.83 + 	/* Initialize the ACPI boot-time table parser */
   44.84 + 	acpi_table_init();
   44.85 +@@ -413,6 +432,9 @@
   44.86 + 		sprintf(cp, " 0x%lx", mask);
   44.87 + 	}
   44.88 + 
   44.89 ++#ifdef XEN
   44.90 ++#define seq_printf(a,b...) printf(b)
   44.91 ++#endif
   44.92 + 	seq_printf(m,
   44.93 + 		   "processor  : %d\n"
   44.94 + 		   "vendor     : %s\n"
   44.95 +@@ -667,6 +689,8 @@
   44.96 + void
   44.97 + check_bugs (void)
   44.98 + {
   44.99 ++#ifndef XEN
  44.100 + 	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
  44.101 + 			       (unsigned long) __end___mckinley_e9_bundles);
  44.102 ++#endif
  44.103 + }
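
The seq_printf redefinition in the setup.c patch above is a GNU named-variadic macro that discards the seq_file argument and routes the rest straight to the console — enough to reuse Linux's /proc/cpuinfo formatting verbatim. A self-contained illustration:

#include <stdio.h>

#define seq_printf(a, b...)	printf(b)	/* `a' is silently dropped */

int main(void)
{
	void *m = 0;				/* stand-in for struct seq_file * */
	seq_printf(m, "processor  : %d\n", 0);	/* prints straight to stdout */
	return 0;
}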
    45.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/slab.c	Mon Feb 14 12:25:30 2005 +0000
    45.3 @@ -0,0 +1,139 @@
    45.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/slab.c	2004-06-15 23:19:44.000000000 -0600
    45.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/slab.c	2004-12-17 13:47:03.000000000 -0700
    45.6 +@@ -86,15 +86,30 @@
    45.7 + #include	<linux/init.h>
    45.8 + #include	<linux/compiler.h>
    45.9 + #include	<linux/seq_file.h>
   45.10 ++#ifndef XEN
   45.11 + #include	<linux/notifier.h>
   45.12 + #include	<linux/kallsyms.h>
   45.13 + #include	<linux/cpu.h>
   45.14 + #include	<linux/sysctl.h>
   45.15 + #include	<linux/module.h>
   45.16 ++#endif
   45.17 + 
   45.18 + #include	<asm/uaccess.h>
   45.19 + #include	<asm/cacheflush.h>
   45.20 ++#ifndef XEN
   45.21 + #include	<asm/tlbflush.h>
   45.22 ++#endif
   45.23 ++
   45.24 ++#ifdef XEN
   45.25 ++#define lock_cpu_hotplug()	do { } while (0)
   45.26 ++#define unlock_cpu_hotplug()	do { } while (0)
   45.27 ++#define might_sleep_if(x)	do { } while (0)
   45.28 ++#define	dump_stack()		do { } while (0)
   45.29 ++#define start_cpu_timer(cpu)	do { } while (0)
   45.30 ++static inline void __down(struct semaphore *sem) { }
   45.31 ++static inline void __up(struct semaphore *sem) { }
   45.32 ++static inline void might_sleep(void) { }
   45.33 ++#endif
   45.34 + 
   45.35 + /*
   45.36 +  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
   45.37 +@@ -530,7 +545,9 @@
   45.38 + 	FULL
   45.39 + } g_cpucache_up;
   45.40 + 
   45.41 ++#ifndef XEN
   45.42 + static DEFINE_PER_CPU(struct timer_list, reap_timers);
   45.43 ++#endif
   45.44 + 
   45.45 + static void reap_timer_fnc(unsigned long data);
   45.46 + static void free_block(kmem_cache_t* cachep, void** objpp, int len);
   45.47 +@@ -588,6 +605,7 @@
   45.48 +  * Add the CPU number into the expiry time to minimize the possibility of the
   45.49 +  * CPUs getting into lockstep and contending for the global cache chain lock.
   45.50 +  */
   45.51 ++#ifndef XEN
   45.52 + static void __devinit start_cpu_timer(int cpu)
   45.53 + {
   45.54 + 	struct timer_list *rt = &per_cpu(reap_timers, cpu);
   45.55 +@@ -600,6 +618,7 @@
   45.56 + 		add_timer_on(rt, cpu);
   45.57 + 	}
   45.58 + }
   45.59 ++#endif
   45.60 + 
   45.61 + #ifdef CONFIG_HOTPLUG_CPU
   45.62 + static void stop_cpu_timer(int cpu)
   45.63 +@@ -634,6 +653,7 @@
   45.64 + 	return nc;
   45.65 + }
   45.66 + 
   45.67 ++#ifndef XEN
   45.68 + static int __devinit cpuup_callback(struct notifier_block *nfb,
   45.69 + 				  unsigned long action,
   45.70 + 				  void *hcpu)
   45.71 +@@ -693,6 +713,7 @@
   45.72 + }
   45.73 + 
   45.74 + static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
   45.75 ++#endif
   45.76 + 
   45.77 + /* Initialisation.
   45.78 +  * Called after the gfp() functions have been enabled, and before smp_init().
   45.79 +@@ -805,10 +826,14 @@
   45.80 + 	/* Done! */
   45.81 + 	g_cpucache_up = FULL;
   45.82 + 
   45.83 ++#ifdef XEN
   45.84 ++printk("kmem_cache_init: some parts commented out, ignored\n");
   45.85 ++#else
   45.86 + 	/* Register a cpu startup notifier callback
   45.87 + 	 * that initializes ac_data for all new cpus
   45.88 + 	 */
   45.89 + 	register_cpu_notifier(&cpucache_notifier);
   45.90 ++#endif
   45.91 + 	
   45.92 + 
   45.93 + 	/* The reap timers are started later, with a module init call:
   45.94 +@@ -886,8 +911,10 @@
   45.95 + 		page++;
   45.96 + 	}
   45.97 + 	sub_page_state(nr_slab, nr_freed);
   45.98 ++#ifndef XEN
   45.99 + 	if (current->reclaim_state)
  45.100 + 		current->reclaim_state->reclaimed_slab += nr_freed;
  45.101 ++#endif
  45.102 + 	free_pages((unsigned long)addr, cachep->gfporder);
  45.103 + 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 
  45.104 + 		atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
  45.105 +@@ -1363,8 +1390,10 @@
  45.106 + 					+ cachep->num;
  45.107 + 	} 
  45.108 + 
  45.109 ++#ifndef XEN
  45.110 + 	cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
  45.111 + 					((unsigned long)cachep)%REAPTIMEOUT_LIST3;
  45.112 ++#endif
  45.113 + 
  45.114 + 	/* Need the semaphore to access the chain. */
  45.115 + 	down(&cache_chain_sem);
  45.116 +@@ -2237,8 +2266,10 @@
  45.117 + 
  45.118 + 	if (unlikely(addr < min_addr))
  45.119 + 		goto out;
  45.120 ++#ifndef XEN
  45.121 + 	if (unlikely(addr > (unsigned long)high_memory - size))
  45.122 + 		goto out;
  45.123 ++#endif
  45.124 + 	if (unlikely(addr & align_mask))
  45.125 + 		goto out;
  45.126 + 	if (unlikely(!kern_addr_valid(addr)))
  45.127 +@@ -2769,6 +2800,7 @@
  45.128 +  */
  45.129 + static void reap_timer_fnc(unsigned long cpu)
  45.130 + {
  45.131 ++#ifndef XEN
  45.132 + 	struct timer_list *rt = &__get_cpu_var(reap_timers);
  45.133 + 
  45.134 + 	/* CPU hotplug can drag us off cpu: don't run on wrong CPU */
  45.135 +@@ -2776,6 +2808,7 @@
  45.136 + 		cache_reap();
  45.137 + 		mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
  45.138 + 	}
  45.139 ++#endif
  45.140 + }
  45.141 + 
  45.142 + #ifdef CONFIG_PROC_FS
    46.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/slab.h	Mon Feb 14 12:25:30 2005 +0000
    46.3 @@ -0,0 +1,14 @@
    46.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/slab.h	2004-06-15 23:20:26.000000000 -0600
    46.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/slab.h	2004-08-25 19:28:13.000000000 -0600
    46.6 +@@ -83,7 +83,11 @@
    46.7 + 			goto found; \
    46.8 + 		else \
    46.9 + 			i++;
   46.10 ++#ifdef XEN
   46.11 ++#include <linux/kmalloc_sizes.h>
   46.12 ++#else
   46.13 + #include "kmalloc_sizes.h"
   46.14 ++#endif
   46.15 + #undef CACHE
   46.16 + 		{
   46.17 + 			extern void __you_cannot_kmalloc_that_much(void);
    47.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/system.h	Mon Feb 14 12:25:30 2005 +0000
    47.3 @@ -0,0 +1,43 @@
    47.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/system.h	2005-01-23 13:23:36.000000000 -0700
    47.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/system.h	2004-09-17 18:27:22.000000000 -0600
    47.6 +@@ -24,8 +24,16 @@
    47.7 +  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
    47.8 +  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
    47.9 +  */
   47.10 ++#ifdef XEN
   47.11 ++//#define KERNEL_START		 0xfffc000100000000
   47.12 ++#define KERNEL_START		 0xfffc000004000000
   47.13 ++#define PERCPU_ADDR		 0xfffd000000000000-PERCPU_PAGE_SIZE
   47.14 ++#define SHAREDINFO_ADDR		 0xfffd000000000000
   47.15 ++#define VHPT_ADDR		 0xfffe000000000000
   47.16 ++#else
   47.17 + #define KERNEL_START		 0xa000000100000000
   47.18 + #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
   47.19 ++#endif
   47.20 + 
   47.21 + #ifndef __ASSEMBLY__
   47.22 + 
   47.23 +@@ -218,9 +226,13 @@
   47.24 + # define PERFMON_IS_SYSWIDE() (0)
   47.25 + #endif
   47.26 + 
   47.27 ++#ifdef XEN
   47.28 ++#define IA64_HAS_EXTRA_STATE(t) 0
   47.29 ++#else
   47.30 + #define IA64_HAS_EXTRA_STATE(t)							\
   47.31 + 	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
   47.32 + 	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
   47.33 ++#endif
   47.34 + 
   47.35 + #define __switch_to(prev,next,last) do {							 \
   47.36 + 	if (IA64_HAS_EXTRA_STATE(prev))								 \
   47.37 +@@ -249,6 +261,9 @@
   47.38 + #else
   47.39 + # define switch_to(prev,next,last)	__switch_to(prev, next, last)
   47.40 + #endif
   47.41 ++#ifdef XEN
   47.42 ++#undef switch_to
   47.43 ++#endif
   47.44 + 
   47.45 + /*
   47.46 +  * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
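
The time.c patch that follows copies Linux's wrap-safe jiffies comparators (time_after and friends) inline, since Xen has no jiffies.h to include. Their signed-difference trick is worth a standalone check (typecheck dropped, as in the patch):

#include <assert.h>

#define time_after(a,b)		((long)(b) - (long)(a) < 0)
#define time_before(a,b)	time_after(b,a)

int main(void)
{
	unsigned long before = ~0UL - 10;	/* just below the wrap */
	unsigned long after  = 5;		/* just past the wrap */

	assert(time_after(after, before));	/* correct despite wrap */
	assert(after < before);			/* naive compare gets it wrong */
	return 0;
}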
    48.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    48.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/time.c	Mon Feb 14 12:25:30 2005 +0000
    48.3 @@ -0,0 +1,337 @@
    48.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/time.c	2004-06-15 23:19:01.000000000 -0600
    48.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/time.c	2004-11-23 17:25:18.000000000 -0700
    48.6 +@@ -10,16 +10,22 @@
    48.7 +  */
    48.8 + #include <linux/config.h>
    48.9 + 
   48.10 ++#ifndef XEN
   48.11 + #include <linux/cpu.h>
   48.12 ++#endif
   48.13 + #include <linux/init.h>
   48.14 + #include <linux/kernel.h>
   48.15 + #include <linux/module.h>
   48.16 ++#ifndef XEN
   48.17 + #include <linux/profile.h>
   48.18 ++#endif
   48.19 + #include <linux/sched.h>
   48.20 + #include <linux/time.h>
   48.21 + #include <linux/interrupt.h>
   48.22 + #include <linux/efi.h>
   48.23 ++#ifndef XEN
   48.24 + #include <linux/profile.h>
   48.25 ++#endif
   48.26 + #include <linux/timex.h>
   48.27 + 
   48.28 + #include <asm/machvec.h>
   48.29 +@@ -29,6 +35,9 @@
   48.30 + #include <asm/sal.h>
   48.31 + #include <asm/sections.h>
   48.32 + #include <asm/system.h>
   48.33 ++#ifdef XEN
   48.34 ++#include <asm/ia64_int.h>
   48.35 ++#endif
   48.36 + 
   48.37 + extern unsigned long wall_jiffies;
   48.38 + 
   48.39 +@@ -45,6 +54,59 @@
   48.40 + 
   48.41 + #endif
   48.42 + 
   48.43 ++#ifdef XEN
   48.44 ++volatile unsigned long last_nsec_offset;
   48.45 ++extern rwlock_t xtime_lock;
   48.46 ++unsigned long cpu_khz;  /* Detected as we calibrate the TSC */
   48.47 ++static s_time_t        stime_irq;       /* System time at last 'time update' */
   48.48 ++
   48.49 ++static inline u64 get_time_delta(void)
   48.50 ++{
   48.51 ++	printf("get_time_delta: called, not implemented\n");
   48.52 ++	return 0;
   48.53 ++}
   48.54 ++
   48.55 ++s_time_t get_s_time(void)
   48.56 ++{
   48.57 ++    s_time_t now;
   48.58 ++    unsigned long flags;
   48.59 ++
   48.60 ++    read_lock_irqsave(&xtime_lock, flags);
   48.61 ++
   48.62 ++    now = stime_irq + get_time_delta();
   48.63 ++
   48.64 ++    /* Ensure that the returned system time is monotonically increasing. */
   48.65 ++    {
   48.66 ++        static s_time_t prev_now = 0;
   48.67 ++        if ( unlikely(now < prev_now) )
   48.68 ++            now = prev_now;
   48.69 ++        prev_now = now;
   48.70 ++    }
   48.71 ++
   48.72 ++    read_unlock_irqrestore(&xtime_lock, flags);
   48.73 ++
   48.74 ++    return now; 
   48.75 ++}
   48.76 ++
   48.77 ++void update_dom_time(struct domain *d)
   48.78 ++{
   48.79 ++// FIXME: implement this?
   48.80 ++	printf("update_dom_time: called, not implemented, skipping\n");
   48.81 ++}
   48.82 ++
   48.83 ++/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
   48.84 ++void do_settime(unsigned long secs, unsigned long usecs, u64 system_time_base)
   48.85 ++{
   48.86 ++// FIXME: Should this be do_settimeofday (from linux)???
   48.87 ++	printf("do_settime: called, not implemented, stopping\n");
   48.88 ++	dummy();
   48.89 ++}
   48.90 ++#endif
   48.91 ++
   48.92 ++#if 0	/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
   48.93 ++#endif	/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
   48.94 ++
   48.95 ++#ifndef XEN
   48.96 + static void
   48.97 + itc_reset (void)
   48.98 + {
   48.99 +@@ -80,12 +142,15 @@
  48.100 + 	return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
  48.101 + }
  48.102 + 
  48.103 ++#ifndef XEN
  48.104 + static struct time_interpolator itc_interpolator = {
  48.105 + 	.get_offset =	itc_get_offset,
  48.106 + 	.update =	itc_update,
  48.107 + 	.reset =	itc_reset
  48.108 + };
  48.109 ++#endif
  48.110 + 
  48.111 ++#ifndef XEN
  48.112 + int
  48.113 + do_settimeofday (struct timespec *tv)
  48.114 + {
  48.115 +@@ -95,7 +160,9 @@
  48.116 + 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
  48.117 + 		return -EINVAL;
  48.118 + 
  48.119 ++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
  48.120 + 	write_seqlock_irq(&xtime_lock);
  48.121 ++#endif
  48.122 + 	{
  48.123 + 		/*
  48.124 + 		 * This is revolting. We need to set "xtime" correctly. However, the value
  48.125 +@@ -117,12 +184,15 @@
  48.126 + 		time_esterror = NTP_PHASE_LIMIT;
  48.127 + 		time_interpolator_reset();
  48.128 + 	}
  48.129 ++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
  48.130 + 	write_sequnlock_irq(&xtime_lock);
  48.131 ++#endif
  48.132 + 	clock_was_set();
  48.133 + 	return 0;
  48.134 + }
  48.135 + 
  48.136 + EXPORT_SYMBOL(do_settimeofday);
  48.137 ++#endif
  48.138 + 
  48.139 + void
  48.140 + do_gettimeofday (struct timeval *tv)
  48.141 +@@ -185,6 +255,7 @@
  48.142 + }
  48.143 + 
  48.144 + EXPORT_SYMBOL(do_gettimeofday);
  48.145 ++#endif
  48.146 + 
  48.147 + /*
  48.148 +  * The profiling function is SMP safe. (nothing can mess
  48.149 +@@ -195,6 +266,9 @@
  48.150 + static inline void
  48.151 + ia64_do_profile (struct pt_regs * regs)
  48.152 + {
  48.153 ++#ifdef XEN
  48.154 ++}
  48.155 ++#else
  48.156 + 	unsigned long ip, slot;
  48.157 + 	extern cpumask_t prof_cpu_mask;
  48.158 + 
  48.159 +@@ -231,24 +305,88 @@
  48.160 + 		ip = prof_len-1;
  48.161 + 	atomic_inc((atomic_t *)&prof_buffer[ip]);
  48.162 + }
  48.163 ++#endif
  48.164 ++
  48.165 ++#ifdef XEN
  48.166 ++unsigned long domain0_ready = 0;	// FIXME (see below)
  48.167 ++#define typecheck(a,b)	1
  48.168 ++/* FROM linux/include/linux/jiffies.h */
  48.169 ++/*
  48.170 ++ *	These inlines deal with timer wrapping correctly. You are 
  48.171 ++ *	strongly encouraged to use them
  48.172 ++ *	1. Because people otherwise forget
  48.173 ++ *	2. Because if the timer wrap changes in future you won't have to
  48.174 ++ *	   alter your driver code.
  48.175 ++ *
  48.176 ++ * time_after(a,b) returns true if the time a is after time b.
  48.177 ++ *
  48.178 ++ * Do this with "<0" and ">=0" to only test the sign of the result. A
  48.179 ++ * good compiler would generate better code (and a really good compiler
  48.180 ++ * wouldn't care). Gcc is currently neither.
  48.181 ++ */
  48.182 ++#define time_after(a,b)		\
  48.183 ++	(typecheck(unsigned long, a) && \
  48.184 ++	 typecheck(unsigned long, b) && \
  48.185 ++	 ((long)(b) - (long)(a) < 0))
  48.186 ++#define time_before(a,b)	time_after(b,a)
  48.187 ++
  48.188 ++#define time_after_eq(a,b)	\
  48.189 ++	(typecheck(unsigned long, a) && \
  48.190 ++	 typecheck(unsigned long, b) && \
  48.191 ++	 ((long)(a) - (long)(b) >= 0))
  48.192 ++#define time_before_eq(a,b)	time_after_eq(b,a)
  48.193 ++#endif
  48.194 + 
  48.195 + static irqreturn_t
  48.196 + timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
  48.197 + {
  48.198 + 	unsigned long new_itm;
  48.199 + 
  48.200 ++#ifndef XEN
  48.201 + 	if (unlikely(cpu_is_offline(smp_processor_id()))) {
  48.202 + 		return IRQ_HANDLED;
  48.203 + 	}
  48.204 ++#endif
  48.205 ++#ifdef XEN
  48.206 ++	if (current->domain == dom0) {
  48.207 ++		// FIXME: there's gotta be a better way of doing this...
  48.208 ++		// We have to ensure that domain0 is launched before we
  48.209 ++		// call vcpu_timer_expired on it
  48.210 ++		//domain0_ready = 1; // moved to xensetup.c
  48.211 ++	}
  48.212 ++	if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
  48.213 ++		vcpu_pend_timer(dom0->exec_domain[0]);
  48.214 ++		vcpu_set_next_timer(dom0->exec_domain[0]);
  48.215 ++		domain_wake(dom0->exec_domain[0]);
  48.216 ++	}
  48.217 ++	if (!is_idle_task(current->domain) && current->domain != dom0) {
  48.218 ++		if (vcpu_timer_expired(current)) {
  48.219 ++			vcpu_pend_timer(current);
  48.220 ++			// ensure another timer interrupt happens even if domain doesn't
  48.221 ++			vcpu_set_next_timer(current);
  48.222 ++			domain_wake(current);
  48.223 ++		}
  48.224 ++	}
  48.225 ++#endif
  48.226 + 
  48.227 ++#ifndef XEN
  48.228 + 	platform_timer_interrupt(irq, dev_id, regs);
  48.229 ++#endif
  48.230 + 
  48.231 + 	new_itm = local_cpu_data->itm_next;
  48.232 + 
  48.233 + 	if (!time_after(ia64_get_itc(), new_itm))
  48.234 ++#ifdef XEN
  48.235 ++		return;
  48.236 ++#else
  48.237 + 		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
  48.238 + 		       ia64_get_itc(), new_itm);
  48.239 ++#endif
  48.240 + 
  48.241 ++#ifdef XEN
  48.242 ++//	printf("GOT TO HERE!!!!!!!!!!!\n");
  48.243 ++	//while(1);
  48.244 ++#endif
  48.245 + 	ia64_do_profile(regs);
  48.246 + 
  48.247 + 	while (1) {
  48.248 +@@ -269,10 +407,16 @@
  48.249 + 			 * another CPU. We need to avoid to SMP race by acquiring the
  48.250 + 			 * xtime_lock.
  48.251 + 			 */
  48.252 ++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
  48.253 + 			write_seqlock(&xtime_lock);
  48.254 ++#endif
  48.255 ++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
  48.256 + 			do_timer(regs);
  48.257 ++#endif
  48.258 + 			local_cpu_data->itm_next = new_itm;
  48.259 ++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
  48.260 + 			write_sequnlock(&xtime_lock);
  48.261 ++#endif
  48.262 + 		} else
  48.263 + 			local_cpu_data->itm_next = new_itm;
  48.264 + 
  48.265 +@@ -292,7 +436,12 @@
  48.266 + 		 */
  48.267 + 		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
  48.268 + 			new_itm += local_cpu_data->itm_delta;
  48.269 ++//#ifdef XEN
  48.270 ++//		vcpu_set_next_timer(current);
  48.271 ++//#else
  48.272 ++//printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
  48.273 + 		ia64_set_itm(new_itm);
  48.274 ++//#endif
  48.275 + 		/* double check, in case we got hit by a (slow) PMI: */
  48.276 + 	} while (time_after_eq(ia64_get_itc(), new_itm));
  48.277 + 	return IRQ_HANDLED;
  48.278 +@@ -307,6 +456,7 @@
  48.279 + 	int cpu = smp_processor_id();
  48.280 + 	unsigned long shift = 0, delta;
  48.281 + 
  48.282 ++printf("ia64_cpu_local_tick: about to call ia64_set_itv\n");
  48.283 + 	/* arrange for the cycle counter to generate a timer interrupt: */
  48.284 + 	ia64_set_itv(IA64_TIMER_VECTOR);
  48.285 + 
  48.286 +@@ -320,6 +470,7 @@
  48.287 + 		shift = (2*(cpu - hi) + 1) * delta/hi/2;
  48.288 + 	}
  48.289 + 	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
  48.290 ++printf("***** ia64_cpu_local_tick: Setting itm to %lx\n",local_cpu_data->itm_next);
  48.291 + 	ia64_set_itm(local_cpu_data->itm_next);
  48.292 + }
  48.293 + 
  48.294 +@@ -335,6 +486,7 @@
  48.295 + 	 * frequency and then a PAL call to determine the frequency ratio between the ITC
  48.296 + 	 * and the base frequency.
  48.297 + 	 */
  48.298 ++
  48.299 + 	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
  48.300 + 				    &platform_base_freq, &platform_base_drift);
  48.301 + 	if (status != 0) {
  48.302 +@@ -384,9 +536,11 @@
  48.303 + 					+ itc_freq/2)/itc_freq;
  48.304 + 
  48.305 + 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
  48.306 ++#ifndef XEN
  48.307 + 		itc_interpolator.frequency = local_cpu_data->itc_freq;
  48.308 + 		itc_interpolator.drift = itc_drift;
  48.309 + 		register_time_interpolator(&itc_interpolator);
  48.310 ++#endif
  48.311 + 	}
  48.312 + 
  48.313 + 	/* Setup the CPU local timer tick */
  48.314 +@@ -395,7 +549,9 @@
  48.315 + 
  48.316 + static struct irqaction timer_irqaction = {
  48.317 + 	.handler =	timer_interrupt,
  48.318 ++#ifndef XEN
  48.319 + 	.flags =	SA_INTERRUPT,
  48.320 ++#endif
  48.321 + 	.name =		"timer"
  48.322 + };
  48.323 + 
  48.324 +@@ -403,12 +559,16 @@
  48.325 + time_init (void)
  48.326 + {
  48.327 + 	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
  48.328 ++#ifndef XEN
  48.329 + 	efi_gettimeofday(&xtime);
  48.330 ++#endif
  48.331 + 	ia64_init_itm();
  48.332 + 
  48.333 ++#ifndef XEN
  48.334 + 	/*
  48.335 + 	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
  48.336 + 	 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
  48.337 + 	 */
  48.338 + 	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
  48.339 ++#endif
  48.340 + }
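
The time_after()/time_before() macros borrowed above from linux/jiffies.h work by testing the sign of a signed difference rather than comparing the raw unsigned counters. A minimal standalone sketch of why that survives counter wrap (editorial illustration, not part of the changeset; it assumes the usual two's-complement long, exactly as the macros themselves do):

#include <stdio.h>

/* same trick as the imported time_after(), minus the typecheck */
#define time_after(a,b)  ((long)(b) - (long)(a) < 0)

int main(void)
{
	unsigned long before = (unsigned long)-10;  /* 10 ticks before wrap */
	unsigned long after  = before + 20;         /* wraps around to 10 */

	/* a naive unsigned compare is fooled by the wrap... */
	printf("naive compare: %d\n", after > before);            /* prints 0 */
	/* ...the signed-difference form is not */
	printf("time_after:    %d\n", time_after(after, before)); /* prints 1 */
	return 0;
}
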
    49.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/tlb.c	Mon Feb 14 12:25:30 2005 +0000
    49.3 @@ -0,0 +1,48 @@
    49.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/tlb.c	2004-06-15 23:19:43.000000000 -0600
    49.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/tlb.c	2004-08-25 19:28:12.000000000 -0600
    49.6 +@@ -21,7 +21,9 @@
    49.7 + #include <asm/mmu_context.h>
    49.8 + #include <asm/pgalloc.h>
    49.9 + #include <asm/pal.h>
   49.10 ++#ifndef XEN
   49.11 + #include <asm/tlbflush.h>
   49.12 ++#endif
   49.13 + 
   49.14 + static struct {
   49.15 + 	unsigned long mask;	/* mask of supported purge page-sizes */
   49.16 +@@ -43,6 +45,9 @@
   49.17 + void
   49.18 + wrap_mmu_context (struct mm_struct *mm)
   49.19 + {
   49.20 ++#ifdef XEN
   49.21 ++printf("wrap_mmu_context: called, not implemented\n");
   49.22 ++#else
   49.23 + 	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
   49.24 + 	struct task_struct *tsk;
   49.25 + 	int i;
   49.26 +@@ -83,6 +88,7 @@
   49.27 + 		put_cpu();
   49.28 + 	}
   49.29 + 	local_flush_tlb_all();
   49.30 ++#endif
   49.31 + }
   49.32 + 
   49.33 + void
   49.34 +@@ -132,6 +138,9 @@
   49.35 + void
   49.36 + flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
   49.37 + {
   49.38 ++#ifdef XEN
   49.39 ++printf("flush_tlb_range: called, not implemented\n");
   49.40 ++#else
   49.41 + 	struct mm_struct *mm = vma->vm_mm;
   49.42 + 	unsigned long size = end - start;
   49.43 + 	unsigned long nbits;
   49.44 +@@ -163,6 +172,7 @@
   49.45 + # endif
   49.46 + 
   49.47 + 	ia64_srlz_i();			/* srlz.i implies srlz.d */
   49.48 ++#endif
   49.49 + }
   49.50 + EXPORT_SYMBOL(flush_tlb_range);
   49.51 + 
    50.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    50.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/types.h	Mon Feb 14 12:25:30 2005 +0000
    50.3 @@ -0,0 +1,15 @@
    50.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/types.h	2004-06-15 23:19:01.000000000 -0600
    50.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/types.h	2004-11-11 17:08:30.000000000 -0700
    50.6 +@@ -1,5 +1,12 @@
    50.7 + #ifndef _ASM_IA64_TYPES_H
    50.8 + #define _ASM_IA64_TYPES_H
    50.9 ++#ifdef XEN
   50.10 ++#ifndef __ASSEMBLY__
   50.11 ++typedef unsigned long ssize_t;
   50.12 ++typedef unsigned long size_t;
   50.13 ++typedef long long loff_t;
   50.14 ++#endif
   50.15 ++#endif
   50.16 + 
   50.17 + /*
   50.18 +  * This file is never included by application software unless explicitly requested (e.g.,
    51.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    51.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/unaligned.c	Mon Feb 14 12:25:30 2005 +0000
    51.3 @@ -0,0 +1,97 @@
    51.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/unaligned.c	2004-06-15 23:20:03.000000000 -0600
    51.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/unaligned.c	2004-08-25 19:28:12.000000000 -0600
    51.6 +@@ -15,8 +15,10 @@
    51.7 +  */
    51.8 + #include <linux/kernel.h>
    51.9 + #include <linux/sched.h>
   51.10 ++#ifndef XEN
   51.11 + #include <linux/smp_lock.h>
   51.12 + #include <linux/tty.h>
   51.13 ++#endif
   51.14 + 
   51.15 + #include <asm/intrinsics.h>
   51.16 + #include <asm/processor.h>
   51.17 +@@ -24,7 +26,16 @@
   51.18 + #include <asm/uaccess.h>
   51.19 + #include <asm/unaligned.h>
   51.20 + 
   51.21 ++#ifdef XEN
   51.22 ++#define	ia64_peek(x...)	printk("ia64_peek: called, not implemented\n")
   51.23 ++#define	ia64_poke(x...)	printk("ia64_poke: called, not implemented\n")
   51.24 ++#define	ia64_sync_fph(x...) printk("ia64_sync_fph: called, not implemented\n")
   51.25 ++#define	ia64_flush_fph(x...) printk("ia64_flush_fph: called, not implemented\n")
   51.26 ++#define	die_if_kernel(x...) printk("die_if_kernel: called, not implemented\n")
   51.27 ++#define jiffies 0
   51.28 ++#else
   51.29 + extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
   51.30 ++#endif
   51.31 + 
   51.32 + #undef DEBUG_UNALIGNED_TRAP
   51.33 + 
   51.34 +@@ -437,7 +448,11 @@
   51.35 + }
   51.36 + 
   51.37 + 
   51.38 ++#ifdef XEN
   51.39 ++void
   51.40 ++#else
   51.41 + static void
   51.42 ++#endif
   51.43 + setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
   51.44 + {
   51.45 + 	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   51.46 +@@ -611,7 +626,11 @@
   51.47 + }
   51.48 + 
   51.49 + 
   51.50 ++#ifdef XEN
   51.51 ++void
   51.52 ++#else
   51.53 + static void
   51.54 ++#endif
   51.55 + getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
   51.56 + {
   51.57 + 	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   51.58 +@@ -1298,7 +1317,9 @@
   51.59 + 	mm_segment_t old_fs = get_fs();
   51.60 + 	unsigned long bundle[2];
   51.61 + 	unsigned long opcode;
   51.62 ++#ifndef XEN
   51.63 + 	struct siginfo si;
   51.64 ++#endif
   51.65 + 	const struct exception_table_entry *eh = NULL;
   51.66 + 	union {
   51.67 + 		unsigned long l;
   51.68 +@@ -1317,6 +1338,9 @@
   51.69 + 	 * user-level unaligned accesses.  Otherwise, a clever program could trick this
   51.70 + 	 * handler into reading an arbitrary kernel addresses...
   51.71 + 	 */
   51.72 ++#ifdef XEN
   51.73 ++printk("ia64_handle_unaligned: called, not working yet\n");
   51.74 ++#else
   51.75 + 	if (!user_mode(regs))
   51.76 + 		eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
   51.77 + 	if (user_mode(regs) || eh) {
   51.78 +@@ -1353,6 +1377,7 @@
   51.79 + 
   51.80 + 	if (__copy_from_user(bundle, (void *) regs->cr_iip, 16))
   51.81 + 		goto failure;
   51.82 ++#endif
   51.83 + 
   51.84 + 	/*
   51.85 + 	 * extract the instruction from the bundle given the slot number
   51.86 +@@ -1493,6 +1518,7 @@
   51.87 + 		/* NOT_REACHED */
   51.88 + 	}
   51.89 +   force_sigbus:
   51.90 ++#ifndef XEN
   51.91 + 	si.si_signo = SIGBUS;
   51.92 + 	si.si_errno = 0;
   51.93 + 	si.si_code = BUS_ADRALN;
   51.94 +@@ -1501,5 +1527,6 @@
   51.95 + 	si.si_isr = 0;
   51.96 + 	si.si_imm = 0;
   51.97 + 	force_sig_info(SIGBUS, &si, current);
   51.98 ++#endif
   51.99 + 	goto done;
  51.100 + }
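
The XEN block at the top of this patch swaps unported helpers (ia64_peek, ia64_poke, ia64_sync_fph, ia64_flush_fph, die_if_kernel) for printk-only stand-ins, using GNU cpp's named-variadic form x... so every existing call site still compiles but merely logs. A compilable sketch of that stubbing idiom (the name is reused purely for illustration; printk is mapped to printf here):

#include <stdio.h>

#define printk printf

/* swallow whatever arguments the call site passes; just log the hit */
#define ia64_peek(x...)	printk("ia64_peek: called, not implemented\n")

int main(void)
{
	/* the entire argument list disappears in the macro expansion */
	ia64_peek((void *)0, (void *)0, 16UL, (long *)0);
	return 0;
}
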
    52.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    52.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/wait.h	Mon Feb 14 12:25:30 2005 +0000
    52.3 @@ -0,0 +1,26 @@
    52.4 +--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/wait.h	2004-06-15 23:19:31.000000000 -0600
    52.5 ++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/wait.h	2004-08-25 19:28:13.000000000 -0600
    52.6 +@@ -104,10 +104,15 @@
    52.7 + 	list_del(&old->task_list);
    52.8 + }
    52.9 + 
   52.10 ++#ifdef XEN
   52.11 ++void FASTCALL(__wake_up(struct task_struct *p));
   52.12 ++#else
   52.13 + void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
   52.14 ++#endif
   52.15 + extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
   52.16 + extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
   52.17 + 
   52.18 ++#ifndef XEN
   52.19 + #define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
   52.20 + #define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
   52.21 + #define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
   52.22 +@@ -117,6 +122,7 @@
   52.23 + #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
   52.24 + #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
   52.25 + #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
   52.26 ++#endif
   52.27 + 
   52.28 + #define __wait_event(wq, condition) 					\
   52.29 + do {									\
    53.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    53.2 +++ b/xen/arch/ia64/pdb-stub.c	Mon Feb 14 12:25:30 2005 +0000
    53.3 @@ -0,0 +1,61 @@
    53.4 +
    53.5 +/*
    53.6 + * pervasive debugger
    53.7 + * www.cl.cam.ac.uk/netos/pdb
    53.8 + *
    53.9 + * alex ho
   53.10 + * 2004
   53.11 + * university of cambridge computer laboratory
   53.12 + *
   53.13 + * code adapted originally from kgdb, nemesis, & gdbserver
   53.14 + */
   53.15 +
   53.16 +#include <xen/lib.h>
   53.17 +#include <xen/sched.h>
   53.18 +#include <asm/ptrace.h>
   53.19 +#include <xen/keyhandler.h> 
   53.20 +//#include <asm/apic.h>
   53.21 +#include <asm/domain_page.h>                           /* [un]map_domain_mem */
   53.22 +#include <asm/processor.h>
   53.23 +#include <asm/pdb.h>
   53.24 +#include <xen/list.h>
   53.25 +#include <xen/serial.h>
   53.26 +
   53.27 +#define __PDB_GET_VAL 1
   53.28 +#define __PDB_SET_VAL 2
   53.29 +
   53.30 +/*
   53.31 + * Read or write memory in an address space
   53.32 + */
   53.33 +int pdb_change_values(u_char *buffer, int length,
   53.34 +		      unsigned long cr3, unsigned long addr, int rw)
   53.35 +{
   53.36 +	dummy();
   53.37 +	return 0;
   53.38 +}
   53.39 +
   53.40 +/*
   53.41 + * Set memory in a domain's address space
   53.42 + * Set "length" bytes at "address" from "domain" to the values in "buffer".
   53.43 + * Return the number of bytes set, 0 if there was a problem.
   53.44 + */
   53.45 +
   53.46 +int pdb_set_values(u_char *buffer, int length,
   53.47 +		   unsigned long cr3, unsigned long addr)
   53.48 +{
   53.49 +    int count = pdb_change_values(buffer, length, cr3, addr, __PDB_SET_VAL);
   53.50 +    return count;
   53.51 +}
   53.52 +
   53.53 +/*
   53.54 + * Read memory from a domain's address space.
   53.55 + * Fetch "length" bytes at "address" from "domain" into "buffer".
   53.56 + * Return the number of bytes read, 0 if there was a problem.
   53.57 + */
   53.58 +
   53.59 +int pdb_get_values(u_char *buffer, int length,
   53.60 +		   unsigned long cr3, unsigned long addr)
   53.61 +{
   53.62 +  return pdb_change_values(buffer, length, cr3, addr, __PDB_GET_VAL);
   53.63 +}
   53.64 +
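
Both wrappers above funnel into pdb_change_values() and follow the contract spelled out in the comments: bytes transferred on success, 0 on a problem. A self-contained sketch of a hypothetical caller (the cr3/address values are made up, and pdb_change_values is stubbed the same way the file currently stubs it):

#include <stdio.h>

typedef unsigned char u_char;

#define __PDB_GET_VAL 1

/* stand-in for the real cross-address-space access via cr3 */
static int pdb_change_values(u_char *buffer, int length,
			     unsigned long cr3, unsigned long addr, int rw)
{
	return 0;	/* "0 if there was a problem" */
}

static int pdb_get_values(u_char *buffer, int length,
			  unsigned long cr3, unsigned long addr)
{
	return pdb_change_values(buffer, length, cr3, addr, __PDB_GET_VAL);
}

int main(void)
{
	u_char buf[16];

	if (pdb_get_values(buf, sizeof buf, 0UL, 0x4000UL) != sizeof buf)
		printf("pdb read failed (the stub always does)\n");
	return 0;
}
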
    54.1 --- a/xen/arch/ia64/privop.c	Mon Feb 14 11:42:11 2005 +0000
    54.2 +++ b/xen/arch/ia64/privop.c	Mon Feb 14 12:25:30 2005 +0000
    54.3 @@ -7,7 +7,6 @@
    54.4   */
    54.5  
    54.6  #include <asm/privop.h>
    54.7 -#include <asm/privify.h>
    54.8  #include <asm/vcpu.h>
    54.9  #include <asm/processor.h>
   54.10  #include <asm/delay.h>	// Debug only
   54.11 @@ -20,7 +19,7 @@ Hypercall bundle creation
   54.12  **************************************************************************/
   54.13  
   54.14  
   54.15 -void build_hypercall_bundle(UINT64 *imva, UINT64 breakimm, UINT64 hypnum, UINT64 ret)
   54.16 +void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
   54.17  {
   54.18  	INST64_A5 slot0;
   54.19  	INST64_I19 slot1;
   54.20 @@ -32,10 +31,10 @@ void build_hypercall_bundle(UINT64 *imva
   54.21  	slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
   54.22  	slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
   54.23  	slot0.imm5c = hypnum >> 16; slot0.s = 0;
   54.24 -	// slot1: break breakimm
   54.25 +	// slot1: break brkimm
   54.26  	slot1.inst = 0;
   54.27  	slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
   54.28 -	slot1.imm20 = breakimm; slot1.i = breakimm >> 20;
   54.29 +	slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
   54.30  	// if ret slot2: br.ret.sptk.many rp
   54.31  	// else slot2: br.cond.sptk.many rp
   54.32  	slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
    55.1 --- a/xen/arch/ia64/process.c	Mon Feb 14 11:42:11 2005 +0000
    55.2 +++ b/xen/arch/ia64/process.c	Mon Feb 14 12:25:30 2005 +0000
    55.3 @@ -21,7 +21,7 @@
    55.4  #include <asm/io.h>
    55.5  #include <asm/processor.h>
    55.6  #include <asm/desc.h>
    55.7 -#include <asm/ldt.h>
    55.8 +//#include <asm/ldt.h>
    55.9  #include <xen/irq.h>
   55.10  #include <xen/event.h>
   55.11  #include <asm/regionreg.h>
   55.12 @@ -31,6 +31,7 @@
   55.13  #include <asm/hpsim_ssc.h>
   55.14  #include <asm/dom_fw.h>
   55.15  
   55.16 +extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
   55.17  extern struct ia64_sal_retval pal_emulator_static(UINT64);
   55.18  extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
   55.19  
   55.20 @@ -49,7 +50,7 @@ extern unsigned long dom0_start, dom0_si
   55.21  			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
   55.22  			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
   55.23  
   55.24 -#define PSCB(x)	x->shared_info->arch
   55.25 +#define PSCB(x,y)	x->vcpu_info->arch.y
   55.26  
   55.27  extern unsigned long vcpu_verbose;
   55.28  
   55.29 @@ -59,17 +60,17 @@ long do_iopl(domid_t domain, unsigned in
   55.30  	return 0;
   55.31  }
   55.32  
   55.33 -void schedule_tail(struct domain *next)
   55.34 +void schedule_tail(struct exec_domain *next)
   55.35  {
   55.36  	unsigned long rr7;
   55.37 -	printk("current=%lx,shared_info=%lx\n",current,current->shared_info);
   55.38 -	printk("next=%lx,shared_info=%lx\n",next,next->shared_info);
   55.39 +	printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
   55.40 +	printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
   55.41  	if (rr7 = load_region_regs(current)) {
   55.42  		printk("schedule_tail: change to rr7 not yet implemented\n");
   55.43  	}
   55.44  }
   55.45  
   55.46 -extern TR_ENTRY *match_tr(struct domain *d, unsigned long ifa);
   55.47 +extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
   55.48  
   55.49  void tdpfoo(void) { }
   55.50  
   55.51 @@ -80,7 +81,7 @@ void tdpfoo(void) { }
   55.52  unsigned long translate_domain_pte(unsigned long pteval,
   55.53  	unsigned long address, unsigned long itir)
   55.54  {
   55.55 -	struct domain *d = (struct domain *) current;
   55.56 +	struct domain *d = current->domain;
   55.57  	unsigned long mask, pteval2, mpaddr;
   55.58  	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   55.59  	extern struct domain *dom0;
   55.60 @@ -113,22 +114,22 @@ unsigned long translate_domain_mpaddr(un
   55.61  	extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   55.62  	unsigned long pteval;
   55.63  
   55.64 -	if (current == dom0) {
   55.65 +	if (current->domain == dom0) {
   55.66  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   55.67  			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   55.68  			tdpfoo();
   55.69  		}
   55.70  	}
   55.71 -	pteval = lookup_domain_mpa(current,mpaddr);
   55.72 +	pteval = lookup_domain_mpa(current->domain,mpaddr);
   55.73  	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
   55.74  }
   55.75  
   55.76  void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
   55.77  {
   55.78 -	unsigned long vcpu_get_ipsr_int_state(struct domain *,unsigned long);
   55.79 -	unsigned long vcpu_get_rr_ve(struct domain *,unsigned long);
   55.80 -	unsigned long vcpu_get_itir_on_fault(struct domain *,unsigned long);
   55.81 -	struct domain *d = (struct domain *) current;
   55.82 +	unsigned long vcpu_get_ipsr_int_state(struct exec_domain *,unsigned long);
   55.83 +	unsigned long vcpu_get_rr_ve(struct exec_domain *,unsigned long);
   55.84 +	struct domain *d = current->domain;
   55.85 +	struct exec_domain *ed = current;
   55.86  
   55.87  	if (vector == IA64_EXTINT_VECTOR) {
   55.88  		
   55.89 @@ -140,8 +141,8 @@ void reflect_interruption(unsigned long 
   55.90  			first_extint = 0;
   55.91  		}
   55.92  	}
   55.93 -	if (!PSCB(d).interrupt_collection_enabled) {
   55.94 -		if (!(PSCB(d).ipsr & IA64_PSR_DT)) {
   55.95 +	if (!PSCB(ed,interrupt_collection_enabled)) {
   55.96 +		if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
   55.97  			printf("psr.dt off, trying to deliver nested dtlb!\n");
   55.98  			while(1);
   55.99  		}
  55.100 @@ -149,49 +150,49 @@ void reflect_interruption(unsigned long 
  55.101  		if (vector != IA64_DATA_TLB_VECTOR &&
   55.102  	    vector != IA64_ALT_DATA_TLB_VECTOR) {
  55.103  printf("psr.ic off, delivering fault=%lx,iip=%p,isr=%p,PSCB.iip=%p\n",
  55.104 -	vector,regs->cr_iip,isr,PSCB(d).iip);
  55.105 +	vector,regs->cr_iip,isr,PSCB(ed,iip));
  55.106  			while(1);
  55.107  			
  55.108  		}
  55.109  //printf("Delivering NESTED DATA TLB fault\n");
  55.110  		vector = IA64_DATA_NESTED_TLB_VECTOR;
  55.111 -		regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
  55.112 +		regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
  55.113  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
  55.114  // NOTE: nested trap must NOT pass PSCB address
  55.115 -		//regs->r31 = (unsigned long) &PSCB(d);
  55.116 +		//regs->r31 = (unsigned long) &PSCB(ed);
  55.117  		return;
  55.118  
  55.119  	}
  55.120 -	if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(d).ifa = ifa;
  55.121 -	else ifa = PSCB(d).ifa;
  55.122 +	if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(ed,ifa) = ifa;
  55.123 +	else ifa = PSCB(ed,ifa);
  55.124  	vector &= ~0xf;
  55.125  //	always deliver on ALT vector (for now?) because no VHPT
  55.126 -//	if (!vcpu_get_rr_ve(d,ifa)) {
  55.127 +//	if (!vcpu_get_rr_ve(ed,ifa)) {
  55.128  		if (vector == IA64_DATA_TLB_VECTOR)
  55.129  			vector = IA64_ALT_DATA_TLB_VECTOR;
  55.130  		else if (vector == IA64_INST_TLB_VECTOR)
  55.131  			vector = IA64_ALT_INST_TLB_VECTOR;
  55.132  //	}
  55.133 -	PSCB(d).unat = regs->ar_unat;  // not sure if this is really needed?
  55.134 -	PSCB(d).precover_ifs = regs->cr_ifs;
  55.135 -	vcpu_bsw0(d);
  55.136 -	PSCB(d).ipsr = vcpu_get_ipsr_int_state(d,regs->cr_ipsr);
  55.137 +	PSCB(ed,unat) = regs->ar_unat;  // not sure if this is really needed?
  55.138 +	PSCB(ed,precover_ifs) = regs->cr_ifs;
  55.139 +	vcpu_bsw0(ed);
  55.140 +	PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed,regs->cr_ipsr);
  55.141  	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
  55.142 -		PSCB(d).iim = itiriim;
  55.143 -	else PSCB(d).itir = vcpu_get_itir_on_fault(d,ifa);
  55.144 -	PSCB(d).isr = isr; // this is unnecessary except for interrupts!
  55.145 -	PSCB(d).iip = regs->cr_iip;
  55.146 -	PSCB(d).ifs = 0;
  55.147 -	PSCB(d).incomplete_regframe = 0;
  55.148 +		PSCB(ed,iim) = itiriim;
  55.149 +	else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed,ifa);
  55.150 +	PSCB(ed,isr) = isr; // this is unnecessary except for interrupts!
  55.151 +	PSCB(ed,iip) = regs->cr_iip;
  55.152 +	PSCB(ed,ifs) = 0;
  55.153 +	PSCB(ed,incomplete_regframe) = 0;
  55.154  
  55.155 -	regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
  55.156 +	regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
  55.157  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
  55.158  // FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
  55.159  // IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
  55.160 -	//regs->r31 = (unsigned long) &PSCB(d);
  55.161 +	//regs->r31 = (unsigned long) &PSCB(ed);
  55.162  
  55.163 -	PSCB(d).interrupt_delivery_enabled = 0;
  55.164 -	PSCB(d).interrupt_collection_enabled = 0;
  55.165 +	PSCB(ed,interrupt_delivery_enabled) = 0;
  55.166 +	PSCB(ed,interrupt_collection_enabled) = 0;
  55.167  }
  55.168  
  55.169  void foodpi(void) {}
  55.170 @@ -201,11 +202,12 @@ void foodpi(void) {}
  55.171  // NEVER successful if already reflecting a trap/fault because psr.i==0
  55.172  void deliver_pending_interrupt(struct pt_regs *regs)
  55.173  {
  55.174 -	struct domain *d = (struct domain *) current;
  55.175 +	struct domain *d = current->domain;
  55.176 +	struct exec_domain *ed = current;
  55.177  	// FIXME: Will this work properly if doing an RFI???
  55.178  	if (!is_idle_task(d) && user_mode(regs)) {
  55.179 -		vcpu_poke_timer(d);
  55.180 -		if (vcpu_deliverable_interrupts(d)) {
  55.181 +		vcpu_poke_timer(ed);
  55.182 +		if (vcpu_deliverable_interrupts(ed)) {
  55.183  			unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
  55.184  			foodpi();
  55.185  			reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
  55.186 @@ -213,13 +215,13 @@ void deliver_pending_interrupt(struct pt
  55.187  	}
  55.188  }
  55.189  
  55.190 -int handle_lazy_cover(struct domain *d, unsigned long isr, struct pt_regs *regs)
  55.191 +int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
  55.192  {
  55.193 -	if (!PSCB(d).interrupt_collection_enabled) {
  55.194 +	if (!PSCB(ed,interrupt_collection_enabled)) {
  55.195  		if (isr & IA64_ISR_IR) {
  55.196  //			printf("Handling lazy cover\n");
  55.197 -			PSCB(d).ifs = regs->cr_ifs;
  55.198 -			PSCB(d).incomplete_regframe = 1;
  55.199 +			PSCB(ed,ifs) = regs->cr_ifs;
  55.200 +			PSCB(ed,incomplete_regframe) = 1;
  55.201  			regs->cr_ifs = 0;
  55.202  			return(1); // retry same instruction with cr.ifs off
  55.203  		}
  55.204 @@ -231,7 +233,8 @@ int handle_lazy_cover(struct domain *d, 
  55.205  
  55.206  void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
  55.207  {
  55.208 -	struct domain *d = (struct domain *) current;
  55.209 +	struct domain *d = (struct domain *) current->domain;
   55.210 +	struct exec_domain *ed = (struct exec_domain *) current;
  55.211  	TR_ENTRY *trp;
  55.212  	unsigned long psr = regs->cr_ipsr, mask, flags;
  55.213  	unsigned long iip = regs->cr_iip;
  55.214 @@ -248,7 +251,7 @@ void xen_handle_domain_access(unsigned l
  55.215  
  55.216  		// got here trying to read a privop bundle
  55.217  	     	//if (d->metaphysical_mode) {
  55.218 -     	if (d->metaphysical_mode && !(address>>61)) {  //FIXME
  55.219 +     	if (PSCB(current,metaphysical_mode) && !(address>>61)) {  //FIXME
  55.220  		if (d == dom0) {
  55.221  			if (address < dom0_start || address >= dom0_start + dom0_size) {
  55.222  				printk("xen_handle_domain_access: out-of-bounds"
  55.223 @@ -259,7 +262,7 @@ void xen_handle_domain_access(unsigned l
  55.224  		pteval = lookup_domain_mpa(d,address);
  55.225  		//FIXME: check return value?
  55.226  		// would be nice to have a counter here
  55.227 -		vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
  55.228 +		vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
  55.229  		return;
  55.230  	}
  55.231  if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
  55.232 @@ -269,7 +272,7 @@ if (address < 0x4000) printf("WARNING: p
  55.233  		while(1);
  55.234  	}
  55.235  		
  55.236 -	fault = vcpu_tpa(d,address,&mpaddr);
  55.237 +	fault = vcpu_tpa(ed,address,&mpaddr);
  55.238  	if (fault != IA64_NO_FAULT) {
  55.239  		// this is hardcoded to handle __get_domain_bundle only
  55.240  		regs->r8 = 0; regs->r9 = 0;
  55.241 @@ -287,12 +290,12 @@ if (address < 0x4000) printf("WARNING: p
  55.242  	// would be nice to have a counter here
  55.243  	//printf("Handling privop data TLB miss\n");
  55.244  	// FIXME, must be inlined or potential for nested fault here!
  55.245 -	vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
  55.246 +	vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
  55.247  }
  55.248  
  55.249  void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
  55.250  {
  55.251 -	struct domain *d = (struct domain *) current;
  55.252 +	struct domain *d = (struct domain *) current->domain;
  55.253  	TR_ENTRY *trp;
  55.254  	unsigned long psr = regs->cr_ipsr, mask, flags;
  55.255  	unsigned long iip = regs->cr_iip;
  55.256 @@ -317,30 +320,30 @@ void ia64_do_page_fault (unsigned long a
  55.257  	// FIXME: no need to pass itir in to this routine as we need to
  55.258  	// compute the virtual itir anyway (based on domain's RR.ps)
  55.259  	// AND ACTUALLY reflect_interruption doesn't use it anyway!
  55.260 -	itir = vcpu_get_itir_on_fault(d,address);
  55.261 +	itir = vcpu_get_itir_on_fault(current,address);
  55.262  
  55.263 -	if (d->metaphysical_mode && (is_data || !(address>>61))) {  //FIXME
  55.264 +	if (PSCB(current,metaphysical_mode) && (is_data || !(address>>61))) {  //FIXME
  55.265  		// FIXME should validate mpaddr here
  55.266  		if (d == dom0) {
  55.267  			if (address < dom0_start || address >= dom0_start + dom0_size) {
  55.268  				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
  55.269 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,d->shared_info->arch.iip);
  55.270 +				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
  55.271  				tdpfoo();
  55.272  			}
  55.273  		}
  55.274  		pteval = lookup_domain_mpa(d,address);
  55.275  		// FIXME, must be inlined or potential for nested fault here!
  55.276 -		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,PAGE_SHIFT);
  55.277 +		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,PAGE_SHIFT);
  55.278  		return;
  55.279  	}
  55.280 -	if (trp = match_tr(d,address)) {
  55.281 +	if (trp = match_tr(current,address)) {
  55.282  		// FIXME address had better be pre-validated on insert
  55.283  		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
  55.284 -		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
  55.285 +		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
  55.286  		return;
  55.287  	}
  55.288  	vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
  55.289 -	if (handle_lazy_cover(d, isr, regs)) return;
  55.290 +	if (handle_lazy_cover(current, isr, regs)) return;
  55.291  if (!(address>>61)) { printf("ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n",address,iip,ia64_get_itc()); while(1); }
  55.292  	if ((isr & IA64_ISR_SP)
  55.293  	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
  55.294 @@ -683,7 +686,8 @@ void
  55.295  ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
  55.296  {
  55.297  	static int first_time = 1;
  55.298 -	struct domain *d = (struct domain *) current;
  55.299 +	struct domain *d = (struct domain *) current->domain;
   55.300 +	struct exec_domain *ed = (struct exec_domain *) current;
  55.301  	extern unsigned long running_on_sim;
  55.302  
  55.303  	if (first_time) {
  55.304 @@ -706,16 +710,16 @@ ia64_handle_break (unsigned long ifa, st
  55.305  			regs->r10 = x.v1; regs->r11 = x.v2;
  55.306  			break;
  55.307  		    case FW_HYPERCALL_SAL_CALL:
  55.308 -			x = sal_emulator(vcpu_get_gr(d,32),vcpu_get_gr(d,33),
  55.309 -				vcpu_get_gr(d,34),vcpu_get_gr(d,35),
  55.310 -				vcpu_get_gr(d,36),vcpu_get_gr(d,37),
  55.311 -				vcpu_get_gr(d,38),vcpu_get_gr(d,39));
  55.312 +			x = sal_emulator(vcpu_get_gr(ed,32),vcpu_get_gr(ed,33),
  55.313 +				vcpu_get_gr(ed,34),vcpu_get_gr(ed,35),
  55.314 +				vcpu_get_gr(ed,36),vcpu_get_gr(ed,37),
  55.315 +				vcpu_get_gr(ed,38),vcpu_get_gr(ed,39));
  55.316  			regs->r8 = x.status; regs->r9 = x.v0;
  55.317  			regs->r10 = x.v1; regs->r11 = x.v2;
  55.318  			break;
  55.319  		    case FW_HYPERCALL_EFI_RESET_SYSTEM:
  55.320  			printf("efi.reset_system called ");
  55.321 -			if (current == dom0) {
  55.322 +			if (current->domain == dom0) {
  55.323  				printf("(by dom0)\n ");
  55.324  				(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
  55.325  			}
  55.326 @@ -726,8 +730,8 @@ ia64_handle_break (unsigned long ifa, st
  55.327  			{
  55.328  			unsigned long *tv, *tc;
  55.329  			fooefi();
  55.330 -			tv = vcpu_get_gr(d,32);
  55.331 -			tc = vcpu_get_gr(d,33);
  55.332 +			tv = vcpu_get_gr(ed,32);
  55.333 +			tc = vcpu_get_gr(ed,33);
  55.334  			//printf("efi_get_time(%p,%p) called...",tv,tc);
  55.335  			tv = __va(translate_domain_mpaddr(tv));
  55.336  			if (tc) tc = __va(translate_domain_mpaddr(tc));
  55.337 @@ -761,12 +765,13 @@ void
  55.338  ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
  55.339  {
  55.340  	IA64FAULT vector;
  55.341 -	struct domain *d = (struct domain *) current;
  55.342 +	struct domain *d = current->domain;
  55.343 +	struct exec_domain *ed = current;
  55.344  	// FIXME: no need to pass itir in to this routine as we need to
  55.345  	// compute the virtual itir anyway (based on domain's RR.ps)
  55.346  	// AND ACTUALLY reflect_interruption doesn't use it anyway!
  55.347 -	itir = vcpu_get_itir_on_fault(d,ifa);
  55.348 -	vector = priv_emulate((struct domain *)current,regs,isr);
  55.349 +	itir = vcpu_get_itir_on_fault(ed,ifa);
  55.350 +	vector = priv_emulate(current,regs,isr);
  55.351  	if (vector == IA64_RETRY) {
  55.352  		reflect_interruption(ifa,isr,itir,regs,
  55.353  			IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
  55.354 @@ -782,11 +787,11 @@ UINT64 int_counts[INTR_TYPE_MAX];
  55.355  void
  55.356  ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
  55.357  {
  55.358 -	extern unsigned long vcpu_get_itir_on_fault(struct domain *vcpu, UINT64 ifa);
  55.359 -	struct domain *d = (struct domain *) current;
  55.360 +	struct domain *d = (struct domain *) current->domain;
   55.361 +	struct exec_domain *ed = (struct exec_domain *) current;
  55.362  	unsigned long check_lazy_cover = 0;
  55.363  	unsigned long psr = regs->cr_ipsr;
  55.364 -	unsigned long itir = vcpu_get_itir_on_fault(d,ifa);
  55.365 +	unsigned long itir = vcpu_get_itir_on_fault(ed,ifa);
  55.366  
  55.367  	if (!(psr & IA64_PSR_CPL)) {
  55.368  		printf("ia64_handle_reflection: reflecting with priv=0!!\n");
  55.369 @@ -795,7 +800,7 @@ ia64_handle_reflection (unsigned long if
  55.370  	// FIXME: no need to pass itir in to this routine as we need to
  55.371  	// compute the virtual itir anyway (based on domain's RR.ps)
  55.372  	// AND ACTUALLY reflect_interruption doesn't use it anyway!
  55.373 -	itir = vcpu_get_itir_on_fault(d,ifa);
  55.374 +	itir = vcpu_get_itir_on_fault(ed,ifa);
  55.375  	switch(vector) {
  55.376  	    case 8:
  55.377  		vector = IA64_DIRTY_BIT_VECTOR; break;
  55.378 @@ -813,7 +818,7 @@ ia64_handle_reflection (unsigned long if
  55.379  		vector = IA64_DISABLED_FPREG_VECTOR; break;
  55.380  	    case 26:
  55.381  printf("*** NaT fault... attempting to handle as privop\n");
  55.382 -		vector = priv_emulate(d,regs,isr);
  55.383 +		vector = priv_emulate(ed,regs,isr);
  55.384  		if (vector == IA64_NO_FAULT) {
  55.385  printf("*** Handled privop masquerading as NaT fault\n");
  55.386  			return;
  55.387 @@ -831,6 +836,6 @@ printf("*** Handled privop masquerading 
  55.388  		while(vector);
  55.389  		return;
  55.390  	}
  55.391 -	if (check_lazy_cover && handle_lazy_cover(d, isr, regs)) return;
  55.392 +	if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
  55.393  	reflect_interruption(ifa,isr,itir,regs,vector);
  55.394  }
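
Most of the churn in this file is the mechanical half of the domain/exec_domain split: PSCB changes from a one-argument macro naming a whole struct (PSCB(d).iip) into a two-argument field accessor (PSCB(ed,iip)), so the indirection through ed->vcpu_info->arch lives in exactly one place. A standalone sketch of the pattern (struct layout invented for illustration; unlike the patch's version, the macro here parenthesizes its argument):

#include <stdio.h>

struct arch_vcpu_info { unsigned long iip, ifa; };
struct vcpu_info      { struct arch_vcpu_info arch; };
struct exec_domain    { struct vcpu_info *vcpu_info; };

#define PSCB(x,y)	((x)->vcpu_info->arch.y)

int main(void)
{
	struct vcpu_info vi = { { 0x1000, 0 } };
	struct exec_domain ed = { &vi };

	PSCB(&ed, ifa) = 0x2000;	/* usable as an lvalue, like the original */
	printf("iip=%lx ifa=%lx\n", PSCB(&ed, iip), PSCB(&ed, ifa));
	return 0;
}
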
    56.1 --- a/xen/arch/ia64/regionreg.c	Mon Feb 14 11:42:11 2005 +0000
    56.2 +++ b/xen/arch/ia64/regionreg.c	Mon Feb 14 12:25:30 2005 +0000
    56.3 @@ -249,7 +249,7 @@ static inline int validate_page_size(uns
    56.4  // NOTE: DOES NOT SET VCPU's rrs[x] value!!
    56.5  int set_one_rr(unsigned long rr, unsigned long val)
    56.6  {
    56.7 -	struct domain *d = current;
    56.8 +	struct exec_domain *ed = current;
    56.9  	unsigned long rreg = REGION_NUMBER(rr);
   56.10  	ia64_rr rrv, newrrv, memrrv;
   56.11  	unsigned long newrid;
   56.12 @@ -258,16 +258,16 @@ int set_one_rr(unsigned long rr, unsigne
   56.13  
   56.14  	rrv.rrval = val;
   56.15  	newrrv.rrval = 0;
   56.16 -	newrid = d->starting_rid + rrv.rid;
   56.17 +	newrid = ed->domain->starting_rid + rrv.rid;
   56.18  
   56.19 -	if (newrid > d->ending_rid) return 0;
   56.20 +	if (newrid > ed->domain->ending_rid) return 0;
   56.21  
   56.22  	memrrv.rrval = rrv.rrval;
   56.23  	if (rreg == 7) {
   56.24  		newrrv.rid = newrid;
   56.25  		newrrv.ve = VHPT_ENABLED_REGION_7;
   56.26  		newrrv.ps = IA64_GRANULE_SHIFT;
   56.27 -		ia64_new_rr7(vmMangleRID(newrrv.rrval));
   56.28 +		ia64_new_rr7(vmMangleRID(newrrv.rrval),ed->vcpu_info);
   56.29  	}
   56.30  	else {
   56.31  		newrrv.rid = newrid;
   56.32 @@ -310,43 +310,44 @@ int set_all_rr( u64 rr0, u64 rr1, u64 rr
   56.33  	return 1;
   56.34  }
   56.35  
   56.36 -void init_all_rr(struct domain *d)
   56.37 +void init_all_rr(struct exec_domain *ed)
   56.38  {
   56.39  	ia64_rr rrv;
   56.40  
   56.41  	rrv.rrval = 0;
   56.42 -	rrv.rid = d->metaphysical_rid;
   56.43 +	rrv.rid = ed->domain->metaphysical_rid;
   56.44  	rrv.ps = PAGE_SHIFT;
   56.45  	rrv.ve = 1;
   56.46 -	d->shared_info->arch.rrs[0] = -1;
   56.47 -	d->shared_info->arch.rrs[1] = rrv.rrval;
   56.48 -	d->shared_info->arch.rrs[2] = rrv.rrval;
   56.49 -	d->shared_info->arch.rrs[3] = rrv.rrval;
   56.50 -	d->shared_info->arch.rrs[4] = rrv.rrval;
   56.51 -	d->shared_info->arch.rrs[5] = rrv.rrval;
   56.52 -	d->shared_info->arch.rrs[6] = rrv.rrval;
   56.53 -//	d->shared_info->arch.rrs[7] = rrv.rrval;
   56.54 +if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
   56.55 +	ed->vcpu_info->arch.rrs[0] = -1;
   56.56 +	ed->vcpu_info->arch.rrs[1] = rrv.rrval;
   56.57 +	ed->vcpu_info->arch.rrs[2] = rrv.rrval;
   56.58 +	ed->vcpu_info->arch.rrs[3] = rrv.rrval;
   56.59 +	ed->vcpu_info->arch.rrs[4] = rrv.rrval;
   56.60 +	ed->vcpu_info->arch.rrs[5] = rrv.rrval;
   56.61 +	ed->vcpu_info->arch.rrs[6] = rrv.rrval;
   56.62 +//	ed->shared_info->arch.rrs[7] = rrv.rrval;
   56.63  }
   56.64  
   56.65  
   56.66  /* XEN/ia64 INTERNAL ROUTINES */
   56.67  
   56.68 -unsigned long physicalize_rid(struct domain *d, unsigned long rid)
   56.69 +unsigned long physicalize_rid(struct exec_domain *ed, unsigned long rid)
   56.70  {
   56.71  	ia64_rr rrv;
   56.72  	    
   56.73  	rrv.rrval = rid;
   56.74 -	rrv.rid += d->starting_rid;
   56.75 +	rrv.rid += ed->domain->starting_rid;
   56.76  	return rrv.rrval;
   56.77  }
   56.78  
   56.79  unsigned long
   56.80 -virtualize_rid(struct domain *d, unsigned long rid)
   56.81 +virtualize_rid(struct exec_domain *ed, unsigned long rid)
   56.82  {
   56.83  	ia64_rr rrv;
   56.84  	    
   56.85  	rrv.rrval = rid;
   56.86 -	rrv.rid -= d->starting_rid;
   56.87 +	rrv.rid -= ed->domain->starting_rid;
   56.88  	return rrv.rrval;
   56.89  }
   56.90  
   56.91 @@ -357,29 +358,29 @@ virtualize_rid(struct domain *d, unsigne
    56.92  // rr7 (because we have to go to assembly and physical mode
   56.93  // to change rr7).  If no change to rr7 is required, returns 0.
   56.94  //
   56.95 -unsigned long load_region_regs(struct domain *d)
   56.96 +unsigned long load_region_regs(struct exec_domain *ed)
   56.97  {
   56.98  	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
   56.99  	unsigned long oldrr7, newrr7;
  56.100  	// TODO: These probably should be validated
  56.101  
  56.102 -	if (d->metaphysical_mode) {
  56.103 +	if (ed->vcpu_info->arch.metaphysical_mode) {
  56.104  		ia64_rr rrv;
  56.105  
  56.106 -		rrv.rid = d->metaphysical_rid;
  56.107 +		rrv.rid = ed->domain->metaphysical_rid;
  56.108  		rrv.ps = PAGE_SHIFT;
  56.109  		rrv.ve = 1;
  56.110  		rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rr6 = newrr7 = rrv.rrval;
  56.111  	}
  56.112  	else {
  56.113 -		rr0 = physicalize_rid(d, d->shared_info->arch.rrs[0]);
  56.114 -		rr1 = physicalize_rid(d, d->shared_info->arch.rrs[1]);
  56.115 -		rr2 = physicalize_rid(d, d->shared_info->arch.rrs[2]);
  56.116 -		rr3 = physicalize_rid(d, d->shared_info->arch.rrs[3]);
  56.117 -		rr4 = physicalize_rid(d, d->shared_info->arch.rrs[4]);
  56.118 -		rr5 = physicalize_rid(d, d->shared_info->arch.rrs[5]);
  56.119 -		rr6 = physicalize_rid(d, d->shared_info->arch.rrs[6]);
  56.120 -		newrr7 = physicalize_rid(d, d->shared_info->arch.rrs[7]);
  56.121 +		rr0 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[0]);
  56.122 +		rr1 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[1]);
  56.123 +		rr2 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[2]);
  56.124 +		rr3 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[3]);
  56.125 +		rr4 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[4]);
  56.126 +		rr5 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[5]);
  56.127 +		rr6 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[6]);
  56.128 +		newrr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
  56.129  	}
  56.130  
  56.131  	set_rr_no_srlz(0x0000000000000000L, rr0);
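
The physicalize_rid()/virtualize_rid() pair above is the core of the RID virtualization scheme: each domain is allotted the machine-RID block [starting_rid, ending_rid], and translating a guest region register is a plain offset applied to the rid bitfield. A standalone sketch (the ia64_rr bit layout below follows the architected rr format from memory, so treat the field widths as illustrative):

#include <stdio.h>

typedef union {
	unsigned long rrval;
	struct {
		unsigned long ve   : 1;		/* VHPT walker enable */
		unsigned long rsv0 : 1;
		unsigned long ps   : 6;		/* preferred page size */
		unsigned long rid  : 24;	/* region identifier */
		unsigned long rsv1 : 32;
	};
} ia64_rr;

int main(void)
{
	unsigned long starting_rid = 0x1000, ending_rid = 0x1fff;
	ia64_rr rrv;

	rrv.rrval = 0;
	rrv.rid = 0x42;				/* guest-chosen virtual RID */

	rrv.rid += starting_rid;		/* physicalize_rid() */
	printf("machine rid %lx, in range: %d\n",
	       (unsigned long)rrv.rid, rrv.rid <= ending_rid);

	rrv.rid -= starting_rid;		/* virtualize_rid() */
	printf("virtual rid %lx again\n", (unsigned long)rrv.rid);
	return 0;
}
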
    57.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    57.2 +++ b/xen/arch/ia64/smp.c	Mon Feb 14 12:25:30 2005 +0000
    57.3 @@ -0,0 +1,42 @@
    57.4 +/*
    57.5 + *	Intel SMP support routines.
    57.6 + *
    57.7 + *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
    57.8 + *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
    57.9 + *
   57.10 + *	This code is released under the GNU General Public License version 2 or
   57.11 + *	later.
   57.12 + */
   57.13 +
   57.14 +//#include <xen/irq.h>
   57.15 +#include <xen/sched.h>
   57.16 +#include <xen/delay.h>
   57.17 +#include <xen/spinlock.h>
   57.18 +#include <asm/smp.h>
   57.19 +//#include <asm/mc146818rtc.h>
   57.20 +#include <asm/pgalloc.h>
   57.21 +//#include <asm/smpboot.h>
   57.22 +#include <asm/hardirq.h>
   57.23 +
   57.24 +//#if CONFIG_SMP || IA64
   57.25 +#if CONFIG_SMP
   57.26 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   57.27 +void smp_send_event_check_mask(unsigned long cpu_mask)
   57.28 +{
   57.29 +	dummy();
   57.30 +	//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
   57.31 +}
   57.32 +
   57.33 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   57.34 +void flush_tlb_mask(unsigned long mask)
   57.35 +{
   57.36 +	dummy();
   57.37 +}
   57.38 +
   57.39 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   57.40 +int try_flush_tlb_mask(unsigned long mask)
   57.41 +{
   57.42 +	dummy();
   57.43 +	return 1;
   57.44 +}
   57.45 +#endif
    58.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.2 +++ b/xen/arch/ia64/smpboot.c	Mon Feb 14 12:25:30 2005 +0000
    58.3 @@ -0,0 +1,2 @@
    58.4 +// expand later
    58.5 +int ht_per_core = 1;
    59.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    59.2 +++ b/xen/arch/ia64/tools/README.xenia64	Mon Feb 14 12:25:30 2005 +0000
    59.3 @@ -0,0 +1,27 @@
    59.4 +# Xen/ia64 heavily leverages/reuses many files from Linux/ia64
    59.5 +# you need the following files from kernel.org
    59.6 +#  linux-2.6.7.tar.gz
    59.7 +#  linux-2.6.7-ia64-040619.diff.gz
    59.8 +# place these in the parent directory of the xenXXX.bk tree
    59.9 +# e.g. xen-unstable.bk should be in the same directory as linux-2.6.7.tar.gz
   59.10 +
   59.11 +# unpack linux-2.6.7 in the xenXXX.bk/.. directory and apply the patch
   59.12 +gunzip linux-2.6.7-ia64-040619.diff.gz
   59.13 +tar xzf linux-2.6.7.tar.gz
   59.14 +cd linux-2.6.7
   59.15 +# is there a way to gunzip as part of patch?
    59.16 +patch -p1 <../linux-2.6.7-ia64-040619.diff
   59.17 +
   59.18 +# go back to the xen subdirectory of xen*.bk
    59.19 +cd ../xen*.bk/xen
   59.20 +
   59.21 +# create and patch the linux/ia64 files
   59.22 +bash arch/ia64/tools/mkbuildtree
   59.23 +# this should print out many patch messages but no errors
   59.25 +
   59.26 +# build xen/ia64
   59.27 +# if using cross-compiler
   59.28 +make TARGET_ARCH=ia64
   59.29 +# else if native
   59.30 +make
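
# For reference, the directory layout the steps above assume
# ("work/" is just a placeholder for wherever you unpack):
#
#   work/
#     linux-2.6.7.tar.gz
#     linux-2.6.7-ia64-040619.diff    (after gunzip)
#     linux-2.6.7/                    <- unpacked, -ia64 diff applied
#     xen-unstable.bk/                <- this tree; build from its xen/ dir
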
    60.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    60.2 +++ b/xen/arch/ia64/tools/mkbuildtree	Mon Feb 14 12:25:30 2005 +0000
    60.3 @@ -0,0 +1,290 @@
    60.4 +#!/bin/sh
    60.5 +#
    60.6 +# run in xen-X.X/xen directory after unpacking linux in same directory
    60.7 +
    60.8 +XEN=$PWD
    60.9 +#LINUX=$XEN/linux-2.6.7
   60.10 +LINUX=$XEN/../../linux-2.6.7
   60.11 +LINUXPATCH=$XEN/arch/ia64/patch/linux-2.6.7
   60.12 +XENPATCH=$XEN/arch/ia64/patch/xen-2.0.1
   60.13 +
   60.14 +cp_patch ()
   60.15 +{
   60.16 +	#diff -u $LINUX/$1 $XEN/$2 > $LINUXPATCH/$3
   60.17 +	cp $LINUX/$1 $XEN/$2
   60.18 +	patch <$LINUXPATCH/$3 $XEN/$2
   60.19 +}
   60.20 +
   60.21 +xen_patch ()
   60.22 +{
   60.23 +	#patch <$XENPATCH/$2 $XEN/$1
   60.24 +	echo 'skipping patch of' $XEN/$1
   60.25 +}
   60.26 +
   60.27 +softlink ()
   60.28 +{
   60.29 +	ln -s $LINUX/$1 $XEN/$2
   60.30 +}
   60.31 +
   60.32 +null ()
   60.33 +{
   60.34 +	touch $XEN/$1
   60.35 +}
   60.36 +
   60.37 +
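# For example, "cp_patch arch/ia64/kernel/time.c arch/ia64/time.c time.c"
# (used below) in effect runs, with the variables above:
#   cp $LINUX/arch/ia64/kernel/time.c $XEN/arch/ia64/time.c
#   patch <$LINUXPATCH/time.c $XEN/arch/ia64/time.c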
   60.38 +# ensure linux directory is set up
   60.39 +if [ ! -d $LINUX ]; then
   60.40 +	echo "ERROR: $LINUX directory doesn't exist"
   60.41 +	echo "(don't forget to apply the -ia64 patch to it too)"
   60.42 +	exit
   60.43 +fi
   60.44 +
   60.45 +# setup
   60.46 +
   60.47 +#mkdir arch/ia64
   60.48 +#mkdir arch/ia64/lib
   60.49 +#mkdir include/asm-ia64
   60.50 +mkdir include/asm-generic
   60.51 +mkdir include/asm-ia64/linux
   60.52 +mkdir include/asm-ia64/linux/byteorder
   60.53 +# use "gcc -Iinclude/asm-ia64" to find these linux includes
   60.54 +ln -s $XEN/include/xen $XEN/include/linux
   60.55 +ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen 
   60.56 +
   60.57 +# prepare for building asm-offsets (circular dependency)
   60.58 +touch include/asm-ia64/asm-offsets.h
   60.59 +sleep 2
   60.60 +touch arch/ia64/asm-offsets.c
   60.61 +
   60.62 +# patches to xen/common files
   60.63 +#xen_patch common/domain.c domain.c
   60.64 +#xen_patch common/dom_mem_ops.c dom_mem_ops.c
   60.65 +#xen_patch common/grant_table.c grant_table.c
   60.66 +#xen_patch common/kernel.c kernel.c
   60.67 +#xen_patch common/dom0_ops.c dom0_ops.c
   60.68 +#xen_patch common/memory.c memory.c
   60.69 +#xen_patch common/keyhandler.c keyhandler.c
   60.70 +#xen_patch common/softirq.c softirq.c
   60.71 +#xen_patch common/string.c string.c
   60.72 +#xen_patch common/elf.c elf.c
   60.73 +#xen_patch common/schedule.c schedule.c
   60.74 +#xen_patch drivers/char/serial.c serial.c
   60.75 +#xen_patch drivers/char/console.c console.c
   60.76 +#xen_patch include/public/xen.h xen.h
   60.77 +#xen_patch include/xen/grant_table.h grant_table.h
   60.78 +#xen_patch include/xen/init.h init.h
   60.79 +#xen_patch include/xen/irq.h irq.h
   60.80 +#xen_patch include/xen/list.h list.h
   60.81 +#xen_patch include/xen/sched.h sched.h
   60.82 +#xen_patch include/xen/slab.h slab.h
   60.83 +#xen_patch include/xen/time.h time.h
   60.84 +
   60.85 +
   60.86 +# arch/ia64 files
   60.87 +
   60.88 +cp_patch arch/ia64/kernel/efi.c arch/ia64/efi.c efi.c
   60.89 +cp_patch arch/ia64/kernel/entry.S arch/ia64/entry.S entry.S
   60.90 +cp_patch arch/ia64/kernel/head.S arch/ia64/head.S head.S
   60.91 +#cp_patch arch/ia64/kernel/init_task.c arch/ia64/init_task.c init_task.c
   60.92 +cp_patch arch/ia64/kernel/irq_ia64.c arch/ia64/irq_ia64.c irq_ia64.c
   60.93 +cp_patch arch/ia64/kernel/ivt.S arch/ia64/ivt.S ivt.S
   60.94 +#cp_patch arch/ia64/kernel/minstate.h arch/ia64/minstate.h minstate.h
   60.95 +cp_patch arch/ia64/kernel/setup.c arch/ia64/setup.c setup.c
   60.96 +cp_patch arch/ia64/kernel/time.c arch/ia64/time.c time.c
   60.97 +cp_patch arch/ia64/kernel/unaligned.c arch/ia64/unaligned.c unaligned.c
   60.98 +cp_patch arch/ia64/kernel/vmlinux.lds.S arch/ia64/xen.lds.S lds.S
   60.99 +
  60.100 +cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
  60.101 +cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
  60.102 +cp_patch mm/slab.c arch/ia64/slab.c slab.c
  60.103 +
  60.104 +cp_patch arch/ia64/mm/contig.c arch/ia64/mm_contig.c mm_contig.c
  60.105 +cp_patch arch/ia64/mm/tlb.c arch/ia64/tlb.c tlb.c
  60.106 +
  60.107 +cp_patch arch/ia64/hp/sim/hpsim_irq.c arch/ia64/hpsim_irq.c hpsim_irq.c
  60.108 +
  60.109 +softlink arch/ia64/kernel/efi_stub.S arch/ia64/efi_stub.S
  60.110 +softlink arch/ia64/kernel/entry.h arch/ia64/entry.h
  60.111 +softlink arch/ia64/kernel/ia64_ksyms.c arch/ia64/ia64_ksyms.c
  60.112 +softlink arch/ia64/kernel/irq_lsapic.c arch/ia64/irq_lsapic.c
  60.113 +softlink arch/ia64/kernel/machvec.c arch/ia64/machvec.c
  60.114 +softlink arch/ia64/kernel/pal.S arch/ia64/pal.S
  60.115 +softlink arch/ia64/kernel/patch.c arch/ia64/patch.c
  60.116 +softlink arch/ia64/kernel/sal.c arch/ia64/sal.c
  60.117 +softlink arch/ia64/kernel/minstate.h arch/ia64/minstate.h
  60.118 +
  60.119 +softlink arch/ia64/lib/bitop.c arch/ia64/lib/bitop.c
  60.120 +softlink arch/ia64/lib/carta_random.S arch/ia64/lib/carta_random.S
  60.121 +softlink arch/ia64/lib/checksum.c arch/ia64/lib/checksum.c
  60.122 +softlink arch/ia64/lib/clear_page.S arch/ia64/lib/clear_page.S
  60.123 +softlink arch/ia64/lib/clear_user.S arch/ia64/lib/clear_user.S
  60.124 +softlink arch/ia64/lib/copy_page_mck.S arch/ia64/lib/copy_page_mck.S
  60.125 +softlink arch/ia64/lib/copy_page.S arch/ia64/lib/copy_page.S
  60.126 +softlink arch/ia64/lib/copy_user.S arch/ia64/lib/copy_user.S
  60.127 +softlink arch/ia64/lib/csum_partial_copy.c arch/ia64/lib/csum_partial_copy.c
  60.128 +softlink arch/ia64/lib/dec_and_lock.c arch/ia64/lib/dec_and_lock.c
  60.129 +softlink arch/ia64/lib/do_csum.S arch/ia64/lib/do_csum.S
  60.130 +softlink arch/ia64/lib/flush.S arch/ia64/lib/flush.S
  60.131 +softlink arch/ia64/lib/idiv32.S arch/ia64/lib/idiv32.S
  60.132 +softlink arch/ia64/lib/idiv64.S arch/ia64/lib/idiv64.S
  60.133 +softlink arch/ia64/lib/io.c arch/ia64/lib/io.c
  60.134 +softlink arch/ia64/lib/ip_fast_csum.S arch/ia64/lib/ip_fast_csum.S
  60.135 +softlink arch/ia64/lib/memcpy_mck.S arch/ia64/lib/memcpy_mck.S
  60.136 +softlink arch/ia64/lib/memcpy.S arch/ia64/lib/memcpy.S
  60.137 +softlink arch/ia64/lib/memset.S arch/ia64/lib/memset.S
  60.138 +softlink arch/ia64/lib/strlen.S arch/ia64/lib/strlen.S
  60.139 +softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S
  60.140 +softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S
  60.141 +softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S
  60.142 +softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
  60.143 +softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S
  60.144 +
  60.145 +softlink lib/cmdline.c arch/ia64/cmdline.c
  60.146 +
  60.147 +softlink arch/ia64/hp/sim/hpsim.S arch/ia64/hpsim.S
  60.148 +
  60.149 +# xen/include/asm-generic files
  60.150 +
   60.151 +softlink include/asm-generic/cpumask_const_value.h include/asm-generic/cpumask_const_value.h
   60.152 +softlink include/asm-generic/cpumask.h include/asm-generic/cpumask.h
   60.153 +softlink include/asm-generic/cpumask_up.h include/asm-generic/cpumask_up.h
   60.154 +softlink include/asm-generic/cpumask_arith.h include/asm-generic/cpumask_arith.h
   60.155 +softlink include/asm-generic/div64.h include/asm-generic/div64.h
   60.156 +softlink include/asm-generic/ide_iops.h include/asm-generic/ide_iops.h
   60.157 +softlink include/asm-generic/pci-dma-compat.h include/asm-generic/pci-dma-compat.h
   60.158 +softlink include/asm-generic/pci.h include/asm-generic/pci.h
   60.159 +softlink include/asm-generic/pgtable.h include/asm-generic/pgtable.h
   60.160 +softlink include/asm-generic/sections.h include/asm-generic/sections.h
   60.161 +softlink include/asm-generic/topology.h include/asm-generic/topology.h
   60.162 +softlink include/asm-generic/vmlinux.lds.h include/asm-generic/vmlinux.lds.h
  60.163 +
  60.164 +
  60.165 +# xen/include/asm-ia64 files
  60.166 +
  60.167 +cp_patch arch/ia64/hp/sim/hpsim_ssc.h include/asm-ia64/hpsim_ssc.h hpsim_ssc.h
  60.168 +
  60.169 +cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h
  60.170 +cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
  60.171 +cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h
  60.172 +cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
  60.173 +cp_patch include/asm-ia64/ide.h include/asm-ia64/ide.h ide.h
  60.174 +cp_patch include/asm-ia64/io.h include/asm-ia64/io.h io.h
  60.175 +cp_patch include/asm-ia64/irq.h include/asm-ia64/irq.h irq.h
  60.176 +cp_patch include/asm-ia64/kregs.h include/asm-ia64/kregs.h kregs.h
  60.177 +cp_patch include/asm-ia64/page.h include/asm-ia64/page.h page.h
  60.178 +cp_patch include/asm-ia64/processor.h include/asm-ia64/processor.h processor.h
  60.179 +cp_patch include/asm-ia64/sal.h include/asm-ia64/sal.h sal.h
  60.180 +cp_patch include/asm-ia64/system.h include/asm-ia64/system.h system.h
  60.181 +cp_patch include/asm-ia64/types.h include/asm-ia64/types.h types.h
  60.182 +
  60.183 +null include/asm-ia64/desc.h 
  60.184 +null include/asm-ia64/domain_page.h
  60.185 +null include/asm-ia64/flushtlb.h
  60.186 +null include/asm-ia64/io_apic.h
  60.187 +null include/asm-ia64/pdb.h
  60.188 +
  60.189 +softlink include/asm-ia64/acpi.h include/asm-ia64/acpi.h
  60.190 +softlink include/asm-ia64/asmmacro.h include/asm-ia64/asmmacro.h
  60.191 +softlink include/asm-ia64/atomic.h include/asm-ia64/atomic.h
  60.192 +softlink include/asm-ia64/bitops.h include/asm-ia64/bitops.h
  60.193 +softlink include/asm-ia64/break.h include/asm-ia64/break.h
  60.194 +softlink include/asm-ia64/bug.h include/asm-ia64/bug.h
  60.195 +softlink include/asm-ia64/byteorder.h include/asm-ia64/byteorder.h
  60.196 +softlink include/asm-ia64/cacheflush.h include/asm-ia64/cacheflush.h
  60.197 +softlink include/asm-ia64/cache.h include/asm-ia64/cache.h
  60.198 +softlink include/asm-ia64/checksum.h include/asm-ia64/checksum.h
  60.199 +softlink include/asm-ia64/cpumask.h include/asm-ia64/cpumask.h
  60.200 +softlink include/asm-ia64/delay.h include/asm-ia64/delay.h
  60.201 +softlink include/asm-ia64/div64.h include/asm-ia64/div64.h
  60.202 +softlink include/asm-ia64/dma.h include/asm-ia64/dma.h
  60.203 +softlink include/asm-ia64/dma-mapping.h include/asm-ia64/dma-mapping.h
  60.204 +softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h
  60.205 +softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h
  60.206 +softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h
  60.207 +softlink include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h
  60.208 +softlink include/asm-ia64/intrinsics.h include/asm-ia64/intrinsics.h
  60.209 +softlink include/asm-ia64/ioctl.h include/asm-ia64/ioctl.h
  60.210 +softlink include/asm-ia64/linkage.h include/asm-ia64/linkage.h
  60.211 +softlink include/asm-ia64/machvec.h include/asm-ia64/machvec.h
  60.212 +softlink include/asm-ia64/machvec_hpsim.h include/asm-ia64/machvec_hpsim.h
  60.213 +softlink include/asm-ia64/mca_asm.h include/asm-ia64/mca_asm.h
  60.214 +softlink include/asm-ia64/mca.h include/asm-ia64/mca.h
  60.215 +softlink include/asm-ia64/meminit.h include/asm-ia64/meminit.h
  60.216 +softlink include/asm-ia64/mman.h include/asm-ia64/mman.h
  60.217 +softlink include/asm-ia64/numa.h include/asm-ia64/numa.h
  60.218 +softlink include/asm-ia64/pal.h include/asm-ia64/pal.h
  60.219 +softlink include/asm-ia64/param.h include/asm-ia64/param.h
  60.220 +softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
  60.221 +softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
  60.222 +softlink include/asm-ia64/percpu.h include/asm-ia64/percpu.h
  60.223 +softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
  60.224 +softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
  60.225 +softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
  60.226 +softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
  60.227 +softlink include/asm-ia64/rse.h include/asm-ia64/rse.h
  60.228 +softlink include/asm-ia64/rwsem.h include/asm-ia64/rwsem.h
  60.229 +softlink include/asm-ia64/scatterlist.h include/asm-ia64/scatterlist.h
  60.230 +softlink include/asm-ia64/sections.h include/asm-ia64/sections.h
  60.231 +softlink include/asm-ia64/semaphore.h include/asm-ia64/semaphore.h
  60.232 +softlink include/asm-ia64/serial.h include/asm-ia64/serial.h
  60.233 +softlink include/asm-ia64/sigcontext.h include/asm-ia64/sigcontext.h
  60.234 +softlink include/asm-ia64/signal.h include/asm-ia64/signal.h
  60.235 +softlink include/asm-ia64/smp.h include/asm-ia64/smp.h
  60.236 +softlink include/asm-ia64/spinlock.h include/asm-ia64/spinlock.h
  60.237 +softlink include/asm-ia64/string.h include/asm-ia64/string.h
  60.238 +softlink include/asm-ia64/thread_info.h include/asm-ia64/thread_info.h
  60.239 +softlink include/asm-ia64/timex.h include/asm-ia64/timex.h
  60.240 +softlink include/asm-ia64/topology.h include/asm-ia64/topology.h
  60.241 +softlink include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h
  60.242 +softlink include/asm-ia64/unaligned.h include/asm-ia64/unaligned.h
  60.243 +softlink include/asm-ia64/unistd.h include/asm-ia64/unistd.h
  60.244 +softlink include/asm-ia64/unwind.h include/asm-ia64/unwind.h
  60.245 +softlink include/asm-ia64/ustack.h include/asm-ia64/ustack.h
  60.246 +
  60.247 +# xen/include/asm-ia64/linux/*.h (== linux/include/linux/*.h)
  60.248 +
  60.249 +cp_patch include/linux/bootmem.h include/asm-ia64/linux/bootmem.h bootmem.h
  60.250 +cp_patch include/linux/efi.h include/asm-ia64/linux/efi.h efi.h
  60.251 +#cp_patch include/linux/init_task.h include/asm-ia64/linux/init_task.h init_task.h
  60.252 +cp_patch include/linux/interrupt.h include/asm-ia64/linux/interrupt.h interrupt.h
  60.253 +cp_patch include/linux/mmzone.h include/asm-ia64/linux/mmzone.h mmzone.h
  60.254 +
  60.255 +cp_patch include/linux/wait.h include/asm-ia64/linux/wait.h wait.h
  60.256 +
  60.257 +cp_patch include/linux/slab.h include/asm-ia64/slab.h slab.h
  60.258 +
  60.259 +# following renamed to avoid conflict
  60.260 +cp_patch include/linux/time.h include/xen/linuxtime.h linuxtime.h
  60.261 +
  60.262 +softlink include/linux/bcd.h include/asm-ia64/linux/bcd.h
  60.263 +softlink include/linux/bitmap.h include/asm-ia64/linux/bitmap.h
  60.264 +softlink include/linux/bitops.h include/asm-ia64/linux/bitops.h
  60.265 +softlink include/linux/cpumask.h include/asm-ia64/linux/cpumask.h
  60.266 +softlink include/linux/dma-mapping.h include/asm-ia64/linux/dma-mapping.h
  60.267 +softlink include/linux/gfp.h include/asm-ia64/linux/gfp.h
  60.268 +softlink include/linux/initrd.h include/asm-ia64/linux/initrd.h
  60.269 +softlink include/linux/kmalloc_sizes.h include/asm-ia64/linux/kmalloc_sizes.h
  60.270 +softlink include/linux/linkage.h include/asm-ia64/linux/linkage.h
  60.271 +softlink include/linux/numa.h include/asm-ia64/linux/numa.h
  60.272 +softlink include/linux/page-flags.h include/asm-ia64/linux/page-flags.h
  60.273 +softlink include/linux/percpu.h include/asm-ia64/linux/percpu.h
  60.274 +softlink include/linux/preempt.h include/asm-ia64/linux/preempt.h
  60.275 +softlink include/linux/rbtree.h include/asm-ia64/linux/rbtree.h
  60.276 +softlink include/linux/rwsem.h include/asm-ia64/linux/rwsem.h
  60.277 +softlink include/linux/seq_file.h include/asm-ia64/linux/seq_file.h
  60.278 +softlink include/linux/serial_core.h include/asm-ia64/linux/serial_core.h
  60.279 +softlink include/linux/stddef.h include/asm-ia64/linux/stddef.h
  60.280 +softlink include/linux/thread_info.h include/asm-ia64/linux/thread_info.h
  60.281 +softlink include/linux/threads.h include/asm-ia64/linux/threads.h
  60.282 +softlink include/linux/timex.h include/asm-ia64/linux/timex.h
  60.283 +softlink include/linux/topology.h include/asm-ia64/linux/topology.h
  60.284 +softlink include/linux/tty.h include/asm-ia64/linux/tty.h
  60.285 +
  60.286 +null include/asm-ia64/linux/file.h
  60.287 +null include/asm-ia64/linux/module.h
  60.288 +null include/asm-ia64/linux/swap.h
  60.289 +
  60.290 +softlink include/linux/byteorder/generic.h include/asm-ia64/linux/byteorder/generic.h
  60.291 +softlink include/linux/byteorder/little_endian.h include/asm-ia64/linux/byteorder/little_endian.h
  60.292 +softlink include/linux/byteorder/swab.h include/asm-ia64/linux/byteorder/swab.h
  60.293 +
    61.1 --- a/xen/arch/ia64/vcpu.c	Mon Feb 14 11:42:11 2005 +0000
    61.2 +++ b/xen/arch/ia64/vcpu.c	Mon Feb 14 12:25:30 2005 +0000
    61.3 @@ -7,6 +7,7 @@
    61.4   */
    61.5  
    61.6  #include <linux/sched.h>
    61.7 +#include <public/arch-ia64.h>
    61.8  #include <asm/ia64_int.h>
    61.9  #include <asm/vcpu.h>
   61.10  #include <asm/regionreg.h>
   61.11 @@ -15,7 +16,7 @@
   61.12  #include <asm/delay.h>
   61.13  
   61.14  typedef	union {
   61.15 -	struct ia64_psr;
   61.16 +	struct ia64_psr ia64_psr;
   61.17  	unsigned long i64;
   61.18  } PSR;
   61.19  
   61.20 @@ -23,8 +24,8 @@ typedef	union {
   61.21  //typedef struct domain VCPU;
   61.22  
   61.23  // this def for vcpu_regs won't work if kernel stack is present
   61.24 -#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->regs)
   61.25 -#define	PSCB(x)	x->shared_info->arch
   61.26 +#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
   61.27 +#define	PSCB(x,y)	x->vcpu_info->arch.y
   61.28  
   61.29  #define	TRUE	1
   61.30  #define	FALSE	0
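The PSCB() change from one argument to two drives most of the mechanical edits in this file: with the field name passed as a macro argument, array elements such as irr[3] can be named in a single expression, and the backing store moves from shared_info->arch to vcpu_info->arch in exactly one place. A self-contained sketch with simplified stand-in types (the struct shapes here are hypothetical, for illustration only):

	#include <stdio.h>

	/* Cut-down stand-ins for the real Xen/ia64 structures. */
	struct arch_info { unsigned long iip; unsigned long irr[4]; };
	struct vcpu_info { struct arch_info arch; };
	struct vcpu      { struct vcpu_info *vcpu_info; };

	/* Two-argument form: PSCB(v,irr[3]) -> v->vcpu_info->arch.irr[3] */
	#define PSCB(x,y) ((x)->vcpu_info->arch.y)

	int main(void)
	{
		struct vcpu_info vi = {{ 0 }};
		struct vcpu v = { &vi };

		PSCB(&v,iip) = 0xa000000000001000UL;
		PSCB(&v,irr[3]) |= 1UL << 63;
		printf("iip=%lx irr3=%lx\n", PSCB(&v,iip), PSCB(&v,irr[3]));
		return 0;
	}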
   61.31 @@ -78,7 +79,7 @@ IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64
   61.32  	if (reg == 44) return (vcpu_set_itc(vcpu,val));
   61.33  	if (reg == 27) return (IA64_ILLOP_FAULT);
   61.34  	if (reg > 7) return (IA64_ILLOP_FAULT);
   61.35 -	PSCB(vcpu).krs[reg] = val;
   61.36 +	PSCB(vcpu,krs[reg]) = val;
   61.37  #if 0
   61.38  // for now, privify kr read's so all kr accesses are privileged
   61.39  	switch (reg) {
   61.40 @@ -99,7 +100,7 @@ IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64
   61.41  IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
   61.42  {
   61.43  	if (reg > 7) return (IA64_ILLOP_FAULT);
   61.44 -	*val = PSCB(vcpu).krs[reg];
   61.45 +	*val = PSCB(vcpu,krs[reg]);
   61.46  	return IA64_NO_FAULT;
   61.47  }
   61.48  
   61.49 @@ -110,11 +111,11 @@ IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64
   61.50  void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
   61.51  {
   61.52  	/* only do something if mode changes */
   61.53 -	if (!!newmode ^ !!vcpu->metaphysical_mode) {
   61.54 -		if (newmode) set_metaphysical_rr(0,vcpu->metaphysical_rid);
   61.55 -		else if (PSCB(vcpu).rrs[0] != -1)
   61.56 -			set_one_rr(0, PSCB(vcpu).rrs[0]);
   61.57 -		vcpu->metaphysical_mode = newmode;
   61.58 +	if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
   61.59 +		if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
   61.60 +		else if (PSCB(vcpu,rrs[0]) != -1)
   61.61 +			set_one_rr(0, PSCB(vcpu,rrs[0]));
   61.62 +		PSCB(vcpu,metaphysical_mode) = newmode;
   61.63  	}
   61.64  }
   61.65  
   61.66 @@ -129,10 +130,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
   61.67  	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   61.68  	imm = *(struct ia64_psr *)&imm24;
   61.69  	// interrupt flag
   61.70 -	if (imm.i) PSCB(vcpu).interrupt_delivery_enabled = 0;
   61.71 -	if (imm.ic)  PSCB(vcpu).interrupt_collection_enabled = 0;
   61.72 +	if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
   61.73 +	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 0;
   61.74  	// interrupt collection flag
   61.75 -	//if (imm.ic) PSCB(vcpu).interrupt_delivery_enabled = 0;
   61.76 +	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
   61.77  	// just handle psr.up and psr.pp for now
   61.78  	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
   61.79  		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
   61.80 @@ -170,13 +171,13 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
   61.81  	if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
   61.82  	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
   61.83  	if (imm.i) {
   61.84 -		if (!PSCB(vcpu).interrupt_delivery_enabled) {
   61.85 +		if (!PSCB(vcpu,interrupt_delivery_enabled)) {
   61.86  //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
   61.87  			enabling_interrupts = 1;
   61.88  		}
   61.89 -		PSCB(vcpu).interrupt_delivery_enabled = 1;
   61.90 +		PSCB(vcpu,interrupt_delivery_enabled) = 1;
   61.91  	}
   61.92 -	if (imm.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
   61.93 +	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
   61.94  	// TODO: do this faster
   61.95  	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   61.96  	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
   61.97 @@ -218,11 +219,11 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN
   61.98  	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   61.99  	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
  61.100  	if (newpsr.i) {
  61.101 -		if (!PSCB(vcpu).interrupt_delivery_enabled)
  61.102 +		if (!PSCB(vcpu,interrupt_delivery_enabled))
  61.103  			enabling_interrupts = 1;
  61.104 -		PSCB(vcpu).interrupt_delivery_enabled = 1;
  61.105 +		PSCB(vcpu,interrupt_delivery_enabled) = 1;
  61.106  	}
  61.107 -	if (newpsr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
  61.108 +	if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
  61.109  	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
  61.110  	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
  61.111  	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
  61.112 @@ -253,9 +254,9 @@ IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT6
  61.113  	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
  61.114  	newpsr = *(struct ia64_psr *)&psr;
  61.115  	if (newpsr.cpl == 2) newpsr.cpl = 0;
  61.116 -	if (PSCB(vcpu).interrupt_delivery_enabled) newpsr.i = 1;
  61.117 +	if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
  61.118  	else newpsr.i = 0;
  61.119 -	if (PSCB(vcpu).interrupt_collection_enabled) newpsr.ic = 1;
  61.120 +	if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
  61.121  	else newpsr.ic = 0;
  61.122  	*pval = *(unsigned long *)&newpsr;
  61.123  	return IA64_NO_FAULT;
  61.124 @@ -263,28 +264,28 @@ IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT6
  61.125  
  61.126  BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
  61.127  {
  61.128 -	return !!PSCB(vcpu).interrupt_collection_enabled;
  61.129 +	return !!PSCB(vcpu,interrupt_collection_enabled);
  61.130  }
  61.131  
  61.132  BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
  61.133  {
  61.134 -	return !!PSCB(vcpu).interrupt_delivery_enabled;
  61.135 +	return !!PSCB(vcpu,interrupt_delivery_enabled);
  61.136  }
  61.137  
  61.138  UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
  61.139  {
  61.140 -	UINT64 dcr = PSCB(vcpu).dcr;
  61.141 +	UINT64 dcr = PSCB(vcpu,dcr);
  61.142  	PSR psr = {0};
  61.143  	
  61.144  	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
  61.145  	psr.i64 = prevpsr;
  61.146 -	psr.be = 0; if (dcr & IA64_DCR_BE) psr.be = 1;
  61.147 -	psr.pp = 0; if (dcr & IA64_DCR_PP) psr.pp = 1;
  61.148 -	psr.ic = PSCB(vcpu).interrupt_collection_enabled;
  61.149 -	psr.i = PSCB(vcpu).interrupt_delivery_enabled;
  61.150 -	psr.bn = PSCB(vcpu).banknum;
  61.151 -	psr.dt = 1; psr.it = 1; psr.rt = 1;
  61.152 -	if (psr.cpl == 2) psr.cpl = 0; // !!!! fool domain
  61.153 +	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
  61.154 +	psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
  61.155 +	psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
  61.156 +	psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
  61.157 +	psr.ia64_psr.bn = PSCB(vcpu,banknum);
  61.158 +	psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
  61.159 +	if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
  61.160  	// psr.pk = 1;
  61.161  	//printf("returns 0x%016lx...",psr.i64);
  61.162  	return psr.i64;
  61.163 @@ -298,22 +299,22 @@ IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT6
  61.164  {
  61.165  extern unsigned long privop_trace;
  61.166  //privop_trace=0;
  61.167 -//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu).iip);
  61.168 +//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
  61.169  	// Reads of cr.dcr on Xen always have the sign bit set, so
  61.170  	// a domain can differentiate whether it is running on SP or not
  61.171 -	*pval = PSCB(vcpu).dcr | 0x8000000000000000L;
  61.172 +	*pval = PSCB(vcpu,dcr) | 0x8000000000000000L;
  61.173  	return (IA64_NO_FAULT);
  61.174  }
  61.175  
  61.176  IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
  61.177  {
  61.178 -	*pval = PSCB(vcpu).iva & ~0x7fffL;
  61.179 +	*pval = PSCB(vcpu,iva) & ~0x7fffL;
  61.180  	return (IA64_NO_FAULT);
  61.181  }
  61.182  
  61.183  IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
  61.184  {
  61.185 -	*pval = PSCB(vcpu).pta;
  61.186 +	*pval = PSCB(vcpu,pta);
  61.187  	return (IA64_NO_FAULT);
  61.188  }
  61.189  
  61.190 @@ -321,13 +322,13 @@ IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT
  61.191  {
  61.192  	//REGS *regs = vcpu_regs(vcpu);
  61.193  	//*pval = regs->cr_ipsr;
  61.194 -	*pval = PSCB(vcpu).ipsr;
  61.195 +	*pval = PSCB(vcpu,ipsr);
  61.196  	return (IA64_NO_FAULT);
  61.197  }
  61.198  
  61.199  IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
  61.200  {
  61.201 -	*pval = PSCB(vcpu).isr;
  61.202 +	*pval = PSCB(vcpu,isr);
  61.203  	return (IA64_NO_FAULT);
  61.204  }
  61.205  
  61.206 @@ -335,13 +336,13 @@ IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT6
  61.207  {
  61.208  	//REGS *regs = vcpu_regs(vcpu);
  61.209  	//*pval = regs->cr_iip;
  61.210 -	*pval = PSCB(vcpu).iip;
  61.211 +	*pval = PSCB(vcpu,iip);
  61.212  	return (IA64_NO_FAULT);
  61.213  }
  61.214  
  61.215  IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
  61.216  {
  61.217 -	UINT64 val = PSCB(vcpu).ifa;
  61.218 +	UINT64 val = PSCB(vcpu,ifa);
  61.219  	*pval = val;
  61.220  	return (IA64_NO_FAULT);
  61.221  }
  61.222 @@ -360,14 +361,14 @@ unsigned long vcpu_get_itir_on_fault(VCP
  61.223  
  61.224  IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
  61.225  {
  61.226 -	UINT64 val = PSCB(vcpu).itir;
  61.227 +	UINT64 val = PSCB(vcpu,itir);
  61.228  	*pval = val;
  61.229  	return (IA64_NO_FAULT);
  61.230  }
  61.231  
  61.232  IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
  61.233  {
  61.234 -	UINT64 val = PSCB(vcpu).iipa;
  61.235 +	UINT64 val = PSCB(vcpu,iipa);
  61.236  	// SP entry code does not save iipa yet nor does it get
  61.237  	//  properly delivered in the pscb
  61.238  	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
  61.239 @@ -377,23 +378,23 @@ IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT
  61.240  
  61.241  IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
  61.242  {
  61.243 -	//PSCB(vcpu).ifs = PSCB(vcpu)->regs.cr_ifs;
  61.244 -	//*pval = PSCB(vcpu).regs.cr_ifs;
  61.245 -	*pval = PSCB(vcpu).ifs;
  61.246 -	PSCB(vcpu).incomplete_regframe = 0;
  61.247 +	//PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
  61.248 +	//*pval = PSCB(vcpu,regs).cr_ifs;
  61.249 +	*pval = PSCB(vcpu,ifs);
  61.250 +	PSCB(vcpu,incomplete_regframe) = 0;
  61.251  	return (IA64_NO_FAULT);
  61.252  }
  61.253  
  61.254  IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
  61.255  {
  61.256 -	UINT64 val = PSCB(vcpu).iim;
  61.257 +	UINT64 val = PSCB(vcpu,iim);
  61.258  	*pval = val;
  61.259  	return (IA64_NO_FAULT);
  61.260  }
  61.261  
  61.262  IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
  61.263  {
  61.264 -	return vcpu_thash(vcpu,PSCB(vcpu).ifa,pval);
  61.265 +	return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
  61.266  }
  61.267  
  61.268  IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
  61.269 @@ -404,13 +405,13 @@ extern unsigned long privop_trace;
  61.270  	// a domain can differentiate whether it is running on SP or not
  61.271  	// Thus, writes of DCR should ignore the sign bit
  61.272  //verbose("vcpu_set_dcr: called\n");
  61.273 -	PSCB(vcpu).dcr = val & ~0x8000000000000000L;
  61.274 +	PSCB(vcpu,dcr) = val & ~0x8000000000000000L;
  61.275  	return (IA64_NO_FAULT);
  61.276  }
  61.277  
  61.278  IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
  61.279  {
  61.280 -	PSCB(vcpu).iva = val & ~0x7fffL;
  61.281 +	PSCB(vcpu,iva) = val & ~0x7fffL;
  61.282  	return (IA64_NO_FAULT);
  61.283  }
  61.284  
  61.285 @@ -422,25 +423,25 @@ IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT6
  61.286  	}
  61.287  	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
  61.288  	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
  61.289 -	PSCB(vcpu).pta = val;
  61.290 +	PSCB(vcpu,pta) = val;
  61.291  	return IA64_NO_FAULT;
  61.292  }
  61.293  
  61.294  IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
  61.295  {
  61.296 -	PSCB(vcpu).ipsr = val;
  61.297 +	PSCB(vcpu,ipsr) = val;
  61.298  	return IA64_NO_FAULT;
  61.299  }
  61.300  
  61.301  IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
  61.302  {
  61.303 -	PSCB(vcpu).isr = val;
  61.304 +	PSCB(vcpu,isr) = val;
  61.305  	return IA64_NO_FAULT;
  61.306  }
  61.307  
  61.308  IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
  61.309  {
  61.310 -	PSCB(vcpu).iip = val;
  61.311 +	PSCB(vcpu,iip) = val;
  61.312  	return IA64_NO_FAULT;
  61.313  }
  61.314  
  61.315 @@ -455,13 +456,13 @@ IA64FAULT vcpu_increment_iip(VCPU *vcpu)
  61.316  
  61.317  IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
  61.318  {
  61.319 -	PSCB(vcpu).ifa = val;
  61.320 +	PSCB(vcpu,ifa) = val;
  61.321  	return IA64_NO_FAULT;
  61.322  }
  61.323  
  61.324  IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
  61.325  {
  61.326 -	PSCB(vcpu).itir = val;
  61.327 +	PSCB(vcpu,itir) = val;
  61.328  	return IA64_NO_FAULT;
  61.329  }
  61.330  
  61.331 @@ -470,26 +471,26 @@ IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT
  61.332  	// SP entry code does not save iipa yet nor does it get
  61.333  	//  properly delivered in the pscb
  61.334  	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
  61.335 -	PSCB(vcpu).iipa = val;
  61.336 +	PSCB(vcpu,iipa) = val;
  61.337  	return IA64_NO_FAULT;
  61.338  }
  61.339  
  61.340  IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
  61.341  {
  61.342  	//REGS *regs = vcpu_regs(vcpu);
  61.343 -	PSCB(vcpu).ifs = val;
  61.344 +	PSCB(vcpu,ifs) = val;
  61.345  	return IA64_NO_FAULT;
  61.346  }
  61.347  
  61.348  IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
  61.349  {
  61.350 -	PSCB(vcpu).iim = val;
  61.351 +	PSCB(vcpu,iim) = val;
  61.352  	return IA64_NO_FAULT;
  61.353  }
  61.354  
  61.355  IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
  61.356  {
  61.357 -	PSCB(vcpu).iha = val;
  61.358 +	PSCB(vcpu,iha) = val;
  61.359  	return IA64_NO_FAULT;
  61.360  }
  61.361  
  61.362 @@ -503,11 +504,11 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
  61.363  		printf("vcpu_pend_interrupt: bad vector\n");
  61.364  		return;
  61.365  	}
  61.366 -	if (!test_bit(vector,PSCB(vcpu).delivery_mask)) return;
  61.367 -	if (test_bit(vector,PSCB(vcpu).irr)) {
  61.368 +	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
  61.369 +	if (test_bit(vector,PSCB(vcpu,irr))) {
  61.370  //printf("vcpu_pend_interrupt: overrun\n");
  61.371  	}
  61.372 -	set_bit(vector,PSCB(vcpu).irr);
  61.373 +	set_bit(vector,PSCB(vcpu,irr));
  61.374  }
  61.375  
  61.376  #define	IA64_TPR_MMI	0x10000
  61.377 @@ -523,9 +524,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
  61.378  {
  61.379  	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
  61.380  
  61.381 -	p = &PSCB(vcpu).irr[3];
  61.382 -	q = &PSCB(vcpu).delivery_mask[3];
  61.383 -	r = &PSCB(vcpu).insvc[3];
  61.384 +	p = &PSCB(vcpu,irr[3]);
  61.385 +	q = &PSCB(vcpu,delivery_mask[3]);
  61.386 +	r = &PSCB(vcpu,insvc[3]);
  61.387  	for (i = 3; ; p--, q--, r--, i--) {
  61.388  		bits = *p & *q;
  61.389  		if (bits) break; // got a potential interrupt
  61.390 @@ -550,12 +551,12 @@ UINT64 vcpu_check_pending_interrupts(VCP
  61.391  //printf("but masked by equal inservice\n");
  61.392  		return SPURIOUS_VECTOR;
  61.393  	}
  61.394 -	if (PSCB(vcpu).tpr & IA64_TPR_MMI) {
  61.395 +	if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
  61.396  		// tpr.mmi is set
  61.397  //printf("but masked by tpr.mmi\n");
  61.398  		return SPURIOUS_VECTOR;
  61.399  	}
  61.400 -	if (((PSCB(vcpu).tpr & IA64_TPR_MIC) + 15) >= vector) {
  61.401 +	if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
  61.402  		//tpr.mic masks class
  61.403  //printf("but masked by tpr.mic\n");
  61.404  		return SPURIOUS_VECTOR;
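Stripped of the insvc and tpr filtering handled just above, the core of vcpu_check_pending_interrupts() is a highest-bit scan over four 64-bit words (vectors 0..255, word 3 holding 192..255), with pending irr bits qualified by the delivery mask. A hypothetical standalone version of that scan:

	#define SPURIOUS_VECTOR 0xf	/* ia64 spurious interrupt vector */

	/* Return the highest pending, deliverable vector, else spurious. */
	static unsigned long highest_pending_vector(const unsigned long irr[4],
	                                            const unsigned long mask[4])
	{
		int i, bitnum;
		unsigned long bits;

		for (i = 3; i >= 0; i--) {
			bits = irr[i] & mask[i];
			if (!bits)
				continue;
			for (bitnum = 63; !(bits & (1UL << bitnum)); bitnum--)
				;
			return (unsigned long)(i * 64 + bitnum);
		}
		return SPURIOUS_VECTOR;
	}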
  61.405 @@ -597,7 +598,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  61.406  
  61.407  	vector = vcpu_check_pending_interrupts(vcpu);
  61.408  	if (vector == SPURIOUS_VECTOR) {
  61.409 -		PSCB(vcpu).pending_interruption = 0;
  61.410 +		PSCB(vcpu,pending_interruption) = 0;
  61.411  		*pval = vector;
  61.412  		return IA64_NO_FAULT;
  61.413  	}
  61.414 @@ -613,16 +614,16 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  61.415  	i = vector >> 6;
  61.416  	mask = 1L << (vector & 0x3f);
  61.417  //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
  61.418 -	PSCB(vcpu).insvc[i] |= mask;
  61.419 -	PSCB(vcpu).irr[i] &= ~mask;
  61.420 -	PSCB(vcpu).pending_interruption--;
  61.421 +	PSCB(vcpu,insvc[i]) |= mask;
  61.422 +	PSCB(vcpu,irr[i]) &= ~mask;
  61.423 +	PSCB(vcpu,pending_interruption)--;
  61.424  	*pval = vector;
  61.425  	return IA64_NO_FAULT;
  61.426  }
  61.427  
  61.428  IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
  61.429  {
  61.430 -	*pval = PSCB(vcpu).tpr;
  61.431 +	*pval = PSCB(vcpu,tpr);
  61.432  	return (IA64_NO_FAULT);
  61.433  }
  61.434  
  61.435 @@ -678,19 +679,19 @@ IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT
  61.436  
  61.437  IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
  61.438  {
  61.439 -	*pval = PSCB(vcpu).itv;
  61.440 +	*pval = PSCB(vcpu,itv);
  61.441  	return (IA64_NO_FAULT);
  61.442  }
  61.443  
  61.444  IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
  61.445  {
  61.446 -	*pval = PSCB(vcpu).pmv;
  61.447 +	*pval = PSCB(vcpu,pmv);
  61.448  	return (IA64_NO_FAULT);
  61.449  }
  61.450  
  61.451  IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
  61.452  {
  61.453 -	*pval = PSCB(vcpu).cmcv;
  61.454 +	*pval = PSCB(vcpu,cmcv);
  61.455  	return (IA64_NO_FAULT);
  61.456  }
  61.457  
  61.458 @@ -719,7 +720,7 @@ IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT6
  61.459  IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
  61.460  {
  61.461  	if (val & 0xff00) return IA64_RSVDREG_FAULT;
  61.462 -	PSCB(vcpu).tpr = val;
  61.463 +	PSCB(vcpu,tpr) = val;
  61.464  	return (IA64_NO_FAULT);
  61.465  }
  61.466  
  61.467 @@ -728,7 +729,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6
  61.468  	UINT64 *p, bits, vec, bitnum;
  61.469  	int i;
  61.470  
  61.471 -	p = &PSCB(vcpu).insvc[3];
  61.472 +	p = &PSCB(vcpu,insvc[3]);
  61.473  	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
  61.474  	if (i < 0) {
  61.475  		printf("Trying to EOI interrupt when none are in-service.\r\n");
  61.476 @@ -740,7 +741,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6
  61.477  	bits &= ~(1L << bitnum);
  61.478  	*p = bits;
  61.479  	/* clearing an eoi bit may unmask another pending interrupt... */
  61.480 -	if (PSCB(vcpu).interrupt_delivery_enabled) { // but only if enabled...
  61.481 +	if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
  61.482  		// worry about this later... Linux only calls eoi
  61.483  		// with interrupts disabled
  61.484  		printf("Trying to EOI interrupt with interrupts enabled\r\n");
  61.485 @@ -775,10 +776,10 @@ IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT6
  61.486  extern unsigned long privop_trace;
  61.487  //privop_trace=1;
  61.488  	if (val & 0xef00) return (IA64_ILLOP_FAULT);
  61.489 -	PSCB(vcpu).itv = val;
  61.490 +	PSCB(vcpu,itv) = val;
  61.491  	if (val & 0x10000) {
  61.492 -printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu).domain_itm);
  61.493 -		PSCB(vcpu).domain_itm = 0;
   61.494 +printf("**** vcpu_set_itv(%ld): vitm=%lx, setting to 0\n",val,PSCB(vcpu,domain_itm));
  61.495 +		PSCB(vcpu,domain_itm) = 0;
  61.496  	}
  61.497  	else vcpu_enable_timer(vcpu,1000000L);
  61.498  	return (IA64_NO_FAULT);
  61.499 @@ -787,14 +788,14 @@ printf("**** vcpu_set_itv(%d): vitm=%lx,
  61.500  IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
  61.501  {
  61.502  	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
  61.503 -	PSCB(vcpu).pmv = val;
  61.504 +	PSCB(vcpu,pmv) = val;
  61.505  	return (IA64_NO_FAULT);
  61.506  }
  61.507  
  61.508  IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
  61.509  {
  61.510  	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
  61.511 -	PSCB(vcpu).cmcv = val;
  61.512 +	PSCB(vcpu,cmcv) = val;
  61.513  	return (IA64_NO_FAULT);
  61.514  }
  61.515  
  61.516 @@ -804,13 +805,13 @@ Interval timer routines
  61.517  
  61.518  BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
  61.519  {
  61.520 -	UINT64 itv = PSCB(vcpu).itv;
  61.521 +	UINT64 itv = PSCB(vcpu,itv);
  61.522  	return(!itv || !!(itv & 0x10000));
  61.523  }
  61.524  
  61.525  BOOLEAN vcpu_timer_expired(VCPU *vcpu)
  61.526  {
  61.527 -	unsigned long domain_itm = PSCB(vcpu).domain_itm;
  61.528 +	unsigned long domain_itm = PSCB(vcpu,domain_itm);
  61.529  	unsigned long now = ia64_get_itc();
  61.530   
  61.531  	if (domain_itm && (now > domain_itm) &&
  61.532 @@ -836,18 +837,18 @@ void vcpu_safe_set_itm(unsigned long val
  61.533  
  61.534  void vcpu_set_next_timer(VCPU *vcpu)
  61.535  {
  61.536 -	UINT64 d = PSCB(vcpu).domain_itm;
  61.537 -	//UINT64 s = PSCB(vcpu).xen_itm;
  61.538 +	UINT64 d = PSCB(vcpu,domain_itm);
  61.539 +	//UINT64 s = PSCB(vcpu,xen_itm);
  61.540  	UINT64 s = local_cpu_data->itm_next;
  61.541  	UINT64 now = ia64_get_itc();
  61.542 -	//UINT64 interval = PSCB(vcpu).xen_timer_interval;
  61.543 +	//UINT64 interval = PSCB(vcpu,xen_timer_interval);
  61.544  
  61.545  	/* gloss over the wraparound problem for now... we know it exists
  61.546  	 * but it doesn't matter right now */
  61.547  
  61.548  #if 0
  61.549  	/* ensure at least next SP tick is in the future */
  61.550 -	if (!interval) PSCB(vcpu).xen_itm = now +
  61.551 +	if (!interval) PSCB(vcpu,xen_itm) = now +
  61.552  #if 0
  61.553  		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
  61.554  		 			DEFAULT_CLOCK_RATE);
  61.555 @@ -856,16 +857,16 @@ void vcpu_set_next_timer(VCPU *vcpu)
  61.556  //printf("vcpu_set_next_timer: HACK!\n");
  61.557  #endif
  61.558  #if 0
  61.559 -	if (PSCB(vcpu).xen_itm < now)
  61.560 -		while (PSCB(vcpu).xen_itm < now + (interval>>1))
  61.561 -			PSCB(vcpu).xen_itm += interval;
  61.562 +	if (PSCB(vcpu,xen_itm) < now)
  61.563 +		while (PSCB(vcpu,xen_itm) < now + (interval>>1))
  61.564 +			PSCB(vcpu,xen_itm) += interval;
  61.565  #endif
  61.566  #endif
  61.567  
  61.568 -	if (is_idle_task(vcpu)) {
  61.569 +	if (is_idle_task(vcpu->domain)) {
  61.570  		printf("****** vcpu_set_next_timer called during idle!!\n");
  61.571  	}
  61.572 -	//s = PSCB(vcpu).xen_itm;
  61.573 +	//s = PSCB(vcpu,xen_itm);
  61.574  	if (d && (d > now) && (d < s)) {
  61.575  		vcpu_safe_set_itm(d);
  61.576  		//using_domain_as_itm++;
  61.577 @@ -879,11 +880,11 @@ void vcpu_set_next_timer(VCPU *vcpu)
  61.578  // parameter is a time interval specified in cycles
  61.579  void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
  61.580  {
  61.581 -    PSCB(vcpu).xen_timer_interval = cycles;
  61.582 +    PSCB(vcpu,xen_timer_interval) = cycles;
  61.583      vcpu_set_next_timer(vcpu);
  61.584      printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
  61.585 -             PSCB(vcpu).xen_timer_interval);
  61.586 -    __set_bit(PSCB(vcpu).itv, PSCB(vcpu).delivery_mask);
  61.587 +             PSCB(vcpu,xen_timer_interval));
  61.588 +    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
  61.589  }
  61.590  
  61.591  IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
  61.592 @@ -892,7 +893,7 @@ IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT6
  61.593  
  61.594  	//if (val < now) val = now + 1000;
  61.595  //printf("*** vcpu_set_itm: called with %lx\n",val);
  61.596 -	PSCB(vcpu).domain_itm = val;
  61.597 +	PSCB(vcpu,domain_itm) = val;
  61.598  	vcpu_set_next_timer(vcpu);
  61.599  	return (IA64_NO_FAULT);
  61.600  }
  61.601 @@ -901,7 +902,7 @@ IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT6
  61.602  {
  61.603  	
  61.604  	UINT64 oldnow = ia64_get_itc();
  61.605 -	UINT64 olditm = PSCB(vcpu).domain_itm;
  61.606 +	UINT64 olditm = PSCB(vcpu,domain_itm);
  61.607  	unsigned long d = olditm - oldnow;
  61.608  	unsigned long x = local_cpu_data->itm_next - oldnow;
  61.609  	
  61.610 @@ -910,10 +911,10 @@ IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT6
  61.611  	local_irq_disable();
  61.612  	if (olditm) {
  61.613  printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
  61.614 -		PSCB(vcpu).domain_itm = newnow + d;
  61.615 +		PSCB(vcpu,domain_itm) = newnow + d;
  61.616  	}
  61.617  	local_cpu_data->itm_next = newnow + x;
  61.618 -	d = PSCB(vcpu).domain_itm;
  61.619 +	d = PSCB(vcpu,domain_itm);
  61.620  	x = local_cpu_data->itm_next;
  61.621  	
  61.622  	ia64_set_itc(newnow);
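The delta arithmetic in vcpu_set_itc() is easy to lose in diff form: rewriting the cycle counter must not change when pending timers fire, so the distances d (to the domain's virtual itm) and x (to Xen's next itm) are measured against the old counter and re-applied to the new one. The same reasoning as a standalone sketch (hypothetical helper and struct):

	struct timer_state { unsigned long domain_itm, xen_itm; };

	/* Re-base match registers so time-to-expiry survives an itc write. */
	static void rebase_timers(struct timer_state *t,
	                          unsigned long oldnow, unsigned long newnow)
	{
		unsigned long d = t->domain_itm - oldnow;	/* cycles to vitm */
		unsigned long x = t->xen_itm - oldnow;	/* cycles to Xen tick */

		if (t->domain_itm)
			t->domain_itm = newnow + d;
		t->xen_itm = newnow + x;
	}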
  61.623 @@ -946,7 +947,7 @@ IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT6
  61.624  
  61.625  void vcpu_pend_timer(VCPU *vcpu)
  61.626  {
  61.627 -	UINT64 itv = PSCB(vcpu).itv & 0xff;
  61.628 +	UINT64 itv = PSCB(vcpu,itv) & 0xff;
  61.629  
  61.630  	if (vcpu_timer_disabled(vcpu)) return;
  61.631  	vcpu_pend_interrupt(vcpu, itv);
  61.632 @@ -955,9 +956,9 @@ void vcpu_pend_timer(VCPU *vcpu)
  61.633  //FIXME: This is a hack because everything dies if a timer tick is lost
  61.634  void vcpu_poke_timer(VCPU *vcpu)
  61.635  {
  61.636 -	UINT64 itv = PSCB(vcpu).itv & 0xff;
  61.637 +	UINT64 itv = PSCB(vcpu,itv) & 0xff;
  61.638  	UINT64 now = ia64_get_itc();
  61.639 -	UINT64 itm = PSCB(vcpu).domain_itm;
  61.640 +	UINT64 itm = PSCB(vcpu,domain_itm);
  61.641  	UINT64 irr;
  61.642  
  61.643  	if (vcpu_timer_disabled(vcpu)) return;
  61.644 @@ -967,8 +968,8 @@ void vcpu_poke_timer(VCPU *vcpu)
  61.645  		while(1);
  61.646  	}
  61.647  	// using 0xef instead of itv so can get real irr
  61.648 -	if (now > itm && !test_bit(0xefL, PSCB(vcpu).insvc)) {
  61.649 -		if (!test_bit(0xefL,PSCB(vcpu).irr)) {
  61.650 +	if (now > itm && !test_bit(0xefL, PSCB(vcpu,insvc))) {
  61.651 +		if (!test_bit(0xefL,PSCB(vcpu,irr))) {
  61.652  			irr = ia64_getreg(_IA64_REG_CR_IRR3);
  61.653  			if (irr & (1L<<(0xef-0xc0))) return;
  61.654  if (now-itm>0x800000)
  61.655 @@ -985,7 +986,7 @@ Privileged operation emulation routines
  61.656  
  61.657  IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
  61.658  {
  61.659 -	PSCB(vcpu).ifa = ifa;	// privop traps don't set ifa so do it here
  61.660 +	PSCB(vcpu,ifa) = ifa;	// privop traps don't set ifa so do it here
  61.661  	return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
  61.662  }
  61.663  
  61.664 @@ -999,34 +1000,35 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
  61.665  	REGS *regs = vcpu_regs(vcpu);
  61.666  	extern void dorfirfi(void);
  61.667  
  61.668 -	psr.i64 = PSCB(vcpu).ipsr;
  61.669 -	if (psr.cpl < 3) psr.cpl = 2;
  61.670 -	if (psr.i) PSCB(vcpu).interrupt_delivery_enabled = 1;
  61.671 -	int_enable = psr.i;
  61.672 -	if (psr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
  61.673 -	if (psr.dt && psr.rt && psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
  61.674 +	psr.i64 = PSCB(vcpu,ipsr);
  61.675 +	if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
  61.676 +	if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
  61.677 +	int_enable = psr.ia64_psr.i;
  61.678 +	if (psr.ia64_psr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
  61.679 +	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
  61.680  	else vcpu_set_metaphysical_mode(vcpu,TRUE);
  61.681 -	psr.ic = 1; psr.i = 1;
  61.682 -	psr.dt = 1; psr.rt = 1; psr.it = 1;
  61.683 -	psr.bn = 1;
  61.684 +	psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
  61.685 +	psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
  61.686 +	psr.ia64_psr.bn = 1;
  61.687  	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
  61.688 -	if (psr.be) {
  61.689 +	if (psr.ia64_psr.be) {
  61.690  		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
  61.691  		return (IA64_ILLOP_FAULT);
  61.692  	}
  61.693 -	PSCB(vcpu).incomplete_regframe = 0; // is this necessary?
  61.694 -	ifs = PSCB(vcpu).ifs;
  61.695 +	PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
  61.696 +	ifs = PSCB(vcpu,ifs);
  61.697  	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  61.698  	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  61.699  	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
  61.700 -#define SI_OFS(x)	((char *)(&PSCB(vcpu).x) - (char *)(vcpu->shared_info))
  61.701 -if (SI_OFS(iip)!=0x150 || SI_OFS(ipsr)!=0x148 || SI_OFS(ifs)!=0x158) {
  61.702 +#define SI_OFS(x)	((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
  61.703 +if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
  61.704  printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
   61.705 +printf("SI_CR_IIP=0x%lx,IPSR=0x%lx,IFS_OFFSET=0x%lx\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
  61.706  while(1);
  61.707  }
  61.708 -		// TODO: validate PSCB(vcpu).iip 
  61.709 -		// TODO: PSCB(vcpu).ipsr = psr;
  61.710 -		PSCB(vcpu).ipsr = psr.i64;
  61.711 +		// TODO: validate PSCB(vcpu,iip) 
  61.712 +		// TODO: PSCB(vcpu,ipsr) = psr;
  61.713 +		PSCB(vcpu,ipsr) = psr.i64;
  61.714  		// now set up the trampoline
  61.715  		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
  61.716  		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
  61.717 @@ -1034,11 +1036,11 @@ while(1);
  61.718  	}
  61.719  	else {
  61.720  		regs->cr_ipsr = psr.i64;
  61.721 -		regs->cr_iip = PSCB(vcpu).iip;
  61.722 +		regs->cr_iip = PSCB(vcpu,iip);
  61.723  	}
  61.724 -	PSCB(vcpu).interrupt_collection_enabled = 1;
  61.725 +	PSCB(vcpu,interrupt_collection_enabled) = 1;
  61.726  	vcpu_bsw1(vcpu);
  61.727 -	PSCB(vcpu).interrupt_delivery_enabled = int_enable;
  61.728 +	PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
  61.729  	return (IA64_NO_FAULT);
  61.730  }
  61.731  
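The SI_OFS() check in vcpu_rfi() pins the new 0x08/0x10/0x18 offsets to the hand-coded SI_CR_*_OFFSET constants in dorfirfi (updated in the xenasm.S hunk below), but only at runtime, by hanging on mismatch. A hypothetical compile-time equivalent -- the struct layout and the BUILD_BUG_ON helper are assumptions for illustration, not part of this changeset:

	#include <stddef.h>

	/* Assumed start of the arch vcpu_info layout implied above. */
	struct arch_vcpu_info_sketch {
		unsigned long flags;	/* 0x00 -- placeholder */
		unsigned long ipsr;	/* 0x08 */
		unsigned long iip;	/* 0x10 */
		unsigned long ifs;	/* 0x18 */
	};

	/* Negative array size fails the build if an offset drifts. */
	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline void check_dorfirfi_offsets(void)
	{
		BUILD_BUG_ON(offsetof(struct arch_vcpu_info_sketch, ipsr) != 0x08);
		BUILD_BUG_ON(offsetof(struct arch_vcpu_info_sketch, iip)  != 0x10);
		BUILD_BUG_ON(offsetof(struct arch_vcpu_info_sketch, ifs)  != 0x18);
	}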
  61.732 @@ -1046,10 +1048,10 @@ IA64FAULT vcpu_cover(VCPU *vcpu)
  61.733  {
  61.734  	REGS *regs = vcpu_regs(vcpu);
  61.735  
  61.736 -	if (!PSCB(vcpu).interrupt_collection_enabled) {
  61.737 -		if (!PSCB(vcpu).incomplete_regframe)
  61.738 -			PSCB(vcpu).ifs = regs->cr_ifs;
  61.739 -		else PSCB(vcpu).incomplete_regframe = 0;
  61.740 +	if (!PSCB(vcpu,interrupt_collection_enabled)) {
  61.741 +		if (!PSCB(vcpu,incomplete_regframe))
  61.742 +			PSCB(vcpu,ifs) = regs->cr_ifs;
  61.743 +		else PSCB(vcpu,incomplete_regframe) = 0;
  61.744  	}
  61.745  	regs->cr_ifs = 0;
  61.746  	return (IA64_NO_FAULT);
  61.747 @@ -1058,7 +1060,7 @@ IA64FAULT vcpu_cover(VCPU *vcpu)
  61.748  IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  61.749  {
  61.750  	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
  61.751 -	UINT64 pta = PSCB(vcpu).pta;
  61.752 +	UINT64 pta = PSCB(vcpu,pta);
  61.753  	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  61.754  	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
  61.755  	UINT64 Mask = (1L << pta_sz) - 1;
  61.756 @@ -1078,7 +1080,7 @@ IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 
  61.757  
  61.758  	if (VHPT_addr1 == 0xe000000000000000L) {
  61.759  	    printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
  61.760 -		PSCB(vcpu).iip);
  61.761 +		PSCB(vcpu,iip));
  61.762  	    return (IA64_ILLOP_FAULT);
  61.763  	}
  61.764  //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
  61.765 @@ -1103,10 +1105,10 @@ extern unsigned long privop_trace;
  61.766  	if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
  61.767  		mask = (1L << trp->ps) - 1;
  61.768  		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
  61.769 -		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu).iip,*padr);
  61.770 +		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
  61.771  		return (IA64_NO_FAULT);
  61.772  	}
  61.773 -	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu).iip);
  61.774 +	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu,iip));
  61.775  	return vcpu_force_data_miss(vcpu, vadr);
  61.776  }
  61.777  
  61.778 @@ -1198,13 +1200,13 @@ IA64FAULT vcpu_bsw0(VCPU *vcpu)
  61.779  {
  61.780  	REGS *regs = vcpu_regs(vcpu);
  61.781  	unsigned long *r = &regs->r16;
  61.782 -	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  61.783 -	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  61.784 +	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  61.785 +	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  61.786  	int i;
  61.787  
  61.788 -	if (PSCB(vcpu).banknum) {
  61.789 +	if (PSCB(vcpu,banknum)) {
  61.790  		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  61.791 -		PSCB(vcpu).banknum = 0;
  61.792 +		PSCB(vcpu,banknum) = 0;
  61.793  	}
  61.794  	return (IA64_NO_FAULT);
  61.795  }
  61.796 @@ -1213,13 +1215,13 @@ IA64FAULT vcpu_bsw1(VCPU *vcpu)
  61.797  {
  61.798  	REGS *regs = vcpu_regs(vcpu);
  61.799  	unsigned long *r = &regs->r16;
  61.800 -	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  61.801 -	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  61.802 +	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  61.803 +	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  61.804  	int i;
  61.805  
  61.806 -	if (!PSCB(vcpu).banknum) {
  61.807 +	if (!PSCB(vcpu,banknum)) {
  61.808  		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  61.809 -		PSCB(vcpu).banknum = 1;
  61.810 +		PSCB(vcpu,banknum) = 1;
  61.811  	}
  61.812  	return (IA64_NO_FAULT);
  61.813  }
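vcpu_bsw0()/vcpu_bsw1() emulate the psr.bn bank switch: the live r16..r31 window is saved into the outgoing bank's shadow array while the incoming bank's saved values are loaded in its place. The pointer-stepping loop above reduces to this sketch (hypothetical helper):

	/* Swap the live r16..r31 window between register banks:
	 * save live values to save_out, load replacements from save_in. */
	static void bank_switch(unsigned long *live,	/* &regs->r16 */
	                        unsigned long *save_out,
	                        unsigned long *save_in)
	{
		int i;

		for (i = 0; i < 16; i++) {
			save_out[i] = live[i];
			live[i] = save_in[i];
		}
	}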
  61.814 @@ -1263,7 +1265,7 @@ unsigned long vcpu_get_rr_ve(VCPU *vcpu,
  61.815  	
  61.816  	ia64_rr rr;
  61.817  
  61.818 -	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
   61.819 +	rr.rrval = PSCB(vcpu,rrs[vadr>>61]);
  61.820  	return(rr.ve);
  61.821  }
  61.822  
  61.823 @@ -1273,7 +1275,7 @@ unsigned long vcpu_get_rr_ps(VCPU *vcpu,
  61.824  	
  61.825  	ia64_rr rr;
  61.826  
  61.827 -	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
   61.828 +	rr.rrval = PSCB(vcpu,rrs[vadr>>61]);
  61.829  	return(rr.ps);
  61.830  }
  61.831  
  61.832 @@ -1283,7 +1285,7 @@ unsigned long vcpu_get_rr_rid(VCPU *vcpu
  61.833  	
  61.834  	ia64_rr rr;
  61.835  
  61.836 -	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
   61.837 +	rr.rrval = PSCB(vcpu,rrs[vadr>>61]);
  61.838  	return(rr.rid);
  61.839  }
  61.840  
  61.841 @@ -1291,7 +1293,7 @@ unsigned long vcpu_get_rr_rid(VCPU *vcpu
  61.842  IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  61.843  {
  61.844  	extern void set_one_rr(UINT64, UINT64);
  61.845 -	PSCB(vcpu).rrs[reg>>61] = val;
   61.846 +	PSCB(vcpu,rrs[reg>>61]) = val;
  61.847  	// warning: set_one_rr() does it "live"
  61.848  	set_one_rr(reg,val);
  61.849  	return (IA64_NO_FAULT);
  61.850 @@ -1299,7 +1301,7 @@ IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64
  61.851  
  61.852  IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  61.853  {
  61.854 -	UINT val = PSCB(vcpu).rrs[reg>>61];
   61.855 +	UINT val = PSCB(vcpu,rrs[reg>>61]);
  61.856  	*pval = val;
  61.857  	return (IA64_NO_FAULT);
  61.858  }
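One note on the rrs[] hunks here and above: bits 63:61 of a virtual address select one of the eight region registers, so every lookup is rrs[vadr>>61] -- the shift belongs inside the subscript, which in turn sits whole inside PSCB()'s second argument. In isolation (simplified, illustrative types):

	/* Region-register lookup: address bits 63:61 pick rr0..rr7. */
	static unsigned long rr_for_vadr(const unsigned long rrs[8],
	                                 unsigned long vadr)
	{
		return rrs[vadr >> 61];
	}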
  61.859 @@ -1379,9 +1381,9 @@ TR_ENTRY *match_tr(VCPU *vcpu, unsigned 
  61.860  {
  61.861  	TR_ENTRY *trp;
  61.862  
  61.863 -	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.dtrs,ifa,NDTRS);
  61.864 +	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.dtrs,ifa,NDTRS);
  61.865  	if (trp) return trp;
  61.866 -	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.itrs,ifa,NITRS);
  61.867 +	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.itrs,ifa,NITRS);
  61.868  	if (trp) return trp;
  61.869  	return 0;
  61.870  }
  61.871 @@ -1392,7 +1394,7 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
  61.872  	TR_ENTRY *trp;
  61.873  
  61.874  	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
  61.875 -	trp = &PSCB(vcpu).dtrs[slot];
  61.876 +	trp = &PSCB(vcpu,dtrs[slot]);
  61.877  	vcpu_set_tr_entry(trp,pte,itir,ifa);
  61.878  	return IA64_NO_FAULT;
  61.879  }
  61.880 @@ -1403,7 +1405,7 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
  61.881  	TR_ENTRY *trp;
  61.882  
  61.883  	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
  61.884 -	trp = &PSCB(vcpu).itrs[slot];
  61.885 +	trp = &PSCB(vcpu,itrs[slot]);
  61.886  	vcpu_set_tr_entry(trp,pte,itir,ifa);
  61.887  	return IA64_NO_FAULT;
  61.888  }
  61.889 @@ -1414,7 +1416,7 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
  61.890  
  61.891  void foobar(void) { /*vcpu_verbose = 1;*/ }
  61.892  
  61.893 -extern VCPU *dom0;
  61.894 +extern struct domain *dom0;
  61.895  
  61.896  void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
  61.897  {
  61.898 @@ -1427,13 +1429,13 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
  61.899  	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
  61.900  	ia64_set_psr(psr);
  61.901  	// ia64_srlz_i(); // no srls req'd, will rfi later
  61.902 -	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu).itlb,pte,logps<<2,vaddr);
  61.903 -	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu).dtlb,pte,logps<<2,vaddr);
  61.904 +	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,logps<<2,vaddr);
  61.905 +	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,logps<<2,vaddr);
  61.906  }
  61.907  
  61.908  TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
  61.909  {
  61.910 -	return vcpu_match_tr_entry(vcpu,&vcpu->shared_info->arch.dtlb,ifa,1);
  61.911 +	return vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1);
  61.912  }
  61.913  
  61.914  IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  61.915 @@ -1482,7 +1484,7 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
  61.916  {
  61.917  	UINT64 mpaddr;
  61.918  	IA64FAULT fault;
  61.919 -	unsigned long lookup_domain_mpa(VCPU *,unsigned long);
  61.920 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
  61.921  	unsigned long pteval, dom_imva;
  61.922  
  61.923  	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
  61.924 @@ -1494,7 +1496,7 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
  61.925  				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
  61.926  			}
  61.927  		}
  61.928 -		pteval = lookup_domain_mpa(vcpu,mpaddr);
  61.929 +		pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
  61.930  		if (pteval) {
  61.931  			dom_imva = __va(pteval & _PFN_MASK);
  61.932  			ia64_fc(dom_imva);
  61.933 @@ -1518,8 +1520,8 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 
  61.934  	// FIXME: When VHPT is in place, flush that too!
  61.935  	local_flush_tlb_all();
  61.936  	// just invalidate the "whole" tlb
  61.937 -	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  61.938 -	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  61.939 +	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
  61.940 +	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
  61.941  	return IA64_NO_FAULT;
  61.942  }
  61.943  
  61.944 @@ -1536,8 +1538,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
  61.945  	// if (Xen address) return(IA64_ILLOP_FAULT);
  61.946  	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
  61.947  	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
  61.948 -	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  61.949 -	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  61.950 +	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
  61.951 +	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
  61.952  	return IA64_NO_FAULT;
  61.953  }
  61.954  
  61.955 @@ -1555,5 +1557,5 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v
  61.956  
  61.957  void vcpu_set_regs(VCPU *vcpu, REGS *regs)
  61.958  {
  61.959 -	vcpu->regs = regs;
  61.960 +	vcpu->arch.regs = regs;
  61.961  }
    62.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.2 +++ b/xen/arch/ia64/vhpt.c	Mon Feb 14 12:25:30 2005 +0000
    62.3 @@ -0,0 +1,70 @@
    62.4 +/*
    62.5 + * Initialize VHPT support.
    62.6 + *
    62.7 + * Copyright (C) 2004 Hewlett-Packard Co
    62.8 + *	Dan Magenheimer <dan.magenheimer@hp.com>
    62.9 + */
   62.10 +#include <linux/config.h>
   62.11 +#include <linux/kernel.h>
   62.12 +#include <linux/init.h>
   62.13 +
   62.14 +#include <asm/processor.h>
   62.15 +#include <asm/system.h>
   62.16 +#include <asm/pgalloc.h>
   62.17 +#include <asm/page.h>
   62.18 +#include <asm/dma.h>
   62.19 +#include <asm/vhpt.h>
   62.20 +
   62.21 +unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
   62.22 +
   62.23 +void vhpt_flush(void)
   62.24 +{
   62.25 +	struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
   62.26 +	int i;
   62.27 +
   62.28 +	for (i = 0; i < VHPT_CACHE_NUM_ENTRIES; i++, v++) {
   62.29 +		v->itir = 0;
   62.30 +		v->CChain = 0;
   62.31 +		v->page_flags = 0;
   62.32 +		v->ti_tag = INVALID_TI_TAG;
   62.33 +	}
   62.34 +	// initialize cache too???
   62.35 +}
   62.36 +
   62.37 +void vhpt_map(void)
   62.38 +{
   62.39 +	unsigned long psr;
   62.40 +
   62.41 +	psr = ia64_clear_ic();
   62.42 +	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
   62.43 +	ia64_set_psr(psr);
   62.44 +	ia64_srlz_i();
   62.45 +}
   62.46 +
   62.47 +void vhpt_init(void)
   62.48 +{
   62.49 +	unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
   62.50 +	extern unsigned long __alloc_bootmem(unsigned long, unsigned long, unsigned long);
   62.51 +#if !VHPT_ENABLED
   62.52 +	return;
   62.53 +#endif
   62.54 +	// allocate a huge chunk of physical memory.... how???
   62.55 +	vhpt_total_size = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   62.56 +	vhpt_alignment = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   62.57 +	printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
   62.58 +	vhpt_imva = __alloc_bootmem(vhpt_total_size,vhpt_alignment,
   62.59 +		__pa(MAX_DMA_ADDRESS));
   62.60 +	if (!vhpt_imva) {
   62.61 +		printf("vhpt_init: can't allocate VHPT!\n");
   62.62 +		while(1);
   62.63 +	}
   62.64 +	vhpt_paddr = __pa(vhpt_imva);
   62.65 +	vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
   62.66 +	printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
   62.67 +	vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
   62.68 +	vhpt_map();
   62.69 +	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   62.70 +		VHPT_ENABLED);
   62.71 +	vhpt_flush();
   62.72 +}
   62.73 +
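The ia64_set_pta() call in vhpt_init() packs the walker configuration into cr.pta: bit 0 enables the walker (ve), bits 7:2 hold log2 of the table size, bit 8 selects the long/hashed format (vf), and the upper bits carry the size-aligned base. Assuming VHPT_ENABLED is 1, the encoding reduces to this illustrative helper (hypothetical name):

	/* Build cr.pta for a long-format VHPT at a size-aligned base. */
	static unsigned long make_pta(unsigned long base, unsigned int size_log2)
	{
		return base				/* pta.base             */
		     | (1UL << 8)			/* pta.vf: long format  */
		     | ((unsigned long)size_log2 << 2)	/* pta.size             */
		     | 1UL;				/* pta.ve: walker on    */
	}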
    63.1 --- a/xen/arch/ia64/xenasm.S	Mon Feb 14 11:42:11 2005 +0000
    63.2 +++ b/xen/arch/ia64/xenasm.S	Mon Feb 14 12:25:30 2005 +0000
    63.3 @@ -11,6 +11,9 @@
    63.4  #include <asm/pgtable.h>
    63.5  #include <asm/vhpt.h>
    63.6  
    63.7 +#if 0
    63.8 +// FIXME: there's gotta be a better way...
    63.9 +// ski and spaski are different... moved to xenmisc.c
   63.10  #define RunningOnHpSki(rx,ry,pn) 			\
   63.11  	addl rx = 2, r0; 				\
   63.12  	addl ry = 3, r0; 				\
   63.13 @@ -22,7 +25,7 @@
   63.14  	;; 						\
   63.15  	(pn) movl rx = 0x7000004 ; 			\
   63.16  	;; 						\
   63.17 -	(pn) cmp.eq pn,p0 = ry, rx; 			\
   63.18 +	(pn) cmp.ge pn,p0 = ry, rx; 			\
   63.19  	;;
   63.20  
   63.21  //int platform_is_hp_ski(void)
   63.22 @@ -32,9 +35,12 @@ GLOBAL_ENTRY(platform_is_hp_ski)
   63.23  (p8)	mov r8 = 1
   63.24  	br.ret.sptk.many b0
   63.25  END(platform_is_hp_ski)
   63.26 +#endif
   63.27  
   63.28  // Change rr7 to the passed value while ensuring
   63.29 -// Xen is mapped into the new region
   63.30 +// Xen is mapped into the new region.
   63.31 +//   in0: new rr7 value
   63.32 +//   in1: Xen virtual address of shared info (to be pinned)
   63.33  #define PSR_BITS_TO_CLEAR						\
   63.34  	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
   63.35  	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
   63.36 @@ -45,7 +51,7 @@ END(platform_is_hp_ski)
   63.37  GLOBAL_ENTRY(ia64_new_rr7)
   63.38  	// not sure this unwind statement is correct...
   63.39  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
   63.40 -	alloc loc1 = ar.pfs, 1, 7, 0, 0
   63.41 +	alloc loc1 = ar.pfs, 2, 7, 0, 0
   63.42  1:	{
   63.43  	  mov r28  = in0		// copy procedure index
   63.44  	  mov r8   = ip			// save ip to compute branch
   63.45 @@ -62,7 +68,7 @@ 1:	{
   63.46  	tpa loc6=loc6			// grab this BEFORE changing rr7
   63.47  	;;
   63.48  #endif
   63.49 -	movl loc5=SHAREDINFO_ADDR
   63.50 +	mov loc5=in1
   63.51  	;;
   63.52  	tpa loc5=loc5			// grab this BEFORE changing rr7
   63.53  	;;
   63.54 @@ -272,9 +278,9 @@ GLOBAL_ENTRY(__get_domain_bundle)
   63.55  END(__get_domain_bundle)
   63.56  
   63.57  GLOBAL_ENTRY(dorfirfi)
   63.58 -#define SI_CR_IIP_OFFSET 0x150
   63.59 -#define SI_CR_IPSR_OFFSET 0x148
   63.60 -#define SI_CR_IFS_OFFSET 0x158
   63.61 +#define SI_CR_IIP_OFFSET 0x10
   63.62 +#define SI_CR_IPSR_OFFSET 0x08
   63.63 +#define SI_CR_IFS_OFFSET 0x18
   63.64          movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
   63.65          movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
   63.66          movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
    64.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    64.2 +++ b/xen/arch/ia64/xenmisc.c	Mon Feb 14 12:25:30 2005 +0000
    64.3 @@ -0,0 +1,239 @@
    64.4 +/*
    64.5 + * Xen misc
    64.6 + * 
    64.7 + * Functions/decls that are/may be needed to link with Xen because
    64.8 + * of x86 dependencies
    64.9 + *
   64.10 + * Copyright (C) 2004 Hewlett-Packard Co.
   64.11 + *	Dan Magenheimer (dan.magenheimer@hp.com)
   64.12 + *
   64.13 + */
   64.14 +
   64.15 +#include <linux/config.h>
   64.16 +#include <xen/sched.h>
   64.17 +#include <linux/efi.h>
   64.18 +#include <asm/processor.h>
   64.19 +#include <xen/serial.h>
   64.20 +#include <asm/io.h>
   64.21 +
   64.22 +efi_memory_desc_t ia64_efi_io_md;
   64.23 +EXPORT_SYMBOL(ia64_efi_io_md);
   64.24 +unsigned long wait_init_idle;
   64.25 +int phys_proc_id[NR_CPUS];
   64.26 +unsigned long loops_per_jiffy = (1<<12);	// from linux/init/main.c
   64.27 +
   64.28 +unsigned int watchdog_on = 0;	// from arch/x86/nmi.c ?!?
   64.29 +
   64.30 +void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
   64.31 +void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
   64.32 +void hpsim_setup(char **x) { printf("hpsim_setup() skipped (MAY NEED FOR CONSOLE INPUT!!!)\n"); }	
   64.33 +
   64.34 +long
   64.35 +is_platform_hp_ski(void)
   64.36 +{
   64.37 +	int i;
   64.38 +	long cpuid[6];
   64.39 +
   64.40 +	for (i = 0; i < 5; ++i)
   64.41 +		cpuid[i] = ia64_get_cpuid(i);
   64.42 +	if ((cpuid[0] & 0xff) != 'H') return 0;
   64.43 +	if ((cpuid[3] & 0xff) != 0x4) return 0;
   64.44 +	if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
   64.45 +	if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
   64.46 +	if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
   64.47 +	return 1;
   64.48 +}
   64.49 +
   64.50 +long
   64.51 +platform_is_hp_ski(void)
   64.52 +{
   64.53 +	extern long running_on_sim;
   64.54 +	return running_on_sim;
   64.55 +}
   64.56 +
   64.57 +/* calls in xen/common code that are unused on ia64 */
   64.58 +void synchronise_pagetables(unsigned long cpu_mask) { return; }
   64.59 +
   64.60 +int grant_table_create(struct domain *d) { return 0; }
   64.61 +void grant_table_destroy(struct domain *d)
   64.62 +{
   64.63 +	printf("grant_table_destroy: domain_destruct not tested!!!\n");
   64.64 +	printf("grant_table_destroy: ensure atomic_* calls work in domain_destruct!!\n");
   64.65 +	dummy();
   64.66 +	return;
   64.67 +}
   64.68 +
   64.69 +struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
   64.70 +
   64.71 +void cleanup_writable_pagetable(struct domain *d, int what) { return; }
   64.72 +
   64.73 +///////////////////////////////
   64.74 +// from arch/x86/apic.c
   64.75 +///////////////////////////////
   64.76 +
   64.77 +int reprogram_ac_timer(s_time_t timeout)
   64.78 +{
   64.79 +	return 1;
   64.80 +}
   64.81 +
   64.82 +///////////////////////////////
   64.83 +// from arch/x86/dompage.c
   64.84 +///////////////////////////////
   64.85 +
   64.86 +struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
   64.87 +{
   64.88 +	printf("alloc_domheap_pages: called, not implemented\n");
   64.89 +}
   64.90 +
   64.91 +void free_domheap_pages(struct pfn_info *pg, unsigned int order)
   64.92 +{
   64.93 +	printf("free_domheap_pages: called, not implemented\n");
   64.94 +}
   64.95 +
   64.96 +
   64.97 +unsigned long avail_domheap_pages(void)
   64.98 +{
   64.99 +	printf("avail_domheap_pages: called, not implemented\n");
  64.100 +	return 0;
  64.101 +}
  64.102 +
  64.103 +///////////////////////////////
  64.104 +// from arch/x86/flushtlb.c
  64.105 +///////////////////////////////
  64.106 +
  64.107 +u32 tlbflush_clock;
  64.108 +u32 tlbflush_time[NR_CPUS];
  64.109 +
  64.110 +///////////////////////////////
  64.111 +// from arch/x86/memory.c
  64.112 +///////////////////////////////
  64.113 +
  64.114 +void init_percpu_info(void)
  64.115 +{
  64.116 +	dummy();
  64.117 +    //memset(percpu_info, 0, sizeof(percpu_info));
  64.118 +}
  64.119 +
  64.120 +void free_page_type(struct pfn_info *page, unsigned int type)
  64.121 +{
  64.122 +	dummy();
  64.123 +}
  64.124 +
  64.125 +///////////////////////////////
  64.126 +// from arch/x86/pci.c
  64.127 +///////////////////////////////
  64.128 +
  64.129 +int
  64.130 +pcibios_prep_mwi (struct pci_dev *dev)
  64.131 +{
  64.132 +	dummy();
  64.133 +}
  64.134 +
  64.135 +///////////////////////////////
  64.136 +// from arch/x86/pci-irq.c
  64.137 +///////////////////////////////
  64.138 +
  64.139 +void pcibios_enable_irq(struct pci_dev *dev)
  64.140 +{
  64.141 +	dummy();
  64.142 +}
  64.143 +
  64.144 +///////////////////////////////
  64.145 +// from arch/ia64/pci-pc.c
  64.146 +///////////////////////////////
  64.147 +
  64.148 +#include <xen/pci.h>
  64.149 +
  64.150 +int pcibios_enable_device(struct pci_dev *dev, int mask)
  64.151 +{
  64.152 +	dummy();
  64.153 +	return 0;
  64.154 +}
  64.155 +
  64.156 +int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL;
  64.157 +int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL;
  64.158 +
  64.159 +//struct pci_fixup pcibios_fixups[] = { { 0 } };
  64.160 +struct pci_fixup pcibios_fixups[] = { { 0 } };
  64.161 +
  64.162 +void
  64.163 +pcibios_align_resource(void *data, struct resource *res,
  64.164 +		       unsigned long size, unsigned long align)
  64.165 +{
  64.166 +	dummy();
  64.167 +}
  64.168 +
  64.169 +void
  64.170 +pcibios_update_resource(struct pci_dev *dev, struct resource *root,
  64.171 +			struct resource *res, int resource)
  64.172 +{
  64.173 +	dummy();
  64.174 +}
  64.175 +
  64.176 +void __devinit  pcibios_fixup_bus(struct pci_bus *b)
  64.177 +{
  64.178 +	dummy();
  64.179 +}
  64.180 +
  64.181 +void __init pcibios_init(void)
  64.182 +{
  64.183 +	dummy();
  64.184 +}
  64.185 +
  64.186 +char * __devinit  pcibios_setup(char *str)
  64.187 +{
  64.188 +	dummy();
  64.189 +	return 0;
  64.190 +}
  64.191 +
  64.192 +///////////////////////////////
  64.193 +// from arch/ia64/traps.c
  64.194 +///////////////////////////////
  64.195 +
  64.196 +void show_registers(struct pt_regs *regs)
  64.197 +{
  64.198 +	dummy();
  64.199 +}	
  64.200 +
  64.201 +///////////////////////////////
  64.202 +// from common/keyhandler.c
  64.203 +///////////////////////////////
  64.204 +void dump_pageframe_info(struct domain *d)
  64.205 +{
  64.206 +	printk("dump_pageframe_info not implemented\n");
  64.207 +}
  64.208 +
  64.209 +///////////////////////////////
  64.210 +// from drivers/char/serial.c
  64.211 +///////////////////////////////
  64.212 +
  64.213 +#include <asm/hpsim_ssc.h>
  64.214 +
  64.215 +int
  64.216 +ia64_serial_putc(unsigned char c)
  64.217 +{
  64.218 +	if (platform_is_hp_ski()) {
  64.219 +		ia64_ssc(c, 0, 0, 0, SSC_PUTCHAR);
  64.220 +	}
  64.221 +	else {
  64.222 +// this is tested on HP Longs Peak platform... it
  64.223 +// will probably work on other Itanium platforms as
  64.224 +// well, but undoubtedly needs work
  64.225 +		longs_peak_putc(c);
  64.226 +	}
  64.227 +	return 1;
  64.228 +}
  64.229 +
  64.230 +///////////////////////////////
  64.231 +// from common/physdev.c
  64.232 +///////////////////////////////
  64.233 +void
  64.234 +physdev_init_dom0(struct domain *d)
  64.235 +{
  64.236 +}
  64.237 +
  64.238 +int
  64.239 +physdev_pci_access_modify(domid_t id, int bus, int dev, int func, int enable)
  64.240 +{
  64.241 +	return -EINVAL;
  64.242 +}
    65.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    65.2 +++ b/xen/arch/ia64/xensetup.c	Mon Feb 14 12:25:30 2005 +0000
    65.3 @@ -0,0 +1,415 @@
    65.4 +/******************************************************************************
    65.5 + * kernel.c
    65.6 + * 
    65.7 + * This file should contain architecture-independent bootstrap and low-level
    65.8 + * help routines. It's a bit x86/PC specific right now!
    65.9 + * 
   65.10 + * Copyright (c) 2002-2003 K A Fraser
   65.11 + */
   65.12 +
   65.13 +//#include <stdarg.h>
   65.14 +#include <xen/config.h>
   65.15 +#include <xen/lib.h>
   65.16 +#include <xen/errno.h>
   65.17 +//#include <xen/spinlock.h>
   65.18 +#include <xen/multiboot.h>
   65.19 +#include <xen/sched.h>
   65.20 +#include <xen/mm.h>
   65.21 +//#include <xen/delay.h>
   65.22 +#include <xen/compile.h>
   65.23 +//#include <xen/console.h>
   65.24 +//#include <xen/serial.h>
   65.25 +#include <xen/trace.h>
   65.26 +//#include <asm/shadow.h>
   65.27 +//#include <asm/io.h>
   65.28 +//#include <asm/uaccess.h>
   65.29 +//#include <asm/do