ia64/xen-unstable

changeset 6017:f294acb25858

Merge.
author kaf24@firebug.cl.cam.ac.uk
date Wed Aug 03 09:35:38 2005 +0000 (2005-08-03)
parents 63995acdd34a 04dfb5158f3a
children d9442abaa980
files .hgignore Config.mk Makefile buildconfigs/Rules.mk buildconfigs/mk.linux-2.6-xen buildconfigs/mk.linux-2.6-xen0 buildconfigs/mk.linux-2.6-xenU docs/misc/shype4xen_readme.txt docs/src/user.tex linux-2.4-xen-sparse/arch/xen/Makefile linux-2.4-xen-sparse/arch/xen/config.in linux-2.4-xen-sparse/arch/xen/kernel/time.c linux-2.4-xen-sparse/include/asm-xen/bugs.h linux-2.4-xen-sparse/include/asm-xen/fixmap.h linux-2.4-xen-sparse/include/asm-xen/highmem.h linux-2.4-xen-sparse/include/asm-xen/hw_irq.h linux-2.4-xen-sparse/include/asm-xen/io.h linux-2.4-xen-sparse/include/asm-xen/irq.h linux-2.4-xen-sparse/include/asm-xen/mmu_context.h linux-2.4-xen-sparse/include/asm-xen/page.h linux-2.4-xen-sparse/include/asm-xen/pci.h linux-2.4-xen-sparse/include/asm-xen/pgalloc.h linux-2.4-xen-sparse/include/asm-xen/pgtable.h linux-2.4-xen-sparse/include/asm-xen/processor.h linux-2.4-xen-sparse/include/asm-xen/segment.h linux-2.4-xen-sparse/include/asm-xen/smp.h linux-2.4-xen-sparse/include/asm-xen/system.h linux-2.4-xen-sparse/include/asm-xen/vga.h linux-2.4-xen-sparse/include/linux/blk.h linux-2.4-xen-sparse/include/linux/highmem.h linux-2.4-xen-sparse/include/linux/irq.h linux-2.4-xen-sparse/include/linux/mm.h linux-2.4-xen-sparse/include/linux/sched.h linux-2.4-xen-sparse/include/linux/skbuff.h linux-2.4-xen-sparse/include/linux/timer.h linux-2.4-xen-sparse/mkbuildtree linux-2.6-xen-sparse/arch/xen/Kconfig linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c linux-2.6-xen-sparse/arch/xen/i386/kernel/mpparse.c linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c 
linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6-xen-sparse/arch/xen/i386/pci/irq.c linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c linux-2.6-xen-sparse/arch/xen/kernel/reboot.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile linux-2.6-xen-sparse/drivers/xen/Makefile linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c linux-2.6-xen-sparse/include/asm-generic/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/io.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/balloon.h linux-2.6-xen-sparse/include/asm-xen/ctrl_if.h linux-2.6-xen-sparse/include/asm-xen/evtchn.h linux-2.6-xen-sparse/include/asm-xen/hypervisor.h linux-2.6-xen-sparse/include/asm-xen/xenbus.h linux-2.6-xen-sparse/include/linux/mm.h linux-2.6-xen-sparse/kernel/irq/manage.c linux-2.6-xen-sparse/mm/memory.c patches/linux-2.6.12/smp-alts.patch tools/Makefile tools/consoled/io.c tools/consoled/xc_console.c tools/debugger/gdb/gdbbuild tools/debugger/libxendebug/xendebug.c 
tools/debugger/pdb/Makefile tools/debugger/pdb/PDB.ml tools/debugger/pdb/Process.ml tools/debugger/pdb/Process.mli tools/debugger/pdb/Util.ml tools/debugger/pdb/Xen_domain.ml tools/debugger/pdb/Xen_domain.mli tools/debugger/pdb/debugger.ml tools/debugger/pdb/linux-2.6-module/Makefile tools/debugger/pdb/linux-2.6-module/debug.c tools/debugger/pdb/linux-2.6-module/module.c tools/debugger/pdb/linux-2.6-module/pdb_debug.h tools/debugger/pdb/linux-2.6-module/pdb_module.h tools/debugger/pdb/linux-2.6-patches/Makefile tools/debugger/pdb/linux-2.6-patches/i386_ksyms.patch tools/debugger/pdb/linux-2.6-patches/kdebug.patch tools/debugger/pdb/linux-2.6-patches/makefile.patch tools/debugger/pdb/linux-2.6-patches/ptrace.patch tools/debugger/pdb/linux-2.6-patches/traps.patch tools/debugger/pdb/pdb_caml_process.c tools/debugger/pdb/pdb_caml_xcs.c tools/debugger/pdb/pdb_caml_xen.h tools/debugger/pdb/readme tools/debugger/pdb/server.ml tools/examples/xend-config.sxp tools/firmware/rombios/rombios.c tools/ioemu/monitor.c tools/ioemu/target-i386-dm/qemu-dm.debug tools/ioemu/vnc.c tools/libxc/Makefile tools/libxc/xc.h tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_linux_save.c tools/libxc/xc_load_elf.c tools/libxc/xc_private.c tools/libxc/xc_private.h tools/libxc/xc_ptrace.c tools/misc/Makefile tools/misc/policyprocessor/XmlToBinInterface.java tools/python/setup.py tools/python/xen/lowlevel/xc/xc.c tools/python/xen/lowlevel/xs/xs.c tools/python/xen/lowlevel/xu/xu.c tools/python/xen/sv/CreateDomain.py tools/python/xen/sv/Daemon.py tools/python/xen/sv/DomInfo.py tools/python/xen/sv/DomList.py tools/python/xen/sv/GenTabbed.py tools/python/xen/sv/HTMLBase.py tools/python/xen/sv/Main.py tools/python/xen/sv/MigrateDomain.py tools/python/xen/sv/NodeInfo.py tools/python/xen/sv/RestoreDomain.py tools/python/xen/sv/SaveDomain.py tools/python/xen/sv/TabView.py tools/python/xen/sv/Wizard.py tools/python/xen/sv/__init__.py 
tools/python/xen/sv/params.py tools/python/xen/sv/util.py tools/python/xen/xend/XendCheckpoint.py tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/XendRoot.py tools/python/xen/xend/image.py tools/python/xen/xend/server/SrvDaemon.py tools/python/xen/xend/server/channel.py tools/python/xen/xend/xenstore/xsnode.py tools/python/xen/xm/create.py tools/python/xen/xm/main.py tools/sv/Makefile tools/sv/images/destroy.png tools/sv/images/finish.png tools/sv/images/internet.jpg tools/sv/images/internet.psd tools/sv/images/next.png tools/sv/images/orb_01.jpg tools/sv/images/orb_02.jpg tools/sv/images/pause.png tools/sv/images/previous.png tools/sv/images/reboot.png tools/sv/images/seperator-left-highlight.jpg tools/sv/images/seperator-right-highlight.jpg tools/sv/images/seperator.jpg tools/sv/images/shutdown.png tools/sv/images/small-destroy.png tools/sv/images/small-pause.png tools/sv/images/small-unpause.png tools/sv/images/unpause.png tools/sv/images/xen.png tools/sv/inc/script.js tools/sv/inc/style.css tools/sv/index.psp tools/xcs/xcs.h tools/xcutils/xc_restore.c tools/xenstore/Makefile tools/xenstore/TODO tools/xenstore/testsuite/02directory.sh tools/xenstore/testsuite/07watch.sh tools/xenstore/testsuite/08transaction.sh tools/xenstore/testsuite/09domain.sh tools/xenstore/testsuite/10domain-homedir.sh tools/xenstore/testsuite/11domain-watch.sh tools/xenstore/testsuite/12readonly.sh tools/xenstore/testsuite/13watch-ack.sh tools/xenstore/testsuite/test.sh tools/xenstore/xenstored_core.c tools/xenstore/xenstored_core.h tools/xenstore/xenstored_domain.c tools/xenstore/xenstored_domain.h tools/xenstore/xenstored_transaction.c tools/xenstore/xenstored_transaction.h tools/xenstore/xenstored_watch.c tools/xenstore/xenstored_watch.h tools/xenstore/xs.c tools/xenstore/xs.h tools/xenstore/xs_lib.h tools/xenstore/xs_random.c tools/xenstore/xs_test.c xen/Makefile xen/acm/acm_core.c xen/acm/acm_policy.c xen/arch/ia64/Makefile 
xen/arch/ia64/asm-offsets.c xen/arch/ia64/asm-xsi-offsets.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/hypercall.c xen/arch/ia64/hyperprivop.S xen/arch/ia64/mmio.c xen/arch/ia64/pal_emul.c xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.11/kregs.h xen/arch/ia64/pcdp.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/vcpu.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_hypercall.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_support.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vtlb.c xen/arch/ia64/xenasm.S xen/arch/ia64/xenmem.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c xen/arch/x86/Makefile xen/arch/x86/acpi/boot.c xen/arch/x86/apic.c xen/arch/x86/cpu/common.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/genapic/es7000plat.c xen/arch/x86/i8259.c xen/arch/x86/io_apic.c xen/arch/x86/mm.c xen/arch/x86/mpparse.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/smpboot.c xen/arch/x86/time.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/common/ac_timer.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/grant_table.c xen/common/page_alloc.c xen/common/sched_sedf.c xen/common/symbols.c xen/common/xmalloc.c xen/drivers/char/console.c xen/include/acm/acm_core.h xen/include/acm/acm_hooks.h xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/event.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_uaccess.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xensystem.h 
xen/include/asm-x86/apicdef.h xen/include/asm-x86/config.h xen/include/asm-x86/fixmap.h xen/include/asm-x86/genapic.h xen/include/asm-x86/hpet.h xen/include/asm-x86/mach-bigsmp/mach_apic.h xen/include/asm-x86/mach-default/mach_apic.h xen/include/asm-x86/mach-es7000/mach_apic.h xen/include/asm-x86/mach-generic/mach_apic.h xen/include/asm-x86/mach-summit/mach_apic.h xen/include/asm-x86/mach-summit/mach_mpparse.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_public.h xen/include/asm-x86/time.h xen/include/asm-x86/types.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_virpit.h xen/include/asm-x86/vmx_vmcs.h xen/include/public/arch-ia64.h xen/include/public/grant_table.h xen/include/public/io/blkif.h xen/include/public/io/netif.h xen/include/public/xen.h xen/include/xen/ac_timer.h xen/include/xen/mm.h xen/include/xen/sched.h xen/include/xen/symbols.h xen/include/xen/time.h xen/tools/Makefile xen/tools/symbols.c
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Wed Aug 03 09:35:16 2005 +0000
     1.2 +++ b/xen/arch/ia64/Makefile	Wed Aug 03 09:35:38 2005 +0000
     1.3 @@ -34,9 +34,28 @@ default: $(OBJS) head.o ia64lib.o xen.ld
     1.4  		 > $(BASEDIR)/System.map
     1.5  
     1.6  
     1.7 -asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
     1.8 +asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
     1.9  	$(CC) $(CFLAGS) -S -o $@ $<
    1.10  
    1.11 +asm-xsi-offsets.s: asm-xsi-offsets.c 
    1.12 +	$(CC) $(CFLAGS) -S -o $@ $<
    1.13 +	
    1.14 +$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
    1.15 +	@(set -e; \
    1.16 +	  echo "/*"; \
    1.17 +	  echo " * DO NOT MODIFY."; \
    1.18 +	  echo " *"; \
    1.19 +	  echo " * This file was auto-generated from $<"; \
    1.20 +	  echo " *"; \
    1.21 +	  echo " */"; \
    1.22 +	  echo ""; \
    1.23 +	  echo "#ifndef __ASM_XSI_OFFSETS_H__"; \
    1.24 +	  echo "#define __ASM_XSI_OFFSETS_H__"; \
    1.25 +	  echo ""; \
    1.26 +	  sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
    1.27 +	  echo ""; \
    1.28 +	  echo "#endif") <$< >$@
    1.29 +
    1.30  $(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
    1.31  # Need such symbol link to make linux headers available
    1.32  	[ -e $(BASEDIR)/include/linux ] \
    1.33 @@ -60,6 +79,7 @@ ia64lib.o:
    1.34  
    1.35  clean:
    1.36  	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
    1.37 +	rm -f asm-xsi-offsets.s $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
    1.38  	rm -f lib/*.o
    1.39  
    1.40  .PHONY: default clean
     2.1 --- a/xen/arch/ia64/asm-offsets.c	Wed Aug 03 09:35:16 2005 +0000
     2.2 +++ b/xen/arch/ia64/asm-offsets.c	Wed Aug 03 09:35:38 2005 +0000
     2.3 @@ -42,29 +42,34 @@ void foo(void)
     2.4  
     2.5  	BLANK();
     2.6  
     2.7 -	DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
     2.8 -	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
     2.9 -	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
    2.10 -	DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
    2.11 -	DEFINE(XSI_IFA_OFS, offsetof(vcpu_info_t, arch.ifa));
    2.12 -	DEFINE(XSI_ITIR_OFS, offsetof(vcpu_info_t, arch.itir));
    2.13 -	DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
    2.14 -	DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
    2.15 -	DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
    2.16 -	DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
    2.17 -	DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
    2.18 -	DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
    2.19 -	DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
    2.20 -	DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
    2.21 -	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
    2.22 -	DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
    2.23 -	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
    2.24 -	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
    2.25 -	DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
    2.26 -	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
    2.27 -	DEFINE(XSI_TPR_OFS, offsetof(vcpu_info_t, arch.tpr));
    2.28 -	DEFINE(XSI_PTA_OFS, offsetof (vcpu_info_t, arch.pta));
    2.29 -	DEFINE(XSI_ITV_OFS, offsetof(vcpu_info_t, arch.itv));
    2.30 +	DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
    2.31 +	DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
    2.32 +	DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
    2.33 +	DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
    2.34 +	DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
    2.35 +	DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
    2.36 +	DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
    2.37 +	DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
    2.38 +	DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
    2.39 +
    2.40 +	DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
    2.41 +	DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
    2.42 +	DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
    2.43 +	DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
    2.44 +	DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
    2.45 +	DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
    2.46 +	DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
    2.47 +	DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
    2.48 +	DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
    2.49 +	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
    2.50 +	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
    2.51 +	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
    2.52 +	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t, incomplete_regframe));
    2.53 +	DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
    2.54 +	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
    2.55 +	DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
    2.56 +	DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
    2.57 +	DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
    2.58  	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
    2.59  	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
    2.60  	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c	Wed Aug 03 09:35:38 2005 +0000
     3.3 @@ -0,0 +1,110 @@
     3.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     3.5 +/*
     3.6 + * asm-xsi-offsets.c_
     3.7 + * Copyright (c) 2005, Intel Corporation.
     3.8 + *      Kun Tian (Kevin Tian) <kevin.tian@intel.com>
     3.9 + *      Eddie Dong  <eddie.dong@intel.com>
    3.10 + *      Fred Yang <fred.yang@intel.com>
    3.11 + *
    3.12 + * This program is free software; you can redistribute it and/or modify it
    3.13 + * under the terms and conditions of the GNU General Public License,
    3.14 + * version 2, as published by the Free Software Foundation.
    3.15 + *
    3.16 + * This program is distributed in the hope it will be useful, but WITHOUT
    3.17 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.18 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    3.19 + * more details.
    3.20 + *
    3.21 + * You should have received a copy of the GNU General Public License along with
    3.22 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    3.23 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    3.24 + *
    3.25 + */
    3.26 +
    3.27 +/*
    3.28 + * Generate definitions needed by assembly language modules.
    3.29 + * This code generates raw asm output which is post-processed
    3.30 + * to extract and format the required data.
    3.31 + */
    3.32 +
    3.33 +#include <xen/config.h>
    3.34 +#include <xen/sched.h>
    3.35 +#include <asm/processor.h>
    3.36 +#include <asm/ptrace.h>
    3.37 +#include <public/xen.h>
    3.38 +#ifdef CONFIG_VTI
    3.39 +#include <asm/tlb.h>
    3.40 +#include <asm/regs.h>
    3.41 +#endif // CONFIG_VTI
    3.42 +
    3.43 +#define task_struct vcpu
    3.44 +
    3.45 +#define DEFINE(sym, val) \
    3.46 +        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    3.47 +
    3.48 +#define BLANK() asm volatile("\n->" : : )
    3.49 +
    3.50 +#define OFFSET(_sym, _str, _mem) \
    3.51 +    DEFINE(_sym, offsetof(_str, _mem));
    3.52 +
    3.53 +void foo(void)
    3.54 +{
    3.55 +
    3.56 +	DEFINE(XSI_BASE, SHARED_ARCHINFO_ADDR);
    3.57 +
    3.58 +	DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
    3.59 +	DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
    3.60 +	DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
    3.61 +	DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
    3.62 +	DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
    3.63 +	DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
    3.64 +	DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
    3.65 +	DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
    3.66 +	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
    3.67 +	DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
    3.68 +	DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
    3.69 +	DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
    3.70 +	DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
    3.71 +	DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
    3.72 +	DEFINE(XSI_IIPA_OFS, offsetof(mapped_regs_t, iipa));
    3.73 +	DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
    3.74 +	DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
    3.75 +	DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
    3.76 +	DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
    3.77 +	DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
    3.78 +	DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha));
    3.79 +	DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
    3.80 +	DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
    3.81 +	DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
    3.82 +	DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
    3.83 +	DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
    3.84 +	DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
    3.85 +	DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
    3.86 +	DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
    3.87 +	DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
    3.88 +	DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
    3.89 +	DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
    3.90 +	DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
    3.91 +	DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
    3.92 +	DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
    3.93 +	DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
    3.94 +	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
    3.95 +	DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
    3.96 +
    3.97 +	DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
    3.98 +	DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
    3.99 +
   3.100 +	DEFINE(XSI_BANK0_R16_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
   3.101 +	DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
   3.102 +	DEFINE(XSI_BANK1_R16_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
   3.103 +	DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
   3.104 +	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
   3.105 +	DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
   3.106 +	DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
   3.107 +	DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
   3.108 +	DEFINE(XSI_PKR0_OFS, offsetof(mapped_regs_t, pkrs[0]));
   3.109 +	DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
   3.110 +	DEFINE(XSI_TMP0_OFS, offsetof(mapped_regs_t, tmp[0]));
   3.111 +	DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
   3.112 +	
   3.113 +}
     4.1 --- a/xen/arch/ia64/dom_fw.c	Wed Aug 03 09:35:16 2005 +0000
     4.2 +++ b/xen/arch/ia64/dom_fw.c	Wed Aug 03 09:35:38 2005 +0000
     4.3 @@ -291,6 +291,10 @@ xen_pal_emulator(unsigned long index, un
     4.4  	long r11 = 0;
     4.5  	long status = -1;
     4.6  
     4.7 +#define USE_PAL_EMULATOR
     4.8 +#ifdef USE_PAL_EMULATOR
     4.9 +	return pal_emulator_static(index);
    4.10 +#endif
    4.11  	if (running_on_sim) return pal_emulator_static(index);
    4.12  	if (index >= PAL_COPY_PAL) {
    4.13  		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %d!!!!\n",
    4.14 @@ -314,12 +318,10 @@ xen_pal_emulator(unsigned long index, un
    4.15  		break;
    4.16  	    case PAL_PTCE_INFO:
    4.17  		{
    4.18 -			ia64_ptce_info_t ptce;
    4.19 -			status = ia64_get_ptce(&ptce);
    4.20 -			if (status != 0) break;
    4.21 -			r9 = ptce.base;
    4.22 -			r10 = (ptce.count[0]<<32)|(ptce.count[1]&0xffffffffL);
    4.23 -			r11 = (ptce.stride[0]<<32)|(ptce.stride[1]&0xffffffffL);
    4.24 +			// return hard-coded xen-specific values because ptc.e
    4.25 +			// is emulated on xen to always flush everything
    4.26 +			// these values result in only one ptc.e instruction
    4.27 +			status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
    4.28  		}
    4.29  		break;
    4.30  	    case PAL_VERSION:
    4.31 @@ -335,7 +337,10 @@ xen_pal_emulator(unsigned long index, un
    4.32  		status = ia64_pal_cache_summary(&r9,&r10);
    4.33  		break;
    4.34  	    case PAL_VM_SUMMARY:
    4.35 -		status = ia64_pal_vm_summary(&r9,&r10);
    4.36 +		// FIXME: what should xen return for these, figure out later
    4.37 +		// For now, linux does the right thing if pal call fails
    4.38 +		// In particular, rid_size must be set properly!
    4.39 +		//status = ia64_pal_vm_summary(&r9,&r10);
    4.40  		break;
    4.41  	    case PAL_RSE_INFO:
    4.42  		status = ia64_pal_rse_info(&r9,&r10);
     5.1 --- a/xen/arch/ia64/domain.c	Wed Aug 03 09:35:16 2005 +0000
     5.2 +++ b/xen/arch/ia64/domain.c	Wed Aug 03 09:35:38 2005 +0000
     5.3 @@ -212,6 +212,10 @@ void arch_do_createdomain(struct vcpu *v
     5.4  	 */
     5.5  
     5.6  	memset(d->shared_info, 0, PAGE_SIZE);
     5.7 +	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
     5.8 +			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
     5.9 +	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
    5.10 +	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
    5.11  	v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    5.12  	/* Mask all events, and specific port will be unmasked
    5.13  	 * when customer subscribes to it.
    5.14 @@ -232,8 +236,8 @@ void arch_do_createdomain(struct vcpu *v
    5.15  	/* FIXME: This is identity mapped address for xenheap. 
    5.16  	 * Do we need it at all?
    5.17  	 */
    5.18 -	d->xen_vastart = 0xf000000000000000;
    5.19 -	d->xen_vaend = 0xf300000000000000;
    5.20 +	d->xen_vastart = XEN_START_ADDR;
    5.21 +	d->xen_vaend = XEN_END_ADDR;
    5.22  	d->arch.breakimm = 0x1000;
    5.23  }
    5.24  #else // CONFIG_VTI
    5.25 @@ -252,12 +256,16 @@ void arch_do_createdomain(struct vcpu *v
    5.26     		while (1);
    5.27  	}
    5.28  	memset(d->shared_info, 0, PAGE_SIZE);
    5.29 +	d->shared_info->vcpu_data[0].arch.privregs = 
    5.30 +			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
    5.31 +	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
    5.32 +	memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
    5.33  	v->vcpu_info = &(d->shared_info->vcpu_data[0]);
    5.34  
    5.35 -	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
    5.36 +	d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
    5.37  	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
    5.38  		BUG();
    5.39 -	v->vcpu_info->arch.metaphysical_mode = 1;
    5.40 +	VCPU(v, metaphysical_mode) = 1;
    5.41  	v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
    5.42  	v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
    5.43  #define DOMAIN_RID_BITS_DEFAULT 18
    5.44 @@ -266,9 +274,9 @@ void arch_do_createdomain(struct vcpu *v
    5.45  	v->arch.starting_rid = d->arch.starting_rid;
    5.46  	v->arch.ending_rid = d->arch.ending_rid;
    5.47  	// the following will eventually need to be negotiated dynamically
    5.48 -	d->xen_vastart = 0xf000000000000000;
    5.49 -	d->xen_vaend = 0xf300000000000000;
    5.50 -	d->shared_info_va = 0xf100000000000000;
    5.51 +	d->xen_vastart = XEN_START_ADDR;
    5.52 +	d->xen_vaend = XEN_END_ADDR;
    5.53 +	d->shared_info_va = SHAREDINFO_ADDR;
    5.54  	d->arch.breakimm = 0x1000;
    5.55  	v->arch.breakimm = d->arch.breakimm;
    5.56  
    5.57 @@ -292,7 +300,15 @@ void arch_getdomaininfo_ctxt(struct vcpu
    5.58  
    5.59  	printf("arch_getdomaininfo_ctxt\n");
    5.60  	c->regs = *regs;
    5.61 -	c->vcpu = v->vcpu_info->arch;
    5.62 +	c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
    5.63 +#if 0
    5.64 +	if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
    5.65 +			v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
    5.66 +		printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
    5.67 +		return -EFAULT;
    5.68 +	}
    5.69 +#endif
    5.70 +
    5.71  	c->shared = v->domain->shared_info->arch;
    5.72  }
    5.73  
    5.74 @@ -307,13 +323,20 @@ int arch_set_info_guest(struct vcpu *v, 
    5.75  	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
    5.76  	regs->ar_rsc |= (2 << 2); /* force PL2/3 */
    5.77  
    5.78 -	v->vcpu_info->arch = c->vcpu;
    5.79 + 	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
    5.80 +	if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
    5.81 +			   c->vcpu.privregs, sizeof(mapped_regs_t))) {
    5.82 +	    printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
    5.83 +	    return -EFAULT;
    5.84 +	}
    5.85 +
    5.86  	init_all_rr(v);
    5.87  
    5.88  	// this should be in userspace
    5.89 -	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L);  //FIXME
    5.90 -	v->vcpu_info->arch.banknum = 1;
    5.91 -	v->vcpu_info->arch.metaphysical_mode = 1;
    5.92 +	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
    5.93 +	v->arch.domain_itm_last = -1L;
    5.94 + 	VCPU(v, banknum) = 1;
    5.95 + 	VCPU(v, metaphysical_mode) = 1;
    5.96  
    5.97  	v->domain->shared_info->arch = c->shared;
    5.98  	return 0;
    5.99 @@ -325,6 +348,7 @@ int arch_set_info_guest(
   5.100      struct domain *d = v->domain;
   5.101      int i, rc, ret;
   5.102      unsigned long progress = 0;
   5.103 +    shared_iopage_t *sp;
   5.104  
   5.105      if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   5.106          return 0;
   5.107 @@ -350,8 +374,17 @@ int arch_set_info_guest(
   5.108      /* FIXME: only support PMT table continuously by far */
   5.109      d->arch.pmt = __va(c->pt_base);
   5.110      d->arch.max_pfn = c->pt_max_pfn;
   5.111 -    v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
   5.112 -    memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
   5.113 +    d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
   5.114 +    sp = get_sp(d);
   5.115 +    memset((char *)sp,0,PAGE_SIZE);
   5.116 +    /* FIXME: temp due to old CP */
   5.117 +    sp->sp_global.eport = 2;
   5.118 +#ifdef V_IOSAPIC_READY
   5.119 +    sp->vcpu_number = 1;
   5.120 +#endif
   5.121 +    /* TEMP */
   5.122 +    d->arch.vmx_platform.pib_base = 0xfee00000UL;
   5.123 +    
   5.124  
   5.125      if (c->flags & VGCF_VMX_GUEST) {
   5.126  	if (!vmx_enabled)
   5.127 @@ -370,7 +403,7 @@ int arch_set_info_guest(
   5.128      if (v == d->vcpu[0]) {
   5.129  	memset(&d->shared_info->evtchn_mask[0], 0xff,
   5.130  		sizeof(d->shared_info->evtchn_mask));
   5.131 -	clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
   5.132 +	clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
   5.133      }
   5.134      /* Setup domain context. Actually IA-64 is a bit different with
   5.135       * x86, with almost all system resources better managed by HV
   5.136 @@ -380,8 +413,8 @@ int arch_set_info_guest(
   5.137      new_thread(v, c->guest_iip, 0, 0);
   5.138  
   5.139  
   5.140 -    d->xen_vastart = 0xf000000000000000;
   5.141 -    d->xen_vaend = 0xf300000000000000;
   5.142 +    d->xen_vastart = XEN_START_ADDR;
   5.143 +    d->xen_vaend = XEN_END_ADDR;
   5.144      d->arch.breakimm = 0x1000 + d->domain_id;
   5.145      v->arch._thread.on_ustack = 0;
   5.146  
   5.147 @@ -394,7 +427,13 @@ int arch_set_info_guest(
   5.148  
   5.149  void arch_do_boot_vcpu(struct vcpu *v)
   5.150  {
   5.151 +	struct domain *d = v->domain;
   5.152  	printf("arch_do_boot_vcpu: not implemented\n");
   5.153 +
   5.154 +	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
   5.155 +			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
   5.156 +	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
   5.157 +	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
   5.158  	return;
   5.159  }
   5.160  
   5.161 @@ -449,8 +488,8 @@ void new_thread(struct vcpu *v,
   5.162  		VPD_CR(v, dcr) = 0;
   5.163  	} else {
   5.164  		regs->r28 = dom_fw_setup(d,saved_command_line,256L);
   5.165 -		v->vcpu_info->arch.banknum = 1;
   5.166 -		v->vcpu_info->arch.metaphysical_mode = 1;
   5.167 +		VCPU(v, banknum) = 1;
   5.168 +		VCPU(v, metaphysical_mode) = 1;
   5.169  		d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   5.170  	}
   5.171  }
   5.172 @@ -482,8 +521,8 @@ void new_thread(struct vcpu *v,
   5.173  	regs->ar_fpsr = FPSR_DEFAULT;
   5.174  	init_all_rr(v);
   5.175  	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   5.176 -	v->vcpu_info->arch.banknum = 1;
   5.177 -	v->vcpu_info->arch.metaphysical_mode = 1;
   5.178 +	VCPU(v, banknum) = 1;
   5.179 +	VCPU(v, metaphysical_mode) = 1;
   5.180  	d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   5.181  }
   5.182  #endif // CONFIG_VTI
   5.183 @@ -894,7 +933,6 @@ void build_shared_info(struct domain *d)
   5.184  
   5.185      /* Set up shared-info area. */
   5.186      update_dom_time(d);
   5.187 -    d->shared_info->domain_time = 0;
   5.188  
   5.189      /* Mask all upcalls... */
   5.190      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   5.191 @@ -1072,12 +1110,12 @@ if (d == dom0)
   5.192  #endif
   5.193      serial_input_init();
   5.194      if (d == dom0) {
   5.195 -    	v->vcpu_info->arch.delivery_mask[0] = -1L;
   5.196 -    	v->vcpu_info->arch.delivery_mask[1] = -1L;
   5.197 -    	v->vcpu_info->arch.delivery_mask[2] = -1L;
   5.198 -    	v->vcpu_info->arch.delivery_mask[3] = -1L;
   5.199 +    	VCPU(v, delivery_mask[0]) = -1L;
   5.200 +    	VCPU(v, delivery_mask[1]) = -1L;
   5.201 +    	VCPU(v, delivery_mask[2]) = -1L;
   5.202 +    	VCPU(v, delivery_mask[3]) = -1L;
   5.203      }
   5.204 -    else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   5.205 +    else __set_bit(0x30,VCPU(v, delivery_mask));
   5.206  
   5.207      return 0;
   5.208  }
   5.209 @@ -1233,12 +1271,12 @@ if (d == dom0)
   5.210  #endif
   5.211  	serial_input_init();
   5.212  	if (d == dom0) {
   5.213 -		v->vcpu_info->arch.delivery_mask[0] = -1L;
   5.214 -		v->vcpu_info->arch.delivery_mask[1] = -1L;
   5.215 -		v->vcpu_info->arch.delivery_mask[2] = -1L;
   5.216 -		v->vcpu_info->arch.delivery_mask[3] = -1L;
   5.217 +		VCPU(v, delivery_mask[0]) = -1L;
   5.218 +		VCPU(v, delivery_mask[1]) = -1L;
   5.219 +		VCPU(v, delivery_mask[2]) = -1L;
   5.220 +		VCPU(v, delivery_mask[3]) = -1L;
   5.221  	}
   5.222 -	else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   5.223 +	else __set_bit(0x30, VCPU(v, delivery_mask));
   5.224  
   5.225  	return 0;
   5.226  }
   5.227 @@ -1285,7 +1323,7 @@ int construct_domU(struct domain *d,
   5.228  #endif
   5.229  	new_thread(v, pkern_entry, 0, 0);
   5.230  	printk("new_thread returns\n");
   5.231 -	__set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   5.232 +	__set_bit(0x30, VCPU(v, delivery_mask));
   5.233  
   5.234  	return 0;
   5.235  }
     6.1 --- a/xen/arch/ia64/hypercall.c	Wed Aug 03 09:35:16 2005 +0000
     6.2 +++ b/xen/arch/ia64/hypercall.c	Wed Aug 03 09:35:38 2005 +0000
     6.3 @@ -41,13 +41,13 @@ ia64_hypercall (struct pt_regs *regs)
     6.4  		// to a yet-to-be-found bug where pending_interruption
     6.5  		// is zero when it shouldn't be. Since PAL is called
     6.6  		// in the idle loop, this should resolve it
     6.7 -		v->vcpu_info->arch.pending_interruption = 1;
     6.8 +		VCPU(v,pending_interruption) = 1;
     6.9  #endif
    6.10  		if (regs->r28 == PAL_HALT_LIGHT) {
    6.11  #define SPURIOUS_VECTOR 15
    6.12  			pi = vcpu_check_pending_interrupts(v);
    6.13  			if (pi != SPURIOUS_VECTOR) {
    6.14 -				if (!v->vcpu_info->arch.pending_interruption)
    6.15 +				if (!VCPU(v,pending_interruption))
    6.16  					idle_when_pending++;
    6.17  				vcpu_pend_unspecified_interrupt(v);
    6.18  //printf("idle w/int#%d pending!\n",pi);
     7.1 --- a/xen/arch/ia64/hyperprivop.S	Wed Aug 03 09:35:16 2005 +0000
     7.2 +++ b/xen/arch/ia64/hyperprivop.S	Wed Aug 03 09:35:38 2005 +0000
     7.3 @@ -18,12 +18,18 @@
     7.4  #define FAST_HYPERPRIVOPS
     7.5  #define FAST_HYPERPRIVOP_CNT
     7.6  #define FAST_REFLECT_CNT
     7.7 -#define FAST_TICK
     7.8 +//#define FAST_TICK
     7.9  #define FAST_BREAK
    7.10  #define FAST_ACCESS_REFLECT
    7.11 +#define FAST_RFI
    7.12 +#define FAST_SSM_I
    7.13 +#define FAST_PTC_GA
    7.14  #undef RFI_TO_INTERRUPT // not working yet
    7.15  #endif
    7.16  
    7.17 +// FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
    7.18 +//#define HANDLE_AR_UNAT
    7.19 +
    7.20  // FIXME: This is defined in include/asm-ia64/hw_irq.h but this
    7.21  // doesn't appear to be include'able from assembly?
    7.22  #define IA64_TIMER_VECTOR 0xef
    7.23 @@ -183,6 +189,9 @@ 1:	// when we get to here r20=~=interrup
    7.24  //	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
    7.25  //	r31 == pr
    7.26  ENTRY(hyper_ssm_i)
    7.27 +#ifndef FAST_SSM_I
    7.28 +	br.spnt.few dispatch_break_fault ;;
    7.29 +#endif
    7.30  	// give up for now if: ipsr.be==1, ipsr.pp==1
    7.31  	mov r30=cr.ipsr;;
    7.32  	mov r29=cr.iip;;
    7.33 @@ -259,7 +268,8 @@ ENTRY(hyper_ssm_i)
    7.34  	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
    7.35  	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
    7.36  	bsw.1;;
    7.37 -	// FIXME: need to handle ar.unat!
    7.38 +	// FIXME?: ar.unat is not really handled correctly,
    7.39 +	// but may not matter if the OS is NaT-clean
    7.40  	.mem.offset 0,0; st8.spill [r2]=r16,16;
    7.41  	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
    7.42  	.mem.offset 0,0; st8.spill [r2]=r18,16;
    7.43 @@ -425,10 +435,12 @@ GLOBAL_ENTRY(fast_tick_reflect)
    7.44  	mov cr.iip=r24;;
    7.45  	// OK, now all set to go except for switch to virtual bank0
    7.46  	mov r30=r2; mov r29=r3;;
    7.47 +#ifdef HANDLE_AR_UNAT
    7.48 +	mov r28=ar.unat;
    7.49 +#endif
    7.50  	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
    7.51  	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
    7.52  	bsw.1;;
    7.53 -	// FIXME: need to handle ar.unat!
    7.54  	.mem.offset 0,0; st8.spill [r2]=r16,16;
    7.55  	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
    7.56  	.mem.offset 0,0; st8.spill [r2]=r18,16;
    7.57 @@ -445,9 +457,18 @@ GLOBAL_ENTRY(fast_tick_reflect)
    7.58  	.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
    7.59  	.mem.offset 0,0; st8.spill [r2]=r30,16;
    7.60  	.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
    7.61 -	movl r31=XSI_IPSR;;
    7.62 +#ifdef HANDLE_AR_UNAT
    7.63 +	// bank0 regs have no NaT bit, so ensure they are NaT clean
    7.64 +	mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
    7.65 +	mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
    7.66 +	mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
    7.67 +	mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
    7.68 +#endif
    7.69  	bsw.0 ;;
    7.70  	mov r2=r30; mov r3=r29;;
    7.71 +#ifdef HANDLE_AR_UNAT
    7.72 +	mov ar.unat=r28;
    7.73 +#endif
    7.74  	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
    7.75  	st4 [r20]=r0 ;;
    7.76  fast_tick_reflect_done:
    7.77 @@ -567,10 +588,12 @@ ENTRY(fast_reflect)
    7.78  	mov cr.iip=r20;;
    7.79  	// OK, now all set to go except for switch to virtual bank0
    7.80  	mov r30=r2; mov r29=r3;;
    7.81 +#ifdef HANDLE_AR_UNAT
    7.82 +	mov r28=ar.unat;
    7.83 +#endif
    7.84  	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
    7.85  	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
    7.86  	bsw.1;;
    7.87 -	// FIXME: need to handle ar.unat!
    7.88  	.mem.offset 0,0; st8.spill [r2]=r16,16;
    7.89  	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
    7.90  	.mem.offset 0,0; st8.spill [r2]=r18,16;
    7.91 @@ -587,9 +610,19 @@ ENTRY(fast_reflect)
    7.92  	.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
    7.93  	.mem.offset 0,0; st8.spill [r2]=r30,16;
    7.94  	.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
    7.95 +#ifdef HANDLE_AR_UNAT
    7.96 +	// bank0 regs have no NaT bit, so ensure they are NaT clean
    7.97 +	mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
    7.98 +	mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
    7.99 +	mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
   7.100 +	mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
   7.101 +#endif
   7.102  	movl r31=XSI_IPSR;;
   7.103  	bsw.0 ;;
   7.104  	mov r2=r30; mov r3=r29;;
   7.105 +#ifdef HANDLE_AR_UNAT
   7.106 +	mov ar.unat=r28;
   7.107 +#endif
   7.108  	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   7.109  	st4 [r20]=r0 ;;
   7.110  	mov pr=r31,-1 ;;
   7.111 @@ -637,6 +670,9 @@ GLOBAL_ENTRY(fast_access_reflect)
   7.112  
   7.113  // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
   7.114  ENTRY(hyper_rfi)
   7.115 +#ifndef FAST_RFI
   7.116 +	br.spnt.few dispatch_break_fault ;;
   7.117 +#endif
   7.118  	// if no interrupts pending, proceed
   7.119  	mov r30=r0
   7.120  	cmp.eq p7,p0=r20,r0
   7.121 @@ -736,7 +772,8 @@ just_do_rfi:
   7.122  	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
   7.123  	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
   7.124  	bsw.1;;
   7.125 -	// FIXME: need to handle ar.unat!
   7.126 +	// FIXME?: ar.unat is not really handled correctly,
   7.127 +	// but may not matter if the OS is NaT-clean
   7.128  	.mem.offset 0,0; ld8.fill r16=[r2],16 ;
   7.129  	.mem.offset 8,0; ld8.fill r17=[r3],16 ;;
   7.130  	.mem.offset 0,0; ld8.fill r18=[r2],16 ;
   7.131 @@ -1461,6 +1498,9 @@ ENTRY(hyper_ptc_ga)
   7.132  #ifdef CONFIG_SMP
   7.133  FIXME: ptc.ga instruction requires spinlock for SMP
   7.134  #endif
   7.135 +#ifndef FAST_PTC_GA
   7.136 +	br.spnt.few dispatch_break_fault ;;
   7.137 +#endif
   7.138  	// FIXME: validate not flushing Xen addresses
   7.139  #ifdef FAST_HYPERPRIVOP_CNT
   7.140  	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_PTC_GA);;
     8.1 --- a/xen/arch/ia64/mmio.c	Wed Aug 03 09:35:16 2005 +0000
     8.2 +++ b/xen/arch/ia64/mmio.c	Wed Aug 03 09:35:38 2005 +0000
     8.3 @@ -66,7 +66,7 @@ static void pib_write(VCPU *vcpu, void *
     8.4      default:
     8.5          if ( PIB_LOW_HALF(pib_off) ) {   // lower half
     8.6              if ( s != 8 || ma != 0x4 /* UC */ ) {
     8.7 -                panic("Undefined IPI-LHF write!\n");
     8.8 +                panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
     8.9              }
    8.10              else {
    8.11                  write_ipi(vcpu, pib_off, *(uint64_t *)src);
    8.12 @@ -135,13 +135,13 @@ static void low_mmio_access(VCPU *vcpu, 
    8.13      ioreq_t *p;
    8.14      unsigned long addr;
    8.15  
    8.16 -    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
    8.17 +    vio = get_vio(v->domain, v->vcpu_id);
    8.18      if (vio == 0) {
    8.19          panic("bad shared page: %lx", (unsigned long)vio);
    8.20      }
    8.21      p = &vio->vp_ioreq;
    8.22      p->addr = pa;
    8.23 -    p->size = 1<<s;
    8.24 +    p->size = s;
    8.25      p->count = 1;
    8.26      p->dir = dir;
    8.27      if(dir==IOREQ_WRITE)     //write;
    8.28 @@ -152,9 +152,9 @@ static void low_mmio_access(VCPU *vcpu, 
    8.29  
    8.30      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    8.31      p->state = STATE_IOREQ_READY;
    8.32 -    evtchn_send(IOPACKET_PORT);
    8.33 +    evtchn_send(iopacket_port(v->domain));
    8.34      vmx_wait_io();
    8.35 -    if(dir){ //read
    8.36 +    if(dir==IOREQ_READ){ //read
    8.37          *val=p->u.data;
    8.38      }
    8.39      return;
    8.40 @@ -168,13 +168,13 @@ static void legacy_io_access(VCPU *vcpu,
    8.41      ioreq_t *p;
    8.42      unsigned long addr;
    8.43  
    8.44 -    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
    8.45 +    vio = get_vio(v->domain, v->vcpu_id);
    8.46      if (vio == 0) {
    8.47          panic("bad shared page: %lx");
    8.48      }
    8.49      p = &vio->vp_ioreq;
    8.50      p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
    8.51 -    p->size = 1<<s;
    8.52 +    p->size = s;
    8.53      p->count = 1;
    8.54      p->dir = dir;
    8.55      if(dir==IOREQ_WRITE)     //write;
    8.56 @@ -185,11 +185,20 @@ static void legacy_io_access(VCPU *vcpu,
    8.57  
    8.58      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    8.59      p->state = STATE_IOREQ_READY;
    8.60 -    evtchn_send(IOPACKET_PORT);
    8.61 +    evtchn_send(iopacket_port(v->domain));
    8.62 +
    8.63      vmx_wait_io();
    8.64 -    if(dir){ //read
    8.65 +    if(dir==IOREQ_READ){ //read
    8.66          *val=p->u.data;
    8.67      }
    8.68 +#ifdef DEBUG_PCI
    8.69 +    if(dir==IOREQ_WRITE)
    8.70 +        if(p->addr == 0xcf8UL)
    8.71 +            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
    8.72 +    else
    8.73 +        if(p->addr == 0xcfcUL)
    8.74 +            printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
    8.75 +#endif //DEBUG_PCI
    8.76      return;
    8.77  }
    8.78  
    8.79 @@ -204,12 +213,13 @@ static void mmio_access(VCPU *vcpu, u64 
    8.80      switch (iot) {
    8.81      case GPFN_PIB:
    8.82          if(!dir)
    8.83 -            pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    8.84 +            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
    8.85          else
    8.86              pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    8.87          break;
    8.88      case GPFN_GFW:
    8.89          break;
    8.90 +    case GPFN_IOSAPIC:
    8.91      case GPFN_FRAME_BUFFER:
    8.92      case GPFN_LOW_MMIO:
    8.93          low_mmio_access(vcpu, src_pa, dest, s, dir);
    8.94 @@ -217,7 +227,6 @@ static void mmio_access(VCPU *vcpu, u64 
    8.95      case GPFN_LEGACY_IO:
    8.96          legacy_io_access(vcpu, src_pa, dest, s, dir);
    8.97          break;
    8.98 -    case GPFN_IOSAPIC:
    8.99      default:
   8.100          panic("Bad I/O access\n");
   8.101          break;
   8.102 @@ -342,6 +351,8 @@ static inline VCPU *lid_2_vcpu (struct d
   8.103  	LID	  lid;
   8.104  	for (i=0; i<MAX_VIRT_CPUS; i++) {
   8.105  		vcpu = d->vcpu[i];
   8.106 + 		if (!vcpu)
   8.107 + 			continue;
   8.108  		lid.val = VPD_CR(vcpu, lid);
   8.109  		if ( lid.id == id && lid.eid == eid ) {
   8.110  		    return vcpu;
   8.111 @@ -379,15 +390,16 @@ static int write_ipi (VCPU *vcpu, uint64
   8.112      inst_type 0:integer 1:floating point
   8.113   */
   8.114  extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
   8.115 -
   8.116 +#define SL_INTEGER  0        // store/load interger
   8.117 +#define SL_FLOATING    1       // store/load floating
   8.118  
   8.119  void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
   8.120  {
   8.121      REGS *regs;
   8.122      IA64_BUNDLE bundle;
   8.123 -    int slot, dir, inst_type=0;
   8.124 +    int slot, dir, inst_type;
   8.125      size_t size;
   8.126 -    u64 data, value, slot1a, slot1b;
   8.127 +    u64 data, value,post_update, slot1a, slot1b, temp;
   8.128      INST64 inst;
   8.129      regs=vcpu_regs(vcpu);
   8.130      bundle = __vmx_get_domain_bundle(regs->cr_iip);
   8.131 @@ -400,28 +412,70 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   8.132      }
   8.133      else if (slot == 2) inst.inst = bundle.slot2;
   8.134  
   8.135 +
   8.136 +    // Integer Load/Store
   8.137      if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
   8.138 -        inst_type=0;  //fp
   8.139 +        inst_type = SL_INTEGER;  //
   8.140          size=(inst.M1.x6&0x3);
   8.141          if((inst.M1.x6>>2)>0xb){      // write
   8.142 -            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
   8.143              dir=IOREQ_WRITE;     //write
   8.144 +            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
   8.145          }else if((inst.M1.x6>>2)<0xb){   //  read
   8.146 -            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
   8.147              dir=IOREQ_READ;
   8.148 -        }else{
   8.149 -            printf("This memory access instruction can't be emulated one : %lx\n",inst.inst);
   8.150 -            while(1);
   8.151 +            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
   8.152          }
   8.153 -    }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   8.154 -        inst_type=1;  //fp
   8.155 -        dir=IOREQ_READ;
   8.156 -        size=3;     //ldfd
   8.157 -    }else{
   8.158 +    }
   8.159 +    // Integer Load + Reg update
   8.160 +    else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
   8.161 +        inst_type = SL_INTEGER;
   8.162 +        dir = IOREQ_READ;     //write
   8.163 +        size = (inst.M2.x6&0x3);
   8.164 +        vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
   8.165 +        vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
   8.166 +        vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
   8.167 +        temp += post_update;
   8.168 +        vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
   8.169 +    }
   8.170 +    // Integer Load/Store + Imm update
   8.171 +    else if(inst.M3.major==5){
   8.172 +        inst_type = SL_INTEGER;  //
   8.173 +        size=(inst.M3.x6&0x3);
   8.174 +        if((inst.M5.x6>>2)>0xb){      // write
   8.175 +            dir=IOREQ_WRITE;     //write
   8.176 +            vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
   8.177 +            vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
   8.178 +            post_update = (inst.M5.i<<7)+inst.M5.imm7;
   8.179 +            if(inst.M5.s)
   8.180 +                temp -= post_update;
   8.181 +            else
   8.182 +                temp += post_update;
   8.183 +            vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
   8.184 +
   8.185 +        }else if((inst.M3.x6>>2)<0xb){   //  read
   8.186 +            dir=IOREQ_READ;
   8.187 +            vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
   8.188 +            vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
   8.189 +            post_update = (inst.M3.i<<7)+inst.M3.imm7;
   8.190 +            if(inst.M3.s)
   8.191 +                temp -= post_update;
   8.192 +            else
   8.193 +                temp += post_update;
   8.194 +            vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
   8.195 +
   8.196 +        }
   8.197 +    }
   8.198 +    // Floating-point Load/Store
   8.199 +//    else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   8.200 +//        inst_type=SL_FLOATING;  //fp
   8.201 +//        dir=IOREQ_READ;
   8.202 +//        size=3;     //ldfd
   8.203 +//    }
   8.204 +    else{
   8.205          printf("This memory access instruction can't be emulated two: %lx\n ",inst.inst);
   8.206          while(1);
   8.207      }
   8.208  
   8.209 +    size = 1 << size;
   8.210      if(dir==IOREQ_WRITE){
   8.211          mmio_access(vcpu, padr, &data, size, ma, dir);
   8.212      }else{
   8.213 @@ -433,7 +487,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   8.214          else if(size==2)
   8.215              data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
   8.216  
   8.217 -        if(inst_type==0){       //gp
   8.218 +        if(inst_type==SL_INTEGER){       //gp
   8.219              vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
   8.220          }else{
   8.221              panic("Don't support ldfd now !");
     9.1 --- a/xen/arch/ia64/pal_emul.c	Wed Aug 03 09:35:16 2005 +0000
     9.2 +++ b/xen/arch/ia64/pal_emul.c	Wed Aug 03 09:35:38 2005 +0000
     9.3 @@ -1,7 +1,21 @@
     9.4 -/* PAL/SAL call delegation
     9.5 +/*
     9.6 + * PAL/SAL call delegation
     9.7   *
     9.8   * Copyright (c) 2004 Li Susie <susie.li@intel.com>
     9.9   * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
    9.10 + *
    9.11 + * This program is free software; you can redistribute it and/or modify it
    9.12 + * under the terms and conditions of the GNU General Public License,
    9.13 + * version 2, as published by the Free Software Foundation.
    9.14 + *
    9.15 + * This program is distributed in the hope it will be useful, but WITHOUT
    9.16 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.17 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    9.18 + * more details.
    9.19 + *
    9.20 + * You should have received a copy of the GNU General Public License along with
    9.21 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    9.22 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    9.23   */
    9.24  
    9.25  #include <asm/vmx_vcpu.h>
    9.26 @@ -98,23 +112,6 @@ pal_halt (VCPU *vcpu) {
    9.27  
    9.28  static struct ia64_pal_retval
    9.29  pal_halt_light (VCPU *vcpu) {
    9.30 -#if 0	
    9.31 -	// GVMM will go back to HVMM and ask HVMM to call yield().
    9.32 -	vmmdata.p_ctlblk->status = VM_OK;
    9.33 -	vmmdata.p_ctlblk->ctlcode = ExitVM_YIELD;
    9.34 -
    9.35 -	vmm_transition((UINT64)&vmmdata.p_gsa->guest,
    9.36 -    			(UINT64)&vmmdata.p_gsa->host,
    9.37 -    			(UINT64) vmmdata.p_tramp,0,0);
    9.38 -
    9.39 -
    9.40 -	result.status = 0;
    9.41 -	result.pal_result[0]=0;
    9.42 -	result.pal_result[1]=0;
    9.43 -	result.pal_result[2]=0;
    9.44 -
    9.45 -	return result;
    9.46 -#endif
    9.47  	struct ia64_pal_retval result;
    9.48  
    9.49  	result.status= -1; //unimplemented
    10.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Wed Aug 03 09:35:16 2005 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Wed Aug 03 09:35:38 2005 +0000
    10.3 @@ -20,11 +20,19 @@
    10.4   			__do_IRQ(local_vector_to_irq(vector), regs);
    10.5   
    10.6   			/*
    10.7 -@@ -167,6 +173,95 @@
    10.8 +@@ -167,6 +173,103 @@
    10.9   	irq_exit();
   10.10   }
   10.11   
   10.12  +#ifdef  CONFIG_VTI
   10.13 ++#define vmx_irq_enter()		\
   10.14 ++	add_preempt_count(HARDIRQ_OFFSET);
   10.15 ++
   10.16 ++/* Now softirq will be checked when leaving hypervisor, or else
   10.17 ++ * scheduler irq will be executed too early.
   10.18 ++ */
   10.19 ++#define vmx_irq_exit(void)	\
   10.20 ++	sub_preempt_count(HARDIRQ_OFFSET);
   10.21  +/*
   10.22  + * That's where the IVT branches when we get an external
   10.23  + * interrupt. This branches to the correct hardware IRQ handler via
   10.24 @@ -72,7 +80,7 @@
   10.25  +	 * 16 (without this, it would be ~240, which could easily lead
   10.26  +	 * to kernel stack overflows).
   10.27  +	 */
   10.28 -+	irq_enter();
   10.29 ++	vmx_irq_enter();
   10.30  +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
   10.31  +	ia64_srlz_d();
   10.32  +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
   10.33 @@ -106,7 +114,7 @@
   10.34  +	 * handler needs to be able to wait for further keyboard interrupts, which can't
   10.35  +	 * come through until ia64_eoi() has been done.
   10.36  +	 */
   10.37 -+	irq_exit();
   10.38 ++	vmx_irq_exit();
   10.39  +	if ( wake_dom0 && current != dom0 ) 
   10.40  +		domain_wake(dom0->vcpu[0]);
   10.41  +}
    11.1 --- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Wed Aug 03 09:35:16 2005 +0000
    11.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h	Wed Aug 03 09:35:38 2005 +0000
    11.3 @@ -1,6 +1,6 @@
    11.4  --- /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h	2005-03-01 23:37:49.000000000 -0800
    11.5  +++ /home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h	2005-05-18 12:40:50.000000000 -0700
    11.6 -@@ -29,8 +29,20 @@
    11.7 +@@ -29,8 +29,21 @@
    11.8    */
    11.9   #define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
   11.10   #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
   11.11 @@ -12,6 +12,7 @@
   11.12  +#ifdef XEN
   11.13  +#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   11.14  +#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   11.15 ++#define IA64_TR_ARCH_INFO      5
   11.16  +#ifdef CONFIG_VTI
   11.17  +#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
   11.18  +#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
    12.1 --- a/xen/arch/ia64/process.c	Wed Aug 03 09:35:16 2005 +0000
    12.2 +++ b/xen/arch/ia64/process.c	Wed Aug 03 09:35:38 2005 +0000
    12.3 @@ -50,7 +50,7 @@ extern unsigned long dom0_start, dom0_si
    12.4  			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    12.5  			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    12.6  
    12.7 -#define PSCB(x,y)	x->vcpu_info->arch.y
    12.8 +#define PSCB(x,y)	VCPU(x,y)
    12.9  #define PSCBX(x,y)	x->arch.y
   12.10  
   12.11  extern unsigned long vcpu_verbose;
   12.12 @@ -226,7 +226,7 @@ panic_domain(regs,"psr.ic off, deliverin
   12.13  #ifdef CONFIG_SMP
   12.14  #error "sharedinfo doesn't handle smp yet"
   12.15  #endif
   12.16 -	regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
   12.17 +	regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
   12.18  
   12.19  	PSCB(v,interrupt_delivery_enabled) = 0;
   12.20  	PSCB(v,interrupt_collection_enabled) = 0;
    13.1 --- a/xen/arch/ia64/regionreg.c	Wed Aug 03 09:35:16 2005 +0000
    13.2 +++ b/xen/arch/ia64/regionreg.c	Wed Aug 03 09:35:38 2005 +0000
    13.3 @@ -14,6 +14,8 @@
    13.4  #include <asm/page.h>
    13.5  #include <asm/regionreg.h>
    13.6  #include <asm/vhpt.h>
    13.7 +#include <asm/vcpu.h>
    13.8 +extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
    13.9  
   13.10  
   13.11  #define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
   13.12 @@ -273,7 +275,8 @@ int set_one_rr(unsigned long rr, unsigne
   13.13  		newrrv.rid = newrid;
   13.14  		newrrv.ve = VHPT_ENABLED_REGION_7;
   13.15  		newrrv.ps = IA64_GRANULE_SHIFT;
   13.16 -		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
   13.17 +		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
   13.18 +				v->vcpu_info->arch.privregs);
   13.19  	}
   13.20  	else {
   13.21  		newrrv.rid = newrid;
   13.22 @@ -290,7 +293,8 @@ int set_one_rr(unsigned long rr, unsigne
   13.23  	newrrv.ve = 1;  // VHPT now enabled for region 7!!
   13.24  	newrrv.ps = PAGE_SHIFT;
   13.25  	if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
   13.26 -	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
   13.27 +	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
   13.28 +				v->vcpu_info->arch.privregs);
   13.29  	else set_rr(rr,newrrv.rrval);
   13.30  #endif
   13.31  	return 1;
   13.32 @@ -332,14 +336,14 @@ void init_all_rr(struct vcpu *v)
   13.33  	rrv.ps = PAGE_SHIFT;
   13.34  	rrv.ve = 1;
   13.35  if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
   13.36 -	v->vcpu_info->arch.rrs[0] = -1;
   13.37 -	v->vcpu_info->arch.rrs[1] = rrv.rrval;
   13.38 -	v->vcpu_info->arch.rrs[2] = rrv.rrval;
   13.39 -	v->vcpu_info->arch.rrs[3] = rrv.rrval;
   13.40 -	v->vcpu_info->arch.rrs[4] = rrv.rrval;
   13.41 -	v->vcpu_info->arch.rrs[5] = rrv.rrval;
   13.42 +	VCPU(v,rrs[0]) = -1;
   13.43 +	VCPU(v,rrs[1]) = rrv.rrval;
   13.44 +	VCPU(v,rrs[2]) = rrv.rrval;
   13.45 +	VCPU(v,rrs[3]) = rrv.rrval;
   13.46 +	VCPU(v,rrs[4]) = rrv.rrval;
   13.47 +	VCPU(v,rrs[5]) = rrv.rrval;
   13.48  	rrv.ve = 0; 
   13.49 -	v->vcpu_info->arch.rrs[6] = rrv.rrval;
   13.50 +	VCPU(v,rrs[6]) = rrv.rrval;
   13.51  //	v->shared_info->arch.rrs[7] = rrv.rrval;
   13.52  }
   13.53  
   13.54 @@ -378,7 +382,7 @@ unsigned long load_region_regs(struct vc
   13.55  	// TODO: These probably should be validated
   13.56  	unsigned long bad = 0;
   13.57  
   13.58 -	if (v->vcpu_info->arch.metaphysical_mode) {
   13.59 +	if (VCPU(v,metaphysical_mode)) {
   13.60  		ia64_rr rrv;
   13.61  
   13.62  		rrv.rrval = 0;
   13.63 @@ -390,16 +394,16 @@ unsigned long load_region_regs(struct vc
   13.64  		ia64_srlz_d();
   13.65  	}
   13.66  	else {
   13.67 -		rr0 =  v->vcpu_info->arch.rrs[0];
   13.68 +		rr0 =  VCPU(v,rrs[0]);
   13.69  		if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
   13.70  	}
   13.71 -	rr1 =  v->vcpu_info->arch.rrs[1];
   13.72 -	rr2 =  v->vcpu_info->arch.rrs[2];
   13.73 -	rr3 =  v->vcpu_info->arch.rrs[3];
   13.74 -	rr4 =  v->vcpu_info->arch.rrs[4];
   13.75 -	rr5 =  v->vcpu_info->arch.rrs[5];
   13.76 -	rr6 =  v->vcpu_info->arch.rrs[6];
   13.77 -	rr7 =  v->vcpu_info->arch.rrs[7];
   13.78 +	rr1 =  VCPU(v,rrs[1]);
   13.79 +	rr2 =  VCPU(v,rrs[2]);
   13.80 +	rr3 =  VCPU(v,rrs[3]);
   13.81 +	rr4 =  VCPU(v,rrs[4]);
   13.82 +	rr5 =  VCPU(v,rrs[5]);
   13.83 +	rr6 =  VCPU(v,rrs[6]);
   13.84 +	rr7 =  VCPU(v,rrs[7]);
   13.85  	if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
   13.86  	if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
   13.87  	if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
   13.88 @@ -410,4 +414,5 @@ unsigned long load_region_regs(struct vc
   13.89  	if (bad) {
   13.90  		panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
   13.91  	}
   13.92 +	return 0;
   13.93  }
    14.1 --- a/xen/arch/ia64/vcpu.c	Wed Aug 03 09:35:16 2005 +0000
    14.2 +++ b/xen/arch/ia64/vcpu.c	Wed Aug 03 09:35:38 2005 +0000
    14.3 @@ -28,7 +28,7 @@ typedef	union {
    14.4  
    14.5  // this def for vcpu_regs won't work if kernel stack is present
    14.6  #define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
    14.7 -#define	PSCB(x,y)	x->vcpu_info->arch.y
    14.8 +#define	PSCB(x,y)	VCPU(x,y)
    14.9  #define	PSCBX(x,y)	x->arch.y
   14.10  
   14.11  #define	TRUE	1
   14.12 @@ -155,7 +155,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
   14.13  	// interrupt collection flag
   14.14  	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
   14.15  	// just handle psr.up and psr.pp for now
   14.16 -	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
   14.17 +	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
   14.18  		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
   14.19  		| IA64_PSR_DFL | IA64_PSR_DFH))
   14.20  			return (IA64_ILLOP_FAULT);
   14.21 @@ -164,6 +164,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
   14.22  	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
   14.23  	if (imm.up) { ipsr->up = 0; psr.up = 0; }
   14.24  	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
   14.25 +	if (imm.be) ipsr->be = 0;
   14.26  	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
   14.27  	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   14.28  	return IA64_NO_FAULT;
   14.29 @@ -214,6 +215,7 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
   14.30  	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
   14.31  	// TODO: do this faster
   14.32  	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   14.33 +	if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
   14.34  	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
   14.35  	if (imm.up) { ipsr->up = 1; psr.up = 1; }
   14.36  	if (imm.be) {
   14.37 @@ -262,6 +264,7 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN
   14.38  	}
   14.39  	if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
   14.40  	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   14.41 +	if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
   14.42  	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
   14.43  	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   14.44  	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   14.45 @@ -389,6 +392,21 @@ IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT6
   14.46  	return (IA64_NO_FAULT);
   14.47  }
   14.48  
   14.49 +unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
   14.50 +{
   14.51 +	ia64_rr rr;
   14.52 +
   14.53 +	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   14.54 +	return(rr.ps);
   14.55 +}
   14.56 +
   14.57 +unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
   14.58 +{
   14.59 +	ia64_rr rr;
   14.60 +
   14.61 +	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   14.62 +	return(rr.rid);
   14.63 +}
   14.64  
   14.65  unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
   14.66  {
   14.67 @@ -881,6 +899,15 @@ IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT
   14.68  	return (IA64_NO_FAULT);
   14.69  }
   14.70  
   14.71 +// parameter is a time interval specified in cycles
   14.72 +void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   14.73 +{
   14.74 +    PSCBX(vcpu,xen_timer_interval) = cycles;
   14.75 +    vcpu_set_next_timer(vcpu);
   14.76 +    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
   14.77 +             PSCBX(vcpu,xen_timer_interval));
   14.78 +    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
   14.79 +}
   14.80  
   14.81  IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
   14.82  {
   14.83 @@ -1009,16 +1036,6 @@ void vcpu_set_next_timer(VCPU *vcpu)
   14.84  	}
   14.85  }
   14.86  
   14.87 -// parameter is a time interval specified in cycles
   14.88 -void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   14.89 -{
   14.90 -    PSCBX(vcpu,xen_timer_interval) = cycles;
   14.91 -    vcpu_set_next_timer(vcpu);
   14.92 -    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
   14.93 -             PSCBX(vcpu,xen_timer_interval));
   14.94 -    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
   14.95 -}
   14.96 -
   14.97  IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
   14.98  {
   14.99  	UINT now = ia64_get_itc();
  14.100 @@ -1182,12 +1199,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
  14.101  	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  14.102  	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  14.103  	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
  14.104 -#define SI_OFS(x)	((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
  14.105 -if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
  14.106 -printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
  14.107 -printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
  14.108 -while(1);
  14.109 -}
  14.110  		// TODO: validate PSCB(vcpu,iip)
  14.111  		// TODO: PSCB(vcpu,ipsr) = psr;
  14.112  		PSCB(vcpu,ipsr) = psr.i64;
  14.113 @@ -1222,7 +1233,6 @@ IA64FAULT vcpu_cover(VCPU *vcpu)
  14.114  
  14.115  IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  14.116  {
  14.117 -	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
  14.118  	UINT64 pta = PSCB(vcpu,pta);
  14.119  	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  14.120  	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
  14.121 @@ -1263,7 +1273,6 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 v
  14.122  #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
  14.123  
  14.124  unsigned long vhpt_translate_count = 0;
  14.125 -int in_vcpu_tpa = 0;
  14.126  
  14.127  IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
  14.128  {
  14.129 @@ -1278,12 +1287,6 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  14.130  			unsigned long vipsr = PSCB(vcpu,ipsr);
  14.131  			unsigned long iip = regs->cr_iip;
  14.132  			unsigned long ipsr = regs->cr_ipsr;
  14.133 -#if 0
  14.134 -			printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p\n", address, viip, vipsr, iip, ipsr);
  14.135 -			if (in_vcpu_tpa) printk("vcpu_translate called from vcpu_tpa\n");
  14.136 -			while(1);
  14.137 -			panic_domain(0,"vcpu_translate: bad address %p\n", address);
  14.138 -#endif
  14.139  			printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
  14.140  		}
  14.141  
  14.142 @@ -1304,7 +1307,6 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  14.143  	/* check 1-entry TLB */
  14.144  	if ((trp = match_dtlb(vcpu,address))) {
  14.145  		dtlb_translate_count++;
  14.146 -if (!in_vcpu_tpa) printf("vcpu_translate: found in vdtlb\n");
  14.147  		*pteval = trp->page_flags;
  14.148  		*itir = trp->itir;
  14.149  		return IA64_NO_FAULT;
  14.150 @@ -1356,9 +1358,7 @@ IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 va
  14.151  	UINT64 pteval, itir, mask;
  14.152  	IA64FAULT fault;
  14.153  
  14.154 -in_vcpu_tpa=1;
  14.155  	fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
  14.156 -in_vcpu_tpa=0;
  14.157  	if (fault == IA64_NO_FAULT)
  14.158  	{
  14.159  		mask = itir_mask(itir);
  14.160 @@ -1534,28 +1534,8 @@ unsigned long vcpu_get_rr_ve(VCPU *vcpu,
  14.161  	return(rr.ve);
  14.162  }
  14.163  
  14.164 -
  14.165 -unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
  14.166 -{
  14.167 -	ia64_rr rr;
  14.168 -
  14.169 -	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
  14.170 -	return(rr.ps);
  14.171 -}
  14.172 -
  14.173 -
  14.174 -unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
  14.175 -{
  14.176 -	ia64_rr rr;
  14.177 -
  14.178 -	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
  14.179 -	return(rr.rid);
  14.180 -}
  14.181 -
  14.182 -
  14.183  IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  14.184  {
  14.185 -	extern void set_one_rr(UINT64, UINT64);
  14.186  	PSCB(vcpu,rrs)[reg>>61] = val;
  14.187  	// warning: set_one_rr() does it "live"
  14.188  	set_one_rr(reg,val);
  14.189 @@ -1785,49 +1765,26 @@ IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 
  14.190  IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
  14.191  {
  14.192  	// TODO: Only allowed for current vcpu
  14.193 -	UINT64 mpaddr, ps;
  14.194 +	UINT64 mpaddr, paddr;
  14.195  	IA64FAULT fault;
  14.196 -	TR_ENTRY *trp;
  14.197 -	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
  14.198 -	unsigned long pteval, dom_imva;
  14.199 +	unsigned long translate_domain_mpaddr(unsigned long);
  14.200 +	IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);
  14.201  
  14.202 -	if ((trp = match_dtlb(vcpu,vadr))) {
  14.203 -		pteval = trp->page_flags;
  14.204 -		dom_imva = __va(pteval & _PFN_MASK);
  14.205 -		ia64_fc(dom_imva);
  14.206 -		return IA64_NO_FAULT;
  14.207 -	}
  14.208  	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
  14.209  	if (fault == IA64_NO_FAULT) {
  14.210 -		struct domain *dom0;
  14.211 -		unsigned long dom0_start, dom0_size;
  14.212 -		if (vcpu == dom0) {
  14.213 -			if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
  14.214 -				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
  14.215 -			}
  14.216 -		}
  14.217 -		pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
  14.218 -		if (pteval) {
  14.219 -			dom_imva = __va(pteval & _PFN_MASK);
  14.220 -			ia64_fc(dom_imva);
  14.221 -		}
  14.222 -		else {
  14.223 -			REGS *regs = vcpu_regs(vcpu);
  14.224 -			printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
  14.225 -					vadr,regs->cr_iip);
  14.226 -		}
  14.227 +		paddr = translate_domain_mpaddr(mpaddr);
  14.228 +		ia64_fc(__va(paddr));
  14.229  	}
  14.230  	return fault;
  14.231  }
  14.232  
  14.233 +int ptce_count = 0;
  14.234  IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  14.235  {
  14.236 -
  14.237  	// Note that this only needs to be called once, i.e. the
  14.238  	// architected loop to purge the entire TLB, should use
  14.239  	//  base = stride1 = stride2 = 0, count0 = count 1 = 1
  14.240  
  14.241 -	// FIXME: When VHPT is in place, flush that too!
  14.242  #ifdef VHPT_GLOBAL
  14.243  	vhpt_flush();	// FIXME: This is overdoing it
  14.244  #endif
  14.245 @@ -1850,6 +1807,7 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
  14.246  	// FIXME: validate not flushing Xen addresses
  14.247  	// if (Xen address) return(IA64_ILLOP_FAULT);
  14.248  	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
  14.249 +//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
  14.250  #ifdef VHPT_GLOBAL
  14.251  	vhpt_flush_address(vadr,addr_range);
  14.252  #endif
    15.1 --- a/xen/arch/ia64/vlsapic.c	Wed Aug 03 09:35:16 2005 +0000
    15.2 +++ b/xen/arch/ia64/vlsapic.c	Wed Aug 03 09:35:38 2005 +0000
    15.3 @@ -38,6 +38,14 @@
    15.4  #include <asm/vmx_pal_vsa.h>
    15.5  #include <asm/kregs.h>
    15.6  
    15.7 +#define  SHARED_VLAPIC_INF
    15.8 +#ifdef V_IOSAPIC_READY
    15.9 +static inline vl_apic_info* get_psapic(VCPU *vcpu)
   15.10 +{
   15.11 +    shared_iopage_t  *sp = get_sp(vcpu->domain);
   15.12 +    return &(sp->vcpu_iodata[vcpu->vcpu_id].apic_intr);
   15.13 +}
   15.14 +#endif
   15.15  //u64  fire_itc;
   15.16  //u64  fire_itc2;
   15.17  //u64  fire_itm;
   15.18 @@ -216,7 +224,8 @@ void vtm_interruption_update(VCPU *vcpu,
   15.19   */
   15.20  void vtm_domain_out(VCPU *vcpu)
   15.21  {
   15.22 -    rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
   15.23 +    if(!is_idle_task(vcpu->domain))
   15.24 +	rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
   15.25  }
   15.26  
   15.27  /*
   15.28 @@ -226,9 +235,11 @@ void vtm_domain_out(VCPU *vcpu)
   15.29  void vtm_domain_in(VCPU *vcpu)
   15.30  {
   15.31      vtime_t     *vtm;
   15.32 -    
   15.33 -    vtm=&(vcpu->arch.arch_vmx.vtm);
   15.34 -    vtm_interruption_update(vcpu, vtm);
   15.35 +
   15.36 +    if(!is_idle_task(vcpu->domain)) {
   15.37 +	vtm=&(vcpu->arch.arch_vmx.vtm);
   15.38 +	vtm_interruption_update(vcpu, vtm);
   15.39 +    }
   15.40  }
   15.41  
   15.42  /*
   15.43 @@ -262,10 +273,50 @@ static void update_vhpi(VCPU *vcpu, int 
   15.44      }
   15.45  }
   15.46  
   15.47 +#ifdef V_IOSAPIC_READY
   15.48 +void vlapic_update_shared_info(VCPU *vcpu)
   15.49 +{
   15.50 +    //int	i;
   15.51 +    
   15.52 +    vl_apic_info *ps;
   15.53 +
   15.54 +    if (vcpu->domain == dom0)
   15.55 +	return;
   15.56 +
   15.57 +    ps = get_psapic(vcpu);
   15.58 +    ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16; 
   15.59 +    printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
   15.60 +    ps->vl_apr = 0;
   15.61 +    // skip ps->vl_logical_dest && ps->vl_dest_format
   15.62 +    // IPF support physical destination mode only
   15.63 +    ps->vl_arb_id = 0;
   15.64 +    /*
   15.65 +    for ( i=0; i<4; i++ ) {
   15.66 +    	ps->tmr[i] = 0;		// edge trigger 
   15.67 +    }
   15.68 +    */
   15.69 +}
   15.70 +
   15.71 +void vlapic_update_ext_irq(VCPU *vcpu)
   15.72 +{
   15.73 +    int  vec;
   15.74 +    
   15.75 +    vl_apic_info *ps = get_psapic(vcpu);
   15.76 +    while ( (vec = highest_bits(ps->irr)) != NULL_VECTOR ) {
   15.77 +    	clear_bit (vec, ps->irr);
   15.78 +        vmx_vcpu_pend_interrupt(vcpu, vec);
   15.79 +    }
   15.80 +}
   15.81 +#endif
   15.82 +
   15.83  void vlsapic_reset(VCPU *vcpu)
   15.84  {
   15.85      int     i;
   15.86 -    VPD_CR(vcpu, lid) = 0;
   15.87 +#ifdef V_IOSAPIC_READY
   15.88 +    vl_apic_info  *psapic;	// shared lapic inf.
   15.89 +#endif
   15.90 +    
   15.91 +    VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
   15.92      VPD_CR(vcpu, ivr) = 0;
   15.93      VPD_CR(vcpu,tpr) = 0x10000;
   15.94      VPD_CR(vcpu, eoi) = 0;
   15.95 @@ -281,6 +332,10 @@ void vlsapic_reset(VCPU *vcpu)
   15.96      for ( i=0; i<4; i++) {
   15.97          VLSAPIC_INSVC(vcpu,i) = 0;
   15.98      }
   15.99 +#ifdef V_IOSAPIC_READY
  15.100 +    vlapic_update_shared_info(vcpu);
  15.101 +    //vlapic_update_shared_irr(vcpu);
  15.102 +#endif
  15.103      DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
  15.104  }
  15.105  
  15.106 @@ -414,6 +469,7 @@ void vmx_vcpu_pend_interrupt(VCPU *vcpu,
  15.107      }
  15.108      local_irq_save(spsr);
  15.109      VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
  15.110 +    //vlapic_update_shared_irr(vcpu);
  15.111      local_irq_restore(spsr);
  15.112      vcpu->arch.irq_new_pending = 1;
  15.113  }
  15.114 @@ -432,6 +488,7 @@ void vmx_vcpu_pend_batch_interrupt(VCPU 
  15.115      for (i=0 ; i<4; i++ ) {
  15.116          VPD_CR(vcpu,irr[i]) |= pend_irr[i];
  15.117      }
  15.118 +    //vlapic_update_shared_irr(vcpu);
  15.119      local_irq_restore(spsr);
  15.120      vcpu->arch.irq_new_pending = 1;
  15.121  }
  15.122 @@ -518,6 +575,7 @@ uint64_t guest_read_vivr(VCPU *vcpu)
  15.123      VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
  15.124      VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
  15.125      update_vhpi(vcpu, NULL_VECTOR);     // clear VHPI till EOI or IRR write
  15.126 +    //vlapic_update_shared_irr(vcpu);
  15.127      local_irq_restore(spsr);
  15.128      return (uint64_t)vec;
  15.129  }
    16.1 --- a/xen/arch/ia64/vmmu.c	Wed Aug 03 09:35:16 2005 +0000
    16.2 +++ b/xen/arch/ia64/vmmu.c	Wed Aug 03 09:35:38 2005 +0000
    16.3 @@ -145,7 +145,7 @@ static thash_cb_t *init_domain_vhpt(stru
    16.4      thash_cb_t  *vhpt;
    16.5      PTA pta_value;
    16.6      
    16.7 -    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
    16.8 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
    16.9      if ( page == NULL ) {
   16.10          panic("No enough contiguous memory for init_domain_mm\n");
   16.11      }
   16.12 @@ -187,7 +187,7 @@ thash_cb_t *init_domain_tlb(struct vcpu 
   16.13      tlb_special_t  *ts;
   16.14      thash_cb_t  *tlb;
   16.15      
   16.16 -    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
   16.17 +    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
   16.18      if ( page == NULL ) {
   16.19          panic("No enough contiguous memory for init_domain_mm\n");
   16.20      }
   16.21 @@ -224,7 +224,7 @@ alloc_pmt(struct domain *d)
   16.22      /* Only called once */
   16.23      ASSERT(d->arch.pmt);
   16.24  
   16.25 -    page = alloc_domheap_pages(NULL, get_order(d->max_pages));
   16.26 +    page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0);
   16.27      ASSERT(page);
   16.28  
   16.29      d->arch.pmt = page_to_virt(page);
    17.1 --- a/xen/arch/ia64/vmx_hypercall.c	Wed Aug 03 09:35:16 2005 +0000
    17.2 +++ b/xen/arch/ia64/vmx_hypercall.c	Wed Aug 03 09:35:38 2005 +0000
    17.3 @@ -29,6 +29,7 @@
    17.4  #include <asm/regionreg.h>
    17.5  #include <asm/page.h>
    17.6  #include <xen/mm.h>
    17.7 +#include <xen/multicall.h>
    17.8  
    17.9  
   17.10  void hyper_not_support(void)
   17.11 @@ -51,6 +52,42 @@ void hyper_mmu_update(void)
   17.12      vmx_vcpu_increment_iip(vcpu);
   17.13  }
   17.14  
   17.15 +unsigned long __hypercall_create_continuation(
   17.16 +    unsigned int op, unsigned int nr_args, ...)
   17.17 +{
   17.18 +    struct mc_state *mcs = &mc_state[smp_processor_id()];
   17.19 +    VCPU *vcpu = current;
   17.20 +    struct cpu_user_regs *regs = vcpu_regs(vcpu);
   17.21 +    unsigned int i;
   17.22 +    va_list args;
   17.23 +
   17.24 +    va_start(args, nr_args);
   17.25 +    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
   17.26 +	panic("PREEMPT happen in multicall\n");	// Not support yet
   17.27 +    } else {
   17.28 +	vmx_vcpu_set_gr(vcpu, 15, op, 0);
   17.29 +	for ( i = 0; i < nr_args; i++) {
   17.30 +	    switch (i) {
   17.31 +	    case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
   17.32 +		    break;
   17.33 +	    case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
   17.34 +		    break;
   17.35 +	    case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
   17.36 +		    break;
   17.37 +	    case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
   17.38 +		    break;
   17.39 +	    case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
   17.40 +		    break;
   17.41 +	    default: panic("Too many args for hypercall continuation\n");
   17.42 +		    break;
   17.43 +	    }
   17.44 +	}
   17.45 +    }
   17.46 +    vcpu->arch.hypercall_continuation = 1;
   17.47 +    va_end(args);
   17.48 +    return op;
   17.49 +}
   17.50 +
   17.51  void hyper_dom_mem_op(void)
   17.52  {
   17.53      VCPU *vcpu=current;
   17.54 @@ -65,7 +102,13 @@ void hyper_dom_mem_op(void)
   17.55      printf("do_dom_mem return value: %lx\n", ret);
   17.56      vmx_vcpu_set_gr(vcpu, 8, ret, 0);
   17.57  
   17.58 -    vmx_vcpu_increment_iip(vcpu);
   17.59 +    /* Hard to define a special return value to indicate hypercall restart.
   17.60 +     * So just add a new mark, which is SMP safe
   17.61 +     */
   17.62 +    if (vcpu->arch.hypercall_continuation == 1)
   17.63 +	vcpu->arch.hypercall_continuation = 0;
   17.64 +    else
   17.65 +	vmx_vcpu_increment_iip(vcpu);
   17.66  }
   17.67  
   17.68  
    18.1 --- a/xen/arch/ia64/vmx_ivt.S	Wed Aug 03 09:35:16 2005 +0000
    18.2 +++ b/xen/arch/ia64/vmx_ivt.S	Wed Aug 03 09:35:38 2005 +0000
    18.3 @@ -560,6 +560,21 @@ END(vmx_virtual_exirq)
    18.4  	VMX_DBG_FAULT(19)
    18.5  	VMX_FAULT(19)
    18.6  
    18.7 +    .org vmx_ia64_ivt+0x5000
    18.8 +/////////////////////////////////////////////////////////////////////////////////////////
    18.9 +// 0x5000 Entry 20 (size 16 bundles) Page Not Present
   18.10 +ENTRY(vmx_page_not_present)
   18.11 +	VMX_REFLECT(20)
   18.12 +END(vmx_page_not_present)
   18.13 +
   18.14 +    .org vmx_ia64_ivt+0x5100
   18.15 +/////////////////////////////////////////////////////////////////////////////////////////
   18.16 +// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
   18.17 +ENTRY(vmx_key_permission)
   18.18 +	VMX_REFLECT(21)
   18.19 +END(vmx_key_permission)
   18.20 +
   18.21 +    .org vmx_ia64_ivt+0x5200
   18.22  /////////////////////////////////////////////////////////////////////////////////////////
   18.23  // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
   18.24  ENTRY(vmx_iaccess_rights)
    19.1 --- a/xen/arch/ia64/vmx_support.c	Wed Aug 03 09:35:16 2005 +0000
    19.2 +++ b/xen/arch/ia64/vmx_support.c	Wed Aug 03 09:35:38 2005 +0000
    19.3 @@ -37,18 +37,19 @@ void vmx_wait_io(void)
    19.4      struct vcpu *v = current;
    19.5      struct domain *d = v->domain;
    19.6      extern void do_block();
    19.7 +    int port = iopacket_port(d);
    19.8  
    19.9      do {
   19.10 -	if (!test_bit(IOPACKET_PORT,
   19.11 +	if (!test_bit(port,
   19.12  		&d->shared_info->evtchn_pending[0]))
   19.13  	    do_block();
   19.14  
   19.15  	/* Unblocked when some event is coming. Clear pending indication
   19.16  	 * immediately if deciding to go for io assist
   19.17  	  */
   19.18 -	if (test_and_clear_bit(IOPACKET_PORT,
   19.19 +	if (test_and_clear_bit(port,
   19.20  		&d->shared_info->evtchn_pending[0])) {
   19.21 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   19.22 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
   19.23  	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   19.24  	    vmx_io_assist(v);
   19.25  	}
   19.26 @@ -66,7 +67,7 @@ void vmx_wait_io(void)
   19.27  	     * nothing losed. Next loop will check I/O channel to fix this
   19.28  	     * window.
   19.29  	     */
   19.30 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   19.31 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
   19.32  	}
   19.33  	else
   19.34  	    break;
   19.35 @@ -88,7 +89,7 @@ void vmx_io_assist(struct vcpu *v)
   19.36       * This shared page contains I/O request between emulation code
   19.37       * and device model.
   19.38       */
   19.39 -    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
   19.40 +    vio = get_vio(v->domain, v->vcpu_id);
   19.41      if (!vio)
   19.42  	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
   19.43  
   19.44 @@ -127,6 +128,7 @@ void vmx_intr_assist(struct vcpu *v)
   19.45      struct domain *d = v->domain;
   19.46      extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
   19.47  					unsigned long *pend_irr);
   19.48 +    int port = iopacket_port(d);
   19.49  
   19.50      /* I/O emulation is atomic, so it's impossible to see execution flow
   19.51       * out of vmx_wait_io, when guest is still waiting for response.
   19.52 @@ -135,10 +137,10 @@ void vmx_intr_assist(struct vcpu *v)
   19.53  	panic("!!!Bad resume to guest before I/O emulation is done.\n");
   19.54  
   19.55      /* Clear indicator specific to interrupt delivered from DM */
   19.56 -    if (test_and_clear_bit(IOPACKET_PORT,
   19.57 +    if (test_and_clear_bit(port,
   19.58  		&d->shared_info->evtchn_pending[0])) {
   19.59 -	if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
   19.60 -	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   19.61 +	if (!d->shared_info->evtchn_pending[port >> 5])
   19.62 +	    clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
   19.63  
   19.64  	if (!v->vcpu_info->evtchn_pending_sel)
   19.65  	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   19.66 @@ -149,11 +151,14 @@ void vmx_intr_assist(struct vcpu *v)
   19.67       * shares same event channel as I/O emulation, with corresponding
   19.68       * indicator possibly cleared when vmx_wait_io().
   19.69       */
   19.70 -    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
   19.71 +    vio = get_vio(v->domain, v->vcpu_id);
   19.72      if (!vio)
   19.73  	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
   19.74  
   19.75 -    vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]); 
   19.76 -    memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
   19.77 +#ifdef V_IOSAPIC_READY
   19.78 +    vlapic_update_ext_irq(v);
   19.79 +#else
   19.80 +    panic("IOSAPIC model is missed in qemu\n");
   19.81 +#endif
   19.82      return;
   19.83  }
    20.1 --- a/xen/arch/ia64/vmx_vcpu.c	Wed Aug 03 09:35:16 2005 +0000
    20.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Wed Aug 03 09:35:38 2005 +0000
    20.3 @@ -23,7 +23,7 @@
    20.4   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    20.5   */
    20.6  
    20.7 -#include <linux/sched.h>
    20.8 +#include <xen/sched.h>
    20.9  #include <public/arch-ia64.h>
   20.10  #include <asm/ia64_int.h>
   20.11  #include <asm/vmx_vcpu.h>
   20.12 @@ -201,7 +201,7 @@ vmx_vcpu_get_vtlb(VCPU *vcpu)
   20.13  struct virutal_platform_def *
   20.14  vmx_vcpu_get_plat(VCPU *vcpu)
   20.15  {
   20.16 -    return &(vcpu->arch.arch_vmx.vmx_platform);
   20.17 +    return &(vcpu->domain->arch.vmx_platform);
   20.18  }
   20.19  
   20.20  
   20.21 @@ -213,7 +213,6 @@ ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 va
   20.22  
   20.23  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
   20.24  {
   20.25 -    extern void set_one_rr(UINT64, UINT64);
   20.26      ia64_rr oldrr,newrr;
   20.27      thash_cb_t *hcb;
   20.28      oldrr=vmx_vcpu_rr(vcpu,reg);
   20.29 @@ -375,7 +374,7 @@ IA64FAULT
   20.30  vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
   20.31  {
   20.32      REGS *regs=vcpu_regs(vcpu);
   20.33 -    u64 nat;
   20.34 +    int nat;
   20.35      //TODO, Eddie
   20.36      if (!regs) return 0;
   20.37      if (reg >= 16 && reg < 32) {
    21.1 --- a/xen/arch/ia64/vmx_virt.c	Wed Aug 03 09:35:16 2005 +0000
    21.2 +++ b/xen/arch/ia64/vmx_virt.c	Wed Aug 03 09:35:38 2005 +0000
    21.3 @@ -1193,7 +1193,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
    21.4          case 23:return vmx_vcpu_set_ifs(vcpu,r2);
    21.5          case 24:return vmx_vcpu_set_iim(vcpu,r2);
    21.6          case 25:return vmx_vcpu_set_iha(vcpu,r2);
    21.7 -        case 64:return vmx_vcpu_set_lid(vcpu,r2);
    21.8 +        case 64:printk("SET LID to 0x%lx\n", r2);
    21.9 +		return vmx_vcpu_set_lid(vcpu,r2);
   21.10          case 65:return IA64_NO_FAULT;
   21.11          case 66:return vmx_vcpu_set_tpr(vcpu,r2);
   21.12          case 67:return vmx_vcpu_set_eoi(vcpu,r2);
   21.13 @@ -1253,9 +1254,9 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
   21.14          case 23:return cr_get(ifs);
   21.15          case 24:return cr_get(iim);
   21.16          case 25:return cr_get(iha);
   21.17 -	case 64:val = ia64_getreg(_IA64_REG_CR_LID);
   21.18 -	     return vmx_vcpu_set_gr(vcpu,tgt,val,0);
   21.19 -//        case 64:return cr_get(lid);
   21.20 +//	case 64:val = ia64_getreg(_IA64_REG_CR_LID);
   21.21 +//	     return vmx_vcpu_set_gr(vcpu,tgt,val,0);
   21.22 +        case 64:return cr_get(lid);
   21.23          case 65:
   21.24               vmx_vcpu_get_ivr(vcpu,&val);
   21.25               return vmx_vcpu_set_gr(vcpu,tgt,val,0);
    22.1 --- a/xen/arch/ia64/vtlb.c	Wed Aug 03 09:35:16 2005 +0000
    22.2 +++ b/xen/arch/ia64/vtlb.c	Wed Aug 03 09:35:38 2005 +0000
    22.3 @@ -23,6 +23,7 @@
    22.4  
    22.5  #include <linux/sched.h>
    22.6  #include <asm/tlb.h>
    22.7 +#include <asm/mm.h>
    22.8  #include <asm/vmx_mm_def.h>
    22.9  #include <asm/gcc_intrin.h>
   22.10  #include <xen/interrupt.h>
   22.11 @@ -359,7 +360,10 @@ thash_data_t *__alloc_chain(thash_cb_t *
   22.12  void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   22.13  {
   22.14      thash_data_t    *hash_table, *cch;
   22.15 +    int flag;
   22.16      rr_t  vrr;
   22.17 +    u64 gppn;
   22.18 +    u64 ppns, ppne;
   22.19      
   22.20      hash_table = (hcb->hash_func)(hcb->pta,
   22.21                          va, entry->rid, entry->ps);
   22.22 @@ -375,7 +379,18 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   22.23          *hash_table = *entry;
   22.24          hash_table->next = cch;
   22.25      }
   22.26 -    thash_insert (hcb->ts->vhpt, entry, va);
   22.27 +    if(hcb->vcpu->domain->domain_id==0){
   22.28 +       thash_insert(hcb->ts->vhpt, entry, va);
   22.29 +        return;
   22.30 +    }
   22.31 +    flag = 1;
   22.32 +    gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
   22.33 +    ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
   22.34 +    ppne = ppns + PSIZE(entry->ps);
   22.35 +    if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
   22.36 +        flag = 0;
   22.37 +    if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
   22.38 +       thash_insert(hcb->ts->vhpt, entry, va);
   22.39      return ;
   22.40  }
   22.41  
   22.42 @@ -427,18 +442,22 @@ static void rem_thash(thash_cb_t *hcb, t
   22.43      thash_data_t    *hash_table, *p, *q;
   22.44      thash_internal_t *priv = &hcb->priv;
   22.45      int idx;
   22.46 -    
   22.47 +
   22.48      hash_table = priv->hash_base;
   22.49      if ( hash_table == entry ) {
   22.50 -        __rem_hash_head (hcb, entry);
   22.51 +//        if ( PURGABLE_ENTRY(hcb, entry) ) {
   22.52 +            __rem_hash_head (hcb, entry);
   22.53 +//        }
   22.54          return ;
   22.55      }
   22.56      // remove from collision chain
   22.57      p = hash_table;
   22.58      for ( q=p->next; q; q = p->next ) {
   22.59 -        if ( q == entry ) {
   22.60 -            p->next = q->next;
   22.61 -            __rem_chain(hcb, entry);
   22.62 +        if ( q == entry ){
   22.63 +//            if ( PURGABLE_ENTRY(hcb,q ) ) {
   22.64 +                p->next = q->next;
   22.65 +                __rem_chain(hcb, entry);
   22.66 +//            }
   22.67              return ;
   22.68          }
   22.69          p = q;
   22.70 @@ -939,7 +958,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
   22.71      if ( sanity_check == 0 ) return;
   22.72      sanity_check --;
   22.73      s_sect.v = 0;
   22.74 -//    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
   22.75 +//    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
   22.76  //    if ( page == NULL ) {
   22.77  //        panic("No enough contiguous memory for init_domain_mm\n");
   22.78  //    };
    23.1 --- a/xen/arch/ia64/xenasm.S	Wed Aug 03 09:35:16 2005 +0000
    23.2 +++ b/xen/arch/ia64/xenasm.S	Wed Aug 03 09:35:38 2005 +0000
    23.3 @@ -48,10 +48,11 @@ END(platform_is_hp_ski)
    23.4  // FIXME? Note that this turns off the DB bit (debug)
    23.5  #define PSR_BITS_TO_SET	IA64_PSR_BN
    23.6  
    23.7 +//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
    23.8  GLOBAL_ENTRY(ia64_new_rr7)
    23.9  	// not sure this unwind statement is correct...
   23.10  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
   23.11 -	alloc loc1 = ar.pfs, 2, 7, 0, 0
   23.12 +	alloc loc1 = ar.pfs, 3, 8, 0, 0
   23.13  1:	{
   23.14  	  mov r28  = in0		// copy procedure index
   23.15  	  mov r8   = ip			// save ip to compute branch
   23.16 @@ -72,6 +73,10 @@ 1:	{
   23.17  	;;
   23.18  	tpa loc5=loc5			// grab this BEFORE changing rr7
   23.19  	;;
   23.20 +	mov loc7=in2			// arch_vcpu_info_t
   23.21 +	;;
   23.22 +	tpa loc7=loc7			// grab this BEFORE changing rr7
   23.23 +	;;
   23.24  	mov loc3 = psr			// save psr
   23.25  	adds r8  = 1f-1b,r8		// calculate return address for call
   23.26  	;;
   23.27 @@ -206,6 +211,25 @@ 1:
   23.28  	;;
   23.29  	itr.d dtr[r25]=r23		// wire in new mapping...
   23.30  	;;
   23.31 +	// Map for arch_vcpu_info_t
   23.32 +	movl r22=SHARED_ARCHINFO_ADDR
   23.33 +	;;
   23.34 +	movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
   23.35 +	;;
   23.36 +	mov r21=loc7			// saved sharedinfo physical address
   23.37 +	;;
   23.38 +	or r23=r25,r21			// construct PA | page properties
   23.39 +	mov r24=PAGE_SHIFT<<2
   23.40 +	;;
   23.41 +	ptr.d	r22,r24
   23.42 +	;;
   23.43 +	mov cr.itir=r24
   23.44 +	mov cr.ifa=r22
   23.45 +	;;
   23.46 +	mov r25=IA64_TR_ARCH_INFO
   23.47 +	;;
   23.48 +	itr.d dtr[r25]=r23		// wire in new mapping...
   23.49 +	;;
   23.50  
   23.51  	// done, switch back to virtual and return
   23.52  	mov r16=loc3			// r16= original psr
   23.53 @@ -278,12 +302,9 @@ GLOBAL_ENTRY(__get_domain_bundle)
   23.54  END(__get_domain_bundle)
   23.55  
   23.56  GLOBAL_ENTRY(dorfirfi)
   23.57 -#define SI_CR_IIP_OFFSET 0x10
   23.58 -#define SI_CR_IPSR_OFFSET 0x08
   23.59 -#define SI_CR_IFS_OFFSET 0x18
   23.60 -        movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
   23.61 -        movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
   23.62 -        movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
   23.63 +        movl r16 = XSI_IIP
   23.64 +        movl r17 = XSI_IPSR
   23.65 +        movl r18 = XSI_IFS
   23.66  	;;
   23.67  	ld8 r16 = [r16]
   23.68  	ld8 r17 = [r17]
    24.1 --- a/xen/arch/ia64/xenmem.c	Wed Aug 03 09:35:16 2005 +0000
    24.2 +++ b/xen/arch/ia64/xenmem.c	Wed Aug 03 09:35:38 2005 +0000
    24.3 @@ -65,7 +65,7 @@ paging_init (void)
    24.4  #else // CONFIG_VTI
    24.5  
    24.6  	/* Allocate and map the machine-to-phys table */
    24.7 -	if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
    24.8 +	if ((pg = alloc_domheap_pages(NULL, 10, 0)) == NULL)
    24.9  		panic("Not enough memory to bootstrap Xen.\n");
   24.10  	memset(page_to_virt(pg), 0x55, 16UL << 20);
   24.11  #endif // CONFIG_VTI
    25.1 --- a/xen/arch/ia64/xenmisc.c	Wed Aug 03 09:35:16 2005 +0000
    25.2 +++ b/xen/arch/ia64/xenmisc.c	Wed Aug 03 09:35:38 2005 +0000
    25.3 @@ -103,11 +103,13 @@ while(1);
    25.4  }
    25.5  #endif
    25.6  
    25.7 +#ifndef CONFIG_VTI
    25.8  unsigned long __hypercall_create_continuation(
    25.9  	unsigned int op, unsigned int nr_args, ...)
   25.10  {
   25.11  	printf("__hypercall_create_continuation: not implemented!!!\n");
   25.12  }
   25.13 +#endif
   25.14  
   25.15  ///////////////////////////////
   25.16  
   25.17 @@ -115,14 +117,17 @@ unsigned long __hypercall_create_continu
   25.18  // from arch/x86/apic.c
   25.19  ///////////////////////////////
   25.20  
   25.21 +extern unsigned long domain0_ready;
   25.22 +
   25.23  int reprogram_ac_timer(s_time_t timeout)
   25.24  {
   25.25  	struct vcpu *v = current;
   25.26  
   25.27  #ifdef CONFIG_VTI
   25.28 -	if(VMX_DOMAIN(v))
   25.29 +//	if(VMX_DOMAIN(v))
   25.30  		return 1;
   25.31  #endif // CONFIG_VTI
   25.32 +	if (!domain0_ready) return 1;
   25.33  	local_cpu_data->itm_next = timeout;
   25.34  	if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout);
   25.35  	else vcpu_set_next_timer(current);
   25.36 @@ -177,6 +182,22 @@ void show_registers(struct pt_regs *regs
   25.37  	printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
   25.38  }
   25.39  
   25.40 +int is_kernel_text(unsigned long addr)
   25.41 +{
   25.42 +	extern char _stext[], _etext[];
   25.43 +	if (addr >= (unsigned long) _stext &&
   25.44 +	    addr <= (unsigned long) _etext)
   25.45 +	    return 1;
   25.46 +
   25.47 +	return 0;
   25.48 +}
   25.49 +
   25.50 +unsigned long kernel_text_end(void)
   25.51 +{
   25.52 +	extern char _etext[];
   25.53 +	return (unsigned long) _etext;
   25.54 +}
   25.55 +
   25.56  ///////////////////////////////
   25.57  // from common/keyhandler.c
   25.58  ///////////////////////////////
   25.59 @@ -291,8 +312,8 @@ void context_switch(struct vcpu *prev, s
   25.60  static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
   25.61  static int i = 100;
   25.62  int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
   25.63 -if (!cnt[id]--) { printk("%x",id); cnt[id] = 500; }
   25.64 -if (!i--) { printk("+",id); cnt[id] = 1000; }
   25.65 +if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
   25.66 +if (!i--) { printk("+",id); i = 1000000; }
   25.67  }
   25.68  	clear_bit(_VCPUF_running, &prev->vcpu_flags);
   25.69  	//if (!is_idle_task(next->domain) )
    26.1 --- a/xen/arch/ia64/xensetup.c	Wed Aug 03 09:35:16 2005 +0000
    26.2 +++ b/xen/arch/ia64/xensetup.c	Wed Aug 03 09:35:38 2005 +0000
    26.3 @@ -136,6 +136,12 @@ struct ns16550_defaults ns16550_com1 = {
    26.4      .stop_bits = 1
    26.5  };
    26.6  
    26.7 +struct ns16550_defaults ns16550_com2 = {
    26.8 +    .data_bits = 8,
    26.9 +    .parity    = 'n',
   26.10 +    .stop_bits = 1
   26.11 +};
   26.12 +
   26.13  void start_kernel(void)
   26.14  {
   26.15      unsigned char *cmdline;
   26.16 @@ -158,7 +164,13 @@ void start_kernel(void)
   26.17  
   26.18      /* We initialise the serial devices very early so we can get debugging. */
   26.19      if (running_on_sim) hpsim_serial_init();
   26.20 -    else ns16550_init(0, &ns16550_com1);
   26.21 +    else {
   26.22 +	ns16550_init(0, &ns16550_com1);
   26.23 +	/* Also init com2 for Tiger4. */
   26.24 +	ns16550_com2.io_base = 0x2f8;
   26.25 +	ns16550_com2.irq     = 3;
   26.26 +	ns16550_init(1, &ns16550_com2);
   26.27 +    }
   26.28      serial_init_preirq();
   26.29  
   26.30      init_console();
    27.1 --- a/xen/arch/ia64/xentime.c	Wed Aug 03 09:35:16 2005 +0000
    27.2 +++ b/xen/arch/ia64/xentime.c	Wed Aug 03 09:35:38 2005 +0000
    27.3 @@ -27,6 +27,7 @@
    27.4  #include <asm/sections.h>
    27.5  #include <asm/system.h>
    27.6  #ifdef XEN
    27.7 +#include <asm/vcpu.h>
    27.8  #include <linux/jiffies.h>	// not included by xen/sched.h
    27.9  #endif
   27.10  #include <xen/softirq.h>
   27.11 @@ -143,8 +144,8 @@ xen_timer_interrupt (int irq, void *dev_
   27.12  	if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
   27.13  		printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
   27.14  			regs->cr_iip,
   27.15 -			current->vcpu_info->arch.interrupt_delivery_enabled,
   27.16 -			current->vcpu_info->arch.pending_interruption);
   27.17 +			VCPU(current,interrupt_delivery_enabled),
   27.18 +			VCPU(current,pending_interruption));
   27.19  		count = 0;
   27.20  	}
   27.21  #endif
   27.22 @@ -159,7 +160,7 @@ xen_timer_interrupt (int irq, void *dev_
   27.23  		// We have to ensure that domain0 is launched before we
   27.24  		// call vcpu_timer_expired on it
   27.25  		//domain0_ready = 1; // moved to xensetup.c
   27.26 -		current->vcpu_info->arch.pending_interruption = 1;
   27.27 +		VCPU(current,pending_interruption) = 1;
   27.28  	}
   27.29  	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
   27.30  		vcpu_pend_timer(dom0->vcpu[0]);
    28.1 --- a/xen/common/xmalloc.c	Wed Aug 03 09:35:16 2005 +0000
    28.2 +++ b/xen/common/xmalloc.c	Wed Aug 03 09:35:38 2005 +0000
    28.3 @@ -111,7 +111,9 @@ void *_xmalloc(size_t size, size_t align
    28.4      unsigned long flags;
    28.5  
    28.6      /* We currently always return cacheline aligned. */
    28.7 +#ifndef __ia64__
    28.8      BUG_ON(align > SMP_CACHE_BYTES);
    28.9 +#endif
   28.10  
   28.11      /* Add room for header, pad to align next header. */
   28.12      size += sizeof(struct xmalloc_hdr);
    29.1 --- a/xen/include/asm-ia64/config.h	Wed Aug 03 09:35:16 2005 +0000
    29.2 +++ b/xen/include/asm-ia64/config.h	Wed Aug 03 09:35:38 2005 +0000
    29.3 @@ -230,6 +230,7 @@ struct screen_info { };
    29.4  
    29.5  #define FORCE_CRASH()	asm("break 0;;");
    29.6  
    29.7 +void dummy_called(char *function);
    29.8  #define dummy()	dummy_called(__FUNCTION__)
    29.9  
   29.10  // these declarations got moved at some point, find a better place for them
    30.1 --- a/xen/include/asm-ia64/domain.h	Wed Aug 03 09:35:16 2005 +0000
    30.2 +++ b/xen/include/asm-ia64/domain.h	Wed Aug 03 09:35:38 2005 +0000
    30.3 @@ -8,6 +8,7 @@
    30.4  #include <asm/vmmu.h>
    30.5  #include <asm/regionreg.h>
    30.6  #include <public/arch-ia64.h>
    30.7 +#include <asm/vmx_platform.h>
    30.8  #endif // CONFIG_VTI
    30.9  #include <xen/list.h>
   30.10  
   30.11 @@ -42,6 +43,7 @@ struct arch_domain {
   30.12       * max_pages in domain struct, which indicates maximum memory size
   30.13       */
   30.14      unsigned long max_pfn;
   30.15 +    struct virutal_platform_def     vmx_platform;
   30.16  #endif  //CONFIG_VTI
   30.17      u64 xen_vastart;
   30.18      u64 xen_vaend;
   30.19 @@ -88,6 +90,7 @@ struct arch_vcpu {
   30.20      thash_cb_t *vtlb;
   30.21      char irq_new_pending;
   30.22      char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
   30.23 +    char hypercall_continuation;
   30.24      //for phycial  emulation
   30.25      unsigned long old_rsc;
   30.26      int mode_flags;
    31.1 --- a/xen/include/asm-ia64/event.h	Wed Aug 03 09:35:16 2005 +0000
    31.2 +++ b/xen/include/asm-ia64/event.h	Wed Aug 03 09:35:38 2005 +0000
    31.3 @@ -9,6 +9,9 @@
    31.4  #ifndef __ASM_EVENT_H__
    31.5  #define __ASM_EVENT_H__
    31.6  
    31.7 +#include <public/arch-ia64.h>
    31.8 +#include <asm/vcpu.h>
    31.9 +
   31.10  static inline void evtchn_notify(struct vcpu *v)
   31.11  {
   31.12  	vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
    32.1 --- a/xen/include/asm-ia64/ia64_int.h	Wed Aug 03 09:35:16 2005 +0000
    32.2 +++ b/xen/include/asm-ia64/ia64_int.h	Wed Aug 03 09:35:38 2005 +0000
    32.3 @@ -37,7 +37,9 @@
    32.4  #define	IA64_RFI_IN_PROGRESS	0x0002
    32.5  #define IA64_RETRY              0x0003
    32.6  #ifdef  CONFIG_VTI
    32.7 -#define IA64_FAULT		0x0002
    32.8 +#undef  IA64_NO_FAULT
    32.9 +#define	IA64_NO_FAULT		0x0000
   32.10 +#define IA64_FAULT		0x0001
   32.11  #endif      //CONFIG_VTI
   32.12  #define IA64_FORCED_IFA         0x0004
   32.13  #define	IA64_ILLOP_FAULT	(IA64_GENEX_VECTOR | 0x00)
    33.1 --- a/xen/include/asm-ia64/privop.h	Wed Aug 03 09:35:16 2005 +0000
    33.2 +++ b/xen/include/asm-ia64/privop.h	Wed Aug 03 09:35:38 2005 +0000
    33.3 @@ -138,14 +138,32 @@ typedef union U_INST64_M47 {
    33.4      IA64_INST inst;
    33.5      struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
    33.6  } INST64_M47;
    33.7 +
    33.8  typedef union U_INST64_M1{
    33.9      IA64_INST inst;
   33.10      struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   33.11  } INST64_M1;
   33.12 +
   33.13 +typedef union U_INST64_M2{
   33.14 +    IA64_INST inst;
   33.15 +    struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   33.16 +} INST64_M2;
   33.17 +
   33.18 +typedef union U_INST64_M3{
   33.19 +    IA64_INST inst;
   33.20 +    struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
   33.21 +} INST64_M3;
   33.22 +
   33.23  typedef union U_INST64_M4 {
   33.24      IA64_INST inst;
   33.25      struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   33.26  } INST64_M4;
   33.27 +
   33.28 +typedef union U_INST64_M5 {
   33.29 +    IA64_INST inst;
   33.30 +    struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
   33.31 +} INST64_M5;
   33.32 +
   33.33  typedef union U_INST64_M6 {
   33.34      IA64_INST inst;
   33.35      struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   33.36 @@ -166,7 +184,10 @@ typedef union U_INST64 {
   33.37      INST64_I28 I28;	// mov from ar (I unit)
   33.38  #ifdef CONFIG_VTI
   33.39      INST64_M1  M1;  // ld integer
   33.40 +    INST64_M2  M2;
   33.41 +    INST64_M3  M3;
   33.42      INST64_M4  M4;  // st integer
   33.43 +    INST64_M5  M5;
   33.44      INST64_M6  M6;  // ldfd floating pointer
   33.45  #endif // CONFIG_VTI
   33.46      INST64_M28 M28;	// purge translation cache entry
    34.1 --- a/xen/include/asm-ia64/regionreg.h	Wed Aug 03 09:35:16 2005 +0000
    34.2 +++ b/xen/include/asm-ia64/regionreg.h	Wed Aug 03 09:35:38 2005 +0000
    34.3 @@ -39,4 +39,7 @@ typedef union ia64_rr {
    34.4  #define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8)
    34.5  #define RR_RID_MASK 0x00000000ffffff00L
    34.6  
    34.7 +
    34.8 +int set_one_rr(unsigned long rr, unsigned long val);
    34.9 +
   34.10  #endif		/* !_REGIONREG_H_ */
    35.1 --- a/xen/include/asm-ia64/vcpu.h	Wed Aug 03 09:35:16 2005 +0000
    35.2 +++ b/xen/include/asm-ia64/vcpu.h	Wed Aug 03 09:35:38 2005 +0000
    35.3 @@ -13,13 +13,9 @@ typedef	int BOOLEAN;
    35.4  struct vcpu;
    35.5  typedef	struct vcpu VCPU;
    35.6  
    35.7 -// NOTE: The actual VCPU structure (struct virtualcpu) is defined in
    35.8 -// thread.h.  Moving it to here caused a lot of files to change, so
    35.9 -// for now, we'll leave well enough alone.
   35.10  typedef struct pt_regs REGS;
   35.11 -//#define PSCB(vcpu)		(((struct spk_thread_t *)vcpu)->pscb)
   35.12 -//#define vcpu_regs(vcpu)		&((struct spk_thread_t *)vcpu)->thread_regs
   35.13 -//#define vcpu_thread(vcpu)	((struct spk_thread_t *)vcpu)
   35.14 +
   35.15 +#define VCPU(_v,_x)	_v->vcpu_info->arch.privregs->_x
   35.16  
   35.17  #define PRIVOP_ADDR_COUNT
   35.18  #ifdef PRIVOP_ADDR_COUNT
    36.1 --- a/xen/include/asm-ia64/vmx.h	Wed Aug 03 09:35:16 2005 +0000
    36.2 +++ b/xen/include/asm-ia64/vmx.h	Wed Aug 03 09:35:38 2005 +0000
    36.3 @@ -23,6 +23,7 @@
    36.4  #define _ASM_IA64_VT_H
    36.5  
    36.6  #define RR7_SWITCH_SHIFT	12	/* 4k enough */
    36.7 +#include <public/io/ioreq.h>
    36.8  
    36.9  extern void identify_vmx_feature(void);
   36.10  extern unsigned int vmx_enabled;
   36.11 @@ -35,6 +36,22 @@ extern vmx_insert_double_mapping(u64,u64
   36.12  extern void vmx_purge_double_mapping(u64, u64, u64);
   36.13  extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
   36.14  
   36.15 +
   36.16  extern void vmx_wait_io(void);
   36.17  extern void vmx_io_assist(struct vcpu *v);
   36.18 +
   36.19 +static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
   36.20 +{
   36.21 +    return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
   36.22 +}
   36.23 +
   36.24 +static inline int iopacket_port(struct domain *d)
   36.25 +{
   36.26 +    return ((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
   36.27 +}
   36.28 +
   36.29 +static inline shared_iopage_t *get_sp(struct domain *d)
   36.30 +{
   36.31 +    return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;
   36.32 +}
   36.33  #endif /* _ASM_IA64_VT_H */
    37.1 --- a/xen/include/asm-ia64/vmx_uaccess.h	Wed Aug 03 09:35:16 2005 +0000
    37.2 +++ b/xen/include/asm-ia64/vmx_uaccess.h	Wed Aug 03 09:35:38 2005 +0000
    37.3 @@ -40,6 +40,8 @@
    37.4   */
    37.5  asm (".section \"__ex_table\", \"a\"\n\t.previous");
    37.6  
    37.7 +/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
    37.8 +#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
    37.9  /* For back compatibility */
   37.10  #define __access_ok(addr, size, segment)	1
   37.11  #define access_ok(addr, size, segment)	__access_ok((addr), (size), (segment))
    38.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Wed Aug 03 09:35:16 2005 +0000
    38.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Wed Aug 03 09:35:38 2005 +0000
    38.3 @@ -105,6 +105,10 @@ extern void vtm_set_itv(VCPU *vcpu);
    38.4  extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
    38.5  extern void vtm_domain_out(VCPU *vcpu);
    38.6  extern void vtm_domain_in(VCPU *vcpu);
    38.7 +#ifdef V_IOSAPIC_READY
    38.8 +extern void vlapic_update_ext_irq(VCPU *vcpu);
    38.9 +extern void vlapic_update_shared_info(VCPU *vcpu);
   38.10 +#endif
   38.11  extern void vlsapic_reset(VCPU *vcpu);
   38.12  extern int vmx_check_pending_irq(VCPU *vcpu);
   38.13  extern void guest_write_eoi(VCPU *vcpu);
   38.14 @@ -399,6 +403,9 @@ IA64FAULT
   38.15  vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
   38.16  {
   38.17      VPD_CR(vcpu,lid)=val;
   38.18 +#ifdef V_IOSAPIC_READY
   38.19 +    vlapic_update_shared_info(vcpu);
   38.20 +#endif
   38.21      return IA64_NO_FAULT;
   38.22  }
   38.23  extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
    39.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Wed Aug 03 09:35:16 2005 +0000
    39.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Wed Aug 03 09:35:38 2005 +0000
    39.3 @@ -25,37 +25,10 @@
    39.4  #ifndef __ASSEMBLY__
    39.5  
    39.6  #include <asm/vtm.h>
    39.7 -#include <asm/vmx_platform.h>
    39.8  #include <public/arch-ia64.h>
    39.9  
   39.10  #define VPD_SHIFT	17	/* 128K requirement */
   39.11  #define VPD_SIZE	(1 << VPD_SHIFT)
   39.12 -typedef union {
   39.13 -	unsigned long value;
   39.14 -	struct {
   39.15 -		int 	a_int:1;
   39.16 -		int 	a_from_int_cr:1;
   39.17 -		int	a_to_int_cr:1;
   39.18 -		int	a_from_psr:1;
   39.19 -		int	a_from_cpuid:1;
   39.20 -		int	a_cover:1;
   39.21 -		int	a_bsw:1;
   39.22 -		long	reserved:57;
   39.23 -	};
   39.24 -} vac_t;
   39.25 -
   39.26 -typedef union {
   39.27 -	unsigned long value;
   39.28 -	struct {
   39.29 -		int 	d_vmsw:1;
   39.30 -		int 	d_extint:1;
   39.31 -		int	d_ibr_dbr:1;
   39.32 -		int	d_pmc:1;
   39.33 -		int	d_to_pmd:1;
   39.34 -		int	d_itm:1;
   39.35 -		long	reserved:58;
   39.36 -	};
   39.37 -} vdc_t;
   39.38  
   39.39  typedef struct {
   39.40  	unsigned long	dcr;		// CR0
   39.41 @@ -89,29 +62,6 @@ typedef struct {
   39.42  	unsigned long	rsv6[46];
   39.43  } cr_t;
   39.44  
   39.45 -typedef struct vpd {
   39.46 -	vac_t			vac;
   39.47 -	vdc_t			vdc;
   39.48 -	unsigned long		virt_env_vaddr;
   39.49 -	unsigned long		reserved1[29];
   39.50 -	unsigned long		vhpi;
   39.51 -	unsigned long		reserved2[95];
   39.52 -	unsigned long		vgr[16];
   39.53 -	unsigned long		vbgr[16];
   39.54 -	unsigned long		vnat;
   39.55 -	unsigned long		vbnat;
   39.56 -	unsigned long		vcpuid[5];
   39.57 -	unsigned long		reserved3[11];
   39.58 -	unsigned long		vpsr;
   39.59 -	unsigned long		vpr;
   39.60 -	unsigned long		reserved4[76];
   39.61 -	unsigned long		vcr[128];
   39.62 -	unsigned long		reserved5[128];
   39.63 -	unsigned long		reserved6[3456];
   39.64 -	unsigned long		vmm_avail[128];
   39.65 -	unsigned long		reserved7[4096];
   39.66 -} vpd_t;
   39.67 -
   39.68  void vmx_enter_scheduler(void);
   39.69  
   39.70  //FIXME: Map for LID to vcpu, Eddie
   39.71 @@ -133,7 +83,6 @@ struct arch_vmx_struct {
   39.72      unsigned long   rfi_ipsr;
   39.73      unsigned long   rfi_ifs;
   39.74  	unsigned long	in_service[4];	// vLsapic inservice IRQ bits
   39.75 -	struct virutal_platform_def     vmx_platform;
   39.76  	unsigned long   flags;
   39.77  };
   39.78  
   39.79 @@ -175,7 +124,6 @@ extern unsigned int opt_vmx_debug_level;
   39.80  
   39.81  #endif //__ASSEMBLY__
   39.82  
   39.83 -
   39.84  // VPD field offset
   39.85  #define VPD_VAC_START_OFFSET		0
   39.86  #define VPD_VDC_START_OFFSET		8
    40.1 --- a/xen/include/asm-ia64/xensystem.h	Wed Aug 03 09:35:16 2005 +0000
    40.2 +++ b/xen/include/asm-ia64/xensystem.h	Wed Aug 03 09:35:38 2005 +0000
    40.3 @@ -21,10 +21,13 @@
    40.4  #define XEN_RR7_SWITCH_STUB	 0xb700000000000000
    40.5  #endif // CONFIG_VTI
    40.6  
    40.7 +#define XEN_START_ADDR		 0xf000000000000000
    40.8  #define KERNEL_START		 0xf000000004000000
    40.9  #define PERCPU_ADDR		 0xf100000000000000-PERCPU_PAGE_SIZE
   40.10  #define SHAREDINFO_ADDR		 0xf100000000000000
   40.11  #define VHPT_ADDR		 0xf200000000000000
   40.12 +#define SHARED_ARCHINFO_ADDR	 0xf300000000000000
   40.13 +#define XEN_END_ADDR		 0xf400000000000000
   40.14  
   40.15  #ifndef __ASSEMBLY__
   40.16  
   40.17 @@ -58,8 +61,9 @@ extern struct task_struct *vmx_ia64_swit
   40.18  		ia64_save_extra(prev);								 \
   40.19  	if (IA64_HAS_EXTRA_STATE(next))								 \
   40.20  		ia64_load_extra(next);								 \
   40.21 -	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
   40.22 +	/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/			 \
   40.23  	(last) = ia64_switch_to((next));							 \
   40.24 +	vcpu_set_next_timer(current);								\
   40.25  } while (0)
   40.26  #endif // CONFIG_VTI
   40.27  
    41.1 --- a/xen/include/public/arch-ia64.h	Wed Aug 03 09:35:16 2005 +0000
    41.2 +++ b/xen/include/public/arch-ia64.h	Wed Aug 03 09:35:38 2005 +0000
    41.3 @@ -140,38 +140,121 @@ struct pt_regs {
    41.4  	struct pt_fpreg f11;		/* scratch */
    41.5  };
    41.6  
    41.7 +typedef union {
    41.8 +	unsigned long value;
    41.9 +	struct {
   41.10 +		int 	a_int:1;
   41.11 +		int 	a_from_int_cr:1;
   41.12 +		int	a_to_int_cr:1;
   41.13 +		int	a_from_psr:1;
   41.14 +		int	a_from_cpuid:1;
   41.15 +		int	a_cover:1;
   41.16 +		int	a_bsw:1;
   41.17 +		long	reserved:57;
   41.18 +	};
   41.19 +} vac_t;
   41.20 +
   41.21 +typedef union {
   41.22 +	unsigned long value;
   41.23 +	struct {
   41.24 +		int 	d_vmsw:1;
   41.25 +		int 	d_extint:1;
   41.26 +		int	d_ibr_dbr:1;
   41.27 +		int	d_pmc:1;
   41.28 +		int	d_to_pmd:1;
   41.29 +		int	d_itm:1;
   41.30 +		long	reserved:58;
   41.31 +	};
   41.32 +} vdc_t;
   41.33 +
   41.34  typedef struct {
   41.35 -	unsigned long ipsr;
   41.36 -	unsigned long iip;
   41.37 -	unsigned long ifs;
   41.38 -	unsigned long precover_ifs;
   41.39 -	unsigned long isr;
   41.40 -	unsigned long ifa;
   41.41 -	unsigned long iipa;
   41.42 -	unsigned long iim;
   41.43 -	unsigned long unat;  // not sure if this is needed until NaT arch is done
   41.44 -	unsigned long tpr;
   41.45 -	unsigned long iha;
   41.46 -	unsigned long itir;
   41.47 -	unsigned long itv;
   41.48 -	unsigned long pmv;
   41.49 -	unsigned long cmcv;
   41.50 -	unsigned long pta;
   41.51 -	int interrupt_collection_enabled; // virtual psr.ic
   41.52 -	int interrupt_delivery_enabled; // virtual psr.i
   41.53 -	int pending_interruption;
   41.54 -	int incomplete_regframe;	// see SDM vol2 6.8
   41.55 -	unsigned long delivery_mask[4];
   41.56 -	int metaphysical_mode;	// 1 = use metaphys mapping, 0 = use virtual
   41.57 -	int banknum;	// 0 or 1, which virtual register bank is active
   41.58 -	unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
   41.59 -	unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
   41.60 -	unsigned long rrs[8];	// region registers
   41.61 -	unsigned long krs[8];	// kernel registers
   41.62 -	unsigned long pkrs[8];	// protection key registers
   41.63 -	unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
   41.64 +	vac_t			vac;
   41.65 +	vdc_t			vdc;
   41.66 +	unsigned long		virt_env_vaddr;
   41.67 +	unsigned long		reserved1[29];
   41.68 +	unsigned long		vhpi;
   41.69 +	unsigned long		reserved2[95];
   41.70 +	union {
   41.71 +	  unsigned long		vgr[16];
   41.72 +  	  unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
   41.73 +	};
   41.74 +	union {
   41.75 +	  unsigned long		vbgr[16];
   41.76 +	  unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
   41.77 +	};
   41.78 +	unsigned long		vnat;
   41.79 +	unsigned long		vbnat;
   41.80 +	unsigned long		vcpuid[5];
   41.81 +	unsigned long		reserved3[11];
   41.82 +	unsigned long		vpsr;
   41.83 +	unsigned long		vpr;
   41.84 +	unsigned long		reserved4[76];
   41.85 +	union {
   41.86 +	  unsigned long		vcr[128];
   41.87 +          struct {
   41.88 +  	    unsigned long	dcr;		// CR0
   41.89 +	    unsigned long	itm;
   41.90 +	    unsigned long	iva;
   41.91 +	    unsigned long	rsv1[5];
   41.92 +	    unsigned long	pta;		// CR8
   41.93 +	    unsigned long	rsv2[7];
   41.94 +	    unsigned long	ipsr;		// CR16
   41.95 +	    unsigned long	isr;
   41.96 +	    unsigned long	rsv3;
   41.97 +	    unsigned long	iip;
   41.98 +	    unsigned long	ifa;
   41.99 +	    unsigned long	itir;
  41.100 +	    unsigned long	iipa;
  41.101 +	    unsigned long	ifs;
  41.102 +	    unsigned long	iim;		// CR24
  41.103 +	    unsigned long	iha;
  41.104 +	    unsigned long	rsv4[38];
  41.105 +	    unsigned long	lid;		// CR64
  41.106 +	    unsigned long	ivr;
  41.107 +	    unsigned long	tpr;
  41.108 +	    unsigned long	eoi;
  41.109 +	    unsigned long	irr[4];
  41.110 +	    unsigned long	itv;		// CR72
  41.111 +	    unsigned long	pmv;
  41.112 +	    unsigned long	cmcv;
  41.113 +	    unsigned long	rsv5[5];
  41.114 +	    unsigned long	lrr0;		// CR80
  41.115 +	    unsigned long	lrr1;
  41.116 +	    unsigned long	rsv6[46];
  41.117 +          };
  41.118 +	};
  41.119 +	union {
  41.120 +	  unsigned long		reserved5[128];
  41.121 +	  struct {
  41.122 +	    unsigned long precover_ifs;
  41.123 +	    unsigned long unat;  // not sure if this is needed until NaT arch is done
  41.124 +	    int interrupt_collection_enabled; // virtual psr.ic
  41.125 +	    int interrupt_delivery_enabled; // virtual psr.i
  41.126 +	    int pending_interruption;
  41.127 +	    int incomplete_regframe;	// see SDM vol2 6.8
  41.128 +	    unsigned long delivery_mask[4];
  41.129 +	    int metaphysical_mode;	// 1 = use metaphys mapping, 0 = use virtual
  41.130 +	    int banknum;	// 0 or 1, which virtual register bank is active
  41.131 +	    unsigned long rrs[8];	// region registers
  41.132 +	    unsigned long krs[8];	// kernel registers
  41.133 +	    unsigned long pkrs[8];	// protection key registers
  41.134 +	    unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
  41.135 +	  };
  41.136 +        };
  41.137 +#ifdef CONFIG_VTI
  41.138 +	unsigned long		reserved6[3456];
  41.139 +	unsigned long		vmm_avail[128];
  41.140 +	unsigned long		reserved7[4096];
  41.141 +#endif
  41.142 +} mapped_regs_t;
  41.143 +
  41.144 +typedef struct {
  41.145 +	mapped_regs_t *privregs;
  41.146  	int evtchn_vector;
  41.147  } arch_vcpu_info_t;
  41.148 +
  41.149 +typedef mapped_regs_t vpd_t;
  41.150 +
  41.151  #define __ARCH_HAS_VCPU_INFO
  41.152  
  41.153  typedef struct {