direct-io.hg

changeset 6444:9312a3e8a6f8

merge?
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 26 09:05:43 2005 +0000 (2005-08-26)
parents 48202c7c709a 8799d14bef77
children 83c73802f02a
files .hgignore Config.mk Makefile buildconfigs/Rules.mk buildconfigs/mk.linux-2.6-xen buildconfigs/mk.linux-2.6-xen0 buildconfigs/mk.linux-2.6-xenU docs/src/user.tex extras/mini-os/include/hypervisor.h extras/mini-os/include/mm.h extras/mini-os/include/time.h extras/mini-os/kernel.c extras/mini-os/mm.c extras/mini-os/time.c linux-2.4-xen-sparse/Makefile linux-2.4-xen-sparse/arch/xen/Makefile linux-2.4-xen-sparse/arch/xen/boot/Makefile linux-2.4-xen-sparse/arch/xen/config.in linux-2.4-xen-sparse/arch/xen/defconfig-xen0 linux-2.4-xen-sparse/arch/xen/defconfig-xenU linux-2.4-xen-sparse/arch/xen/drivers/balloon/Makefile linux-2.4-xen-sparse/arch/xen/drivers/blkif/Makefile linux-2.4-xen-sparse/arch/xen/drivers/blkif/backend/Makefile linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/common.h linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c linux-2.4-xen-sparse/arch/xen/drivers/console/Makefile linux-2.4-xen-sparse/arch/xen/drivers/dom0/Makefile linux-2.4-xen-sparse/arch/xen/drivers/evtchn/Makefile linux-2.4-xen-sparse/arch/xen/drivers/netif/Makefile linux-2.4-xen-sparse/arch/xen/drivers/netif/backend/Makefile linux-2.4-xen-sparse/arch/xen/drivers/netif/frontend/Makefile linux-2.4-xen-sparse/arch/xen/kernel/Makefile linux-2.4-xen-sparse/arch/xen/kernel/entry.S linux-2.4-xen-sparse/arch/xen/kernel/head.S linux-2.4-xen-sparse/arch/xen/kernel/i386_ksyms.c linux-2.4-xen-sparse/arch/xen/kernel/irq.c linux-2.4-xen-sparse/arch/xen/kernel/ldt.c linux-2.4-xen-sparse/arch/xen/kernel/pci-pc.c linux-2.4-xen-sparse/arch/xen/kernel/process.c linux-2.4-xen-sparse/arch/xen/kernel/setup.c linux-2.4-xen-sparse/arch/xen/kernel/signal.c linux-2.4-xen-sparse/arch/xen/kernel/time.c linux-2.4-xen-sparse/arch/xen/kernel/traps.c linux-2.4-xen-sparse/arch/xen/lib/Makefile linux-2.4-xen-sparse/arch/xen/lib/delay.c linux-2.4-xen-sparse/arch/xen/mm/Makefile linux-2.4-xen-sparse/arch/xen/mm/fault.c linux-2.4-xen-sparse/arch/xen/mm/init.c linux-2.4-xen-sparse/arch/xen/mm/ioremap.c linux-2.4-xen-sparse/arch/xen/vmlinux.lds linux-2.4-xen-sparse/drivers/block/ll_rw_blk.c linux-2.4-xen-sparse/drivers/char/Makefile linux-2.4-xen-sparse/drivers/char/mem.c linux-2.4-xen-sparse/drivers/char/tty_io.c linux-2.4-xen-sparse/drivers/scsi/aic7xxx/Makefile linux-2.4-xen-sparse/include/asm-xen/bugs.h linux-2.4-xen-sparse/include/asm-xen/desc.h linux-2.4-xen-sparse/include/asm-xen/fixmap.h linux-2.4-xen-sparse/include/asm-xen/highmem.h linux-2.4-xen-sparse/include/asm-xen/hw_irq.h linux-2.4-xen-sparse/include/asm-xen/io.h linux-2.4-xen-sparse/include/asm-xen/irq.h linux-2.4-xen-sparse/include/asm-xen/keyboard.h linux-2.4-xen-sparse/include/asm-xen/mmu_context.h linux-2.4-xen-sparse/include/asm-xen/module.h linux-2.4-xen-sparse/include/asm-xen/page.h linux-2.4-xen-sparse/include/asm-xen/pci.h linux-2.4-xen-sparse/include/asm-xen/pgalloc.h linux-2.4-xen-sparse/include/asm-xen/pgtable-2level.h linux-2.4-xen-sparse/include/asm-xen/pgtable.h linux-2.4-xen-sparse/include/asm-xen/processor.h linux-2.4-xen-sparse/include/asm-xen/queues.h linux-2.4-xen-sparse/include/asm-xen/segment.h linux-2.4-xen-sparse/include/asm-xen/smp.h linux-2.4-xen-sparse/include/asm-xen/system.h linux-2.4-xen-sparse/include/asm-xen/vga.h linux-2.4-xen-sparse/include/asm-xen/xor.h linux-2.4-xen-sparse/include/linux/blk.h linux-2.4-xen-sparse/include/linux/highmem.h linux-2.4-xen-sparse/include/linux/irq.h linux-2.4-xen-sparse/include/linux/mm.h linux-2.4-xen-sparse/include/linux/sched.h 
linux-2.4-xen-sparse/include/linux/skbuff.h linux-2.4-xen-sparse/include/linux/timer.h linux-2.4-xen-sparse/kernel/time.c linux-2.4-xen-sparse/kernel/timer.c linux-2.4-xen-sparse/mkbuildtree linux-2.4-xen-sparse/mm/highmem.c linux-2.4-xen-sparse/mm/memory.c linux-2.4-xen-sparse/mm/mprotect.c linux-2.4-xen-sparse/mm/mremap.c linux-2.4-xen-sparse/mm/page_alloc.c linux-2.4-xen-sparse/net/core/skbuff.c linux-2.6-xen-sparse/arch/xen/Kconfig linux-2.6-xen-sparse/arch/xen/Kconfig.drivers linux-2.6-xen-sparse/arch/xen/Makefile linux-2.6-xen-sparse/arch/xen/boot/Makefile linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/i386/Kconfig linux-2.6-xen-sparse/arch/xen/i386/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/acpi/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/acpi/boot.c linux-2.6-xen-sparse/arch/xen/i386/kernel/apic.c linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c linux-2.6-xen-sparse/arch/xen/i386/kernel/init_task.c linux-2.6-xen-sparse/arch/xen/i386/kernel/io_apic.c linux-2.6-xen-sparse/arch/xen/i386/kernel/ioport.c linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c linux-2.6-xen-sparse/arch/xen/i386/kernel/ldt.c linux-2.6-xen-sparse/arch/xen/i386/kernel/microcode.c linux-2.6-xen-sparse/arch/xen/i386/kernel/mpparse.c linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c linux-2.6-xen-sparse/arch/xen/i386/kernel/signal.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c linux-2.6-xen-sparse/arch/xen/i386/kernel/vsyscall.S linux-2.6-xen-sparse/arch/xen/i386/mach-default/Makefile linux-2.6-xen-sparse/arch/xen/i386/mm/Makefile linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c linux-2.6-xen-sparse/arch/xen/i386/mm/highmem.c linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6-xen-sparse/arch/xen/i386/mm/init.c linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c linux-2.6-xen-sparse/arch/xen/i386/pci/Makefile linux-2.6-xen-sparse/arch/xen/i386/pci/irq.c linux-2.6-xen-sparse/arch/xen/kernel/Makefile linux-2.6-xen-sparse/arch/xen/kernel/ctrl_if.c linux-2.6-xen-sparse/arch/xen/kernel/devmem.c linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/kernel/fixup.c linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c linux-2.6-xen-sparse/arch/xen/kernel/reboot.c linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c linux-2.6-xen-sparse/arch/xen/kernel/smp.c linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig linux-2.6-xen-sparse/arch/xen/x86_64/Makefile 
linux-2.6-xen-sparse/arch/xen/x86_64/ia32/Makefile linux-2.6-xen-sparse/arch/xen/x86_64/ia32/ia32entry.S linux-2.6-xen-sparse/arch/xen/x86_64/ia32/syscall32.c linux-2.6-xen-sparse/arch/xen/x86_64/ia32/vsyscall-int80.S linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile linux-2.6-xen-sparse/arch/xen/x86_64/kernel/acpi/Makefile linux-2.6-xen-sparse/arch/xen/x86_64/kernel/apic.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/early_printk.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head64.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/io_apic.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/ioport.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/irq.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/ldt.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/mpparse.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/process.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/signal.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smp.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/pageattr.c linux-2.6-xen-sparse/arch/xen/x86_64/pci/Makefile linux-2.6-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS linux-2.6-xen-sparse/drivers/Makefile linux-2.6-xen-sparse/drivers/acpi/tables.c linux-2.6-xen-sparse/drivers/char/mem.c linux-2.6-xen-sparse/drivers/char/tty_io.c linux-2.6-xen-sparse/drivers/xen/Makefile linux-2.6-xen-sparse/drivers/xen/balloon/Makefile linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c linux-2.6-xen-sparse/drivers/xen/blkback/Makefile linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6-xen-sparse/drivers/xen/blkback/common.h linux-2.6-xen-sparse/drivers/xen/blkback/interface.c linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c linux-2.6-xen-sparse/drivers/xen/blkfront/Kconfig linux-2.6-xen-sparse/drivers/xen/blkfront/Makefile linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c linux-2.6-xen-sparse/drivers/xen/blktap/Makefile linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c linux-2.6-xen-sparse/drivers/xen/console/Makefile linux-2.6-xen-sparse/drivers/xen/console/console.c linux-2.6-xen-sparse/drivers/xen/evtchn/Makefile linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c linux-2.6-xen-sparse/drivers/xen/netback/Makefile linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/loopback.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c 
linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c linux-2.6-xen-sparse/drivers/xen/netfront/Kconfig linux-2.6-xen-sparse/drivers/xen/netfront/Makefile linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/privcmd/Makefile linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c linux-2.6-xen-sparse/drivers/xen/usbback/common.h linux-2.6-xen-sparse/drivers/xen/usbback/control.c linux-2.6-xen-sparse/drivers/xen/usbback/interface.c linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c linux-2.6-xen-sparse/drivers/xen/usbfront/xhci.h linux-2.6-xen-sparse/drivers/xen/xenbus/Makefile linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c linux-2.6-xen-sparse/include/asm-generic/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/agp.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/desc.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/fixmap.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/floppy.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/highmem.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/hw_irq.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/io.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/kmap_types.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/param.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/pci.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgalloc.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/ptrace.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/scatterlist.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/segment.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/spinlock.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/swiotlb.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/tlbflush.h linux-2.6-xen-sparse/include/asm-xen/asm-i386/vga.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/desc.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/floppy.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hw_irq.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/io.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/irq.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h 
linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/param.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pci.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/processor.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/segment.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/timer.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/vga.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/xor.h linux-2.6-xen-sparse/include/asm-xen/balloon.h linux-2.6-xen-sparse/include/asm-xen/ctrl_if.h linux-2.6-xen-sparse/include/asm-xen/evtchn.h linux-2.6-xen-sparse/include/asm-xen/foreign_page.h linux-2.6-xen-sparse/include/asm-xen/gnttab.h linux-2.6-xen-sparse/include/asm-xen/hypervisor.h linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h linux-2.6-xen-sparse/include/asm-xen/linux-public/suspend.h linux-2.6-xen-sparse/include/asm-xen/queues.h linux-2.6-xen-sparse/include/asm-xen/synch_bitops.h linux-2.6-xen-sparse/include/asm-xen/xen_proc.h linux-2.6-xen-sparse/include/asm-xen/xenbus.h linux-2.6-xen-sparse/include/linux/gfp.h linux-2.6-xen-sparse/include/linux/highmem.h linux-2.6-xen-sparse/include/linux/irq.h linux-2.6-xen-sparse/include/linux/mm.h linux-2.6-xen-sparse/include/linux/skbuff.h linux-2.6-xen-sparse/kernel/irq/manage.c linux-2.6-xen-sparse/mkbuildtree linux-2.6-xen-sparse/mm/highmem.c linux-2.6-xen-sparse/mm/memory.c linux-2.6-xen-sparse/mm/mmap.c linux-2.6-xen-sparse/mm/page_alloc.c linux-2.6-xen-sparse/net/core/dev.c linux-2.6-xen-sparse/net/core/skbuff.c patches/linux-2.6.12/i386-cpu-hotplug-updated-for-mm.patch patches/linux-2.6.12/net-csum.patch patches/linux-2.6.12/patch-2.6.12.5 patches/linux-2.6.12/rcu-nohz.patch patches/linux-2.6.12/smp-alts.patch tools/Makefile tools/Rules.mk tools/blktap/blktaplib.c tools/blktap/blktaplib.h tools/blktap/parallax/Makefile tools/blktap/parallax/block-async.h tools/blktap/parallax/blockstore.h tools/console/Makefile tools/console/client/main.c tools/console/daemon/io.c tools/console/daemon/io.h tools/console/daemon/main.c tools/console/daemon/utils.c tools/console/daemon/utils.h tools/console/testsuite/Makefile tools/console/testsuite/README tools/console/testsuite/console-dom0.c tools/console/testsuite/console-domU.c tools/console/testsuite/procpipe.c tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c tools/debugger/gdb/gdbbuild tools/debugger/libxendebug/Makefile tools/debugger/libxendebug/xendebug.c tools/debugger/libxendebug/xendebug.h tools/debugger/pdb/Domain.ml tools/debugger/pdb/Domain.mli tools/debugger/pdb/Makefile tools/debugger/pdb/PDB.ml tools/debugger/pdb/Process.ml tools/debugger/pdb/Process.mli tools/debugger/pdb/Util.ml tools/debugger/pdb/Xen_domain.ml tools/debugger/pdb/Xen_domain.mli 
tools/debugger/pdb/debugger.ml tools/debugger/pdb/linux-2.6-module/Makefile tools/debugger/pdb/linux-2.6-module/debug.c tools/debugger/pdb/linux-2.6-module/module.c tools/debugger/pdb/linux-2.6-module/pdb_debug.h tools/debugger/pdb/linux-2.6-module/pdb_module.h tools/debugger/pdb/linux-2.6-patches/Makefile tools/debugger/pdb/linux-2.6-patches/i386_ksyms.patch tools/debugger/pdb/linux-2.6-patches/kdebug.patch tools/debugger/pdb/linux-2.6-patches/makefile.patch tools/debugger/pdb/linux-2.6-patches/ptrace.patch tools/debugger/pdb/linux-2.6-patches/traps.patch tools/debugger/pdb/pdb_caml_domain.c tools/debugger/pdb/pdb_caml_evtchn.c tools/debugger/pdb/pdb_caml_process.c tools/debugger/pdb/pdb_caml_xc.c tools/debugger/pdb/pdb_caml_xcs.c tools/debugger/pdb/pdb_caml_xen.h tools/debugger/pdb/pdb_xen.c tools/debugger/pdb/readme tools/debugger/pdb/server.ml tools/examples/Makefile tools/examples/README tools/examples/backend.hotplug tools/examples/network-bridge tools/examples/vif-bridge tools/examples/xend-config.sxp tools/examples/xmexample.vmx tools/firmware/acpi/acpi2_0.h tools/firmware/rombios/rombios.c tools/ioemu/hw/i8254.c tools/ioemu/hw/i8259.c tools/ioemu/hw/ide.c tools/ioemu/hw/ioapic.h tools/ioemu/monitor.c tools/ioemu/target-i386-dm/Makefile tools/ioemu/target-i386-dm/helper2.c tools/ioemu/target-i386-dm/qemu-dm.debug tools/ioemu/vl.c tools/ioemu/vl.h tools/ioemu/vnc.c tools/libxc/Makefile tools/libxc/linux_boot_params.h tools/libxc/xc_core.c tools/libxc/xc_domain.c tools/libxc/xc_gnttab.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_linux_save.c tools/libxc/xc_load_aout9.c tools/libxc/xc_load_bin.c tools/libxc/xc_load_elf.c tools/libxc/xc_private.c tools/libxc/xc_private.h tools/libxc/xc_ptrace.c tools/libxc/xc_vmx_build.c tools/libxc/xenctrl.h tools/libxc/xenguest.h tools/libxc/xg_private.c tools/libxc/xg_private.h tools/misc/Makefile tools/misc/cpuperf/Makefile tools/misc/cpuperf/cpuperf_xeno.h tools/misc/xc_shadow.c tools/misc/xend tools/misc/xenperf.c tools/python/setup.py tools/python/xen/lowlevel/xc/xc.c tools/python/xen/lowlevel/xs/xs.c tools/python/xen/lowlevel/xu/xu.c tools/python/xen/sv/CreateDomain.py tools/python/xen/sv/DomInfo.py tools/python/xen/sv/GenTabbed.py tools/python/xen/sv/HTMLBase.py tools/python/xen/sv/Main.py tools/python/xen/sv/NodeInfo.py tools/python/xen/sv/RestoreDomain.py tools/python/xen/sv/Wizard.py tools/python/xen/sv/__init__.py tools/python/xen/sv/util.py tools/python/xen/web/SrvBase.py tools/python/xen/web/SrvDir.py tools/python/xen/web/__init__.py tools/python/xen/web/connection.py tools/python/xen/web/httpserver.py tools/python/xen/web/protocol.py tools/python/xen/web/reactor.py tools/python/xen/web/resource.py tools/python/xen/web/static.py tools/python/xen/web/tcp.py tools/python/xen/web/unix.py tools/python/xen/xend/Args.py tools/python/xen/xend/EventServer.py tools/python/xen/xend/PrettyPrint.py tools/python/xen/xend/Vifctl.py tools/python/xen/xend/XendCheckpoint.py tools/python/xen/xend/XendClient.py tools/python/xen/xend/XendDB.py tools/python/xen/xend/XendDmesg.py tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/XendError.py tools/python/xen/xend/XendLogging.py tools/python/xen/xend/XendNode.py tools/python/xen/xend/XendProtocol.py tools/python/xen/xend/XendRoot.py tools/python/xen/xend/XendVnet.py tools/python/xen/xend/encode.py tools/python/xen/xend/image.py tools/python/xen/xend/scheduler.py tools/python/xen/xend/server/SrvDaemon.py 
tools/python/xen/xend/server/SrvDmesg.py tools/python/xen/xend/server/SrvDomain.py tools/python/xen/xend/server/SrvDomainDir.py tools/python/xen/xend/server/SrvNode.py tools/python/xen/xend/server/SrvRoot.py tools/python/xen/xend/server/SrvServer.py tools/python/xen/xend/server/SrvVnetDir.py tools/python/xen/xend/server/SrvXendLog.py tools/python/xen/xend/server/blkif.py tools/python/xen/xend/server/channel.py tools/python/xen/xend/server/controller.py tools/python/xen/xend/server/event.py tools/python/xen/xend/server/messages.py tools/python/xen/xend/server/netif.py tools/python/xen/xend/server/params.py tools/python/xen/xend/server/pciif.py tools/python/xen/xend/server/relocate.py tools/python/xen/xend/sxp.py tools/python/xen/xend/uuid.py tools/python/xen/xend/xenstore/__init__.py tools/python/xen/xend/xenstore/xsnode.py tools/python/xen/xend/xenstore/xsobj.py tools/python/xen/xend/xenstore/xsresource.py tools/python/xen/xm/create.py tools/python/xen/xm/destroy.py tools/python/xen/xm/help.py tools/python/xen/xm/main.py tools/python/xen/xm/migrate.py tools/python/xen/xm/opts.py tools/python/xen/xm/shutdown.py tools/python/xen/xm/sysrq.py tools/security/Makefile tools/security/example.txt tools/security/install.txt tools/security/policies/chwall/chwall-security_label_template.xml tools/security/policies/chwall/chwall-security_policy.xml tools/security/policies/chwall_ste/chwall_ste-security_label_template.xml tools/security/policies/chwall_ste/chwall_ste-security_policy.xml tools/security/policies/null/null-security_label_template.xml tools/security/policies/null/null-security_policy.xml tools/security/policies/security_policy.xsd tools/security/policies/ste/ste-security_label_template.xml tools/security/policies/ste/ste-security_policy.xml tools/security/policy.txt tools/security/readme.txt tools/security/secpol_compat.h tools/security/secpol_tool.c tools/security/secpol_xml2bin.c tools/security/secpol_xml2bin.h tools/security/setlabel.sh tools/security/updategrub.sh tools/sv/Makefile tools/sv/images/destroy.png tools/sv/images/finish.png tools/sv/images/next.png tools/sv/images/pause.png tools/sv/images/previous.png tools/sv/images/reboot.png tools/sv/images/shutdown.png tools/sv/images/small-destroy.png tools/sv/images/small-pause.png tools/sv/images/small-unpause.png tools/sv/images/unpause.png tools/sv/images/xen.png tools/sv/inc/script.js tools/sv/inc/style.css tools/sv/index.psp tools/xcs/Makefile tools/xcs/dump.h tools/xcs/xcs.h tools/xcs/xcsdump.c tools/xcutils/Makefile tools/xcutils/xc_restore.c tools/xcutils/xc_save.c tools/xenstat/Makefile tools/xenstat/libxenstat/COPYING tools/xenstat/libxenstat/Makefile tools/xenstat/libxenstat/bindings/swig/perl/.empty tools/xenstat/libxenstat/bindings/swig/python/.empty tools/xenstat/libxenstat/bindings/swig/xenstat.i tools/xenstat/libxenstat/src/xen-interface.c tools/xenstat/libxenstat/src/xen-interface.h tools/xenstat/libxenstat/src/xenstat.c tools/xenstat/libxenstat/src/xenstat.h tools/xenstat/xentop/Makefile tools/xenstat/xentop/TODO tools/xenstat/xentop/xentop.1 tools/xenstat/xentop/xentop.c tools/xenstore/COPYING tools/xenstore/Makefile tools/xenstore/TODO tools/xenstore/testsuite/01simple.test tools/xenstore/testsuite/02directory.test tools/xenstore/testsuite/03write.test tools/xenstore/testsuite/04rm.test tools/xenstore/testsuite/05filepermissions.test tools/xenstore/testsuite/06dirpermissions.test tools/xenstore/testsuite/07watch.test tools/xenstore/testsuite/08transaction.slowtest tools/xenstore/testsuite/08transaction.test 
tools/xenstore/testsuite/09domain.test tools/xenstore/testsuite/10domain-homedir.test tools/xenstore/testsuite/11domain-watch.test tools/xenstore/testsuite/12readonly.test tools/xenstore/testsuite/13watch-ack.test tools/xenstore/testsuite/14complexperms.test tools/xenstore/testsuite/15nowait.test tools/xenstore/testsuite/test.sh tools/xenstore/utils.c tools/xenstore/utils.h tools/xenstore/xenstored.h tools/xenstore/xenstored_core.c tools/xenstore/xenstored_core.h tools/xenstore/xenstored_domain.c tools/xenstore/xenstored_domain.h tools/xenstore/xenstored_transaction.c tools/xenstore/xenstored_transaction.h tools/xenstore/xenstored_watch.c tools/xenstore/xenstored_watch.h tools/xenstore/xs.c tools/xenstore/xs.h tools/xenstore/xs_crashme.c tools/xenstore/xs_dom0_test.c tools/xenstore/xs_lib.c tools/xenstore/xs_lib.h tools/xenstore/xs_random.c tools/xenstore/xs_test.c tools/xentrace/Makefile tools/xentrace/xenctx.c tools/xentrace/xentrace.c xen/Makefile xen/Rules.mk xen/acm/acm_core.c xen/acm/acm_policy.c xen/arch/ia64/Makefile xen/arch/ia64/Rules.mk xen/arch/ia64/asm-offsets.c xen/arch/ia64/asm-xsi-offsets.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/grant_table.c xen/arch/ia64/hypercall.c xen/arch/ia64/hyperprivop.S xen/arch/ia64/ivt.S xen/arch/ia64/linux-xen/efi.c xen/arch/ia64/linux-xen/entry.S xen/arch/ia64/linux-xen/entry.h xen/arch/ia64/linux-xen/head.S xen/arch/ia64/linux-xen/irq_ia64.c xen/arch/ia64/linux-xen/mm_contig.c xen/arch/ia64/linux-xen/pal.S xen/arch/ia64/linux-xen/setup.c xen/arch/ia64/linux-xen/time.c xen/arch/ia64/linux-xen/tlb.c xen/arch/ia64/linux-xen/unaligned.c xen/arch/ia64/linux/cmdline.c xen/arch/ia64/linux/efi_stub.S xen/arch/ia64/linux/extable.c xen/arch/ia64/linux/hpsim.S xen/arch/ia64/linux/ia64_ksyms.c xen/arch/ia64/linux/irq_lsapic.c xen/arch/ia64/linux/lib/Makefile xen/arch/ia64/linux/lib/bitop.c xen/arch/ia64/linux/lib/carta_random.S xen/arch/ia64/linux/lib/checksum.c xen/arch/ia64/linux/lib/clear_page.S xen/arch/ia64/linux/lib/clear_user.S xen/arch/ia64/linux/lib/copy_page.S xen/arch/ia64/linux/lib/copy_page_mck.S xen/arch/ia64/linux/lib/copy_user.S xen/arch/ia64/linux/lib/csum_partial_copy.c xen/arch/ia64/linux/lib/dec_and_lock.c xen/arch/ia64/linux/lib/do_csum.S xen/arch/ia64/linux/lib/flush.S xen/arch/ia64/linux/lib/idiv32.S xen/arch/ia64/linux/lib/idiv64.S xen/arch/ia64/linux/lib/io.c xen/arch/ia64/linux/lib/ip_fast_csum.S xen/arch/ia64/linux/lib/memcpy.S xen/arch/ia64/linux/lib/memcpy_mck.S xen/arch/ia64/linux/lib/memset.S xen/arch/ia64/linux/lib/strlen.S xen/arch/ia64/linux/lib/strlen_user.S xen/arch/ia64/linux/lib/strncpy_from_user.S xen/arch/ia64/linux/lib/strnlen_user.S xen/arch/ia64/linux/lib/xor.S xen/arch/ia64/linux/linuxextable.c xen/arch/ia64/linux/machvec.c xen/arch/ia64/linux/minstate.h xen/arch/ia64/linux/patch.c xen/arch/ia64/linux/pcdp.h xen/arch/ia64/linux/sal.c xen/arch/ia64/mmio.c xen/arch/ia64/pal_emul.c xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.11/kregs.h xen/arch/ia64/pcdp.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vcpu.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_hypercall.c xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_phy_mode.c xen/arch/ia64/vmx_support.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vtlb.c xen/arch/ia64/xen.lds.S xen/arch/ia64/xenasm.S xen/arch/ia64/xenmem.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c 
xen/arch/x86/Makefile xen/arch/x86/acpi/boot.c xen/arch/x86/apic.c xen/arch/x86/audit.c xen/arch/x86/cpu/amd.c xen/arch/x86/cpu/common.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/genapic/es7000plat.c xen/arch/x86/i8259.c xen/arch/x86/io_apic.c xen/arch/x86/mm.c xen/arch/x86/mpparse.c xen/arch/x86/physdev.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/smpboot.c xen/arch/x86/time.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/common/ac_timer.c xen/common/dom0_ops.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/grant_table.c xen/common/lib.c xen/common/page_alloc.c xen/common/perfc.c xen/common/sched_sedf.c xen/common/schedule.c xen/common/symbols.c xen/common/trace.c xen/common/xmalloc.c xen/drivers/char/console.c xen/drivers/char/ns16550.c xen/include/acm/acm_core.h xen/include/acm/acm_hooks.h xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/event.h xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h xen/include/asm-ia64/linux-xen/asm/ia64regs.h xen/include/asm-ia64/linux-xen/asm/io.h xen/include/asm-ia64/linux-xen/asm/kregs.h xen/include/asm-ia64/linux-xen/asm/mca_asm.h xen/include/asm-ia64/linux-xen/asm/page.h xen/include/asm-ia64/linux-xen/asm/pal.h xen/include/asm-ia64/linux-xen/asm/pgalloc.h xen/include/asm-ia64/linux-xen/asm/processor.h xen/include/asm-ia64/linux-xen/asm/ptrace.h xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h xen/include/asm-ia64/linux-xen/asm/system.h xen/include/asm-ia64/linux-xen/asm/types.h xen/include/asm-ia64/linux-xen/asm/uaccess.h xen/include/asm-ia64/linux-xen/linux/cpumask.h xen/include/asm-ia64/linux-xen/linux/hardirq.h xen/include/asm-ia64/linux-xen/linux/interrupt.h xen/include/asm-ia64/linux/asm-generic/bug.h xen/include/asm-ia64/linux/asm-generic/div64.h xen/include/asm-ia64/linux/asm-generic/errno-base.h xen/include/asm-ia64/linux/asm-generic/errno.h xen/include/asm-ia64/linux/asm-generic/ide_iops.h xen/include/asm-ia64/linux/asm-generic/iomap.h xen/include/asm-ia64/linux/asm-generic/pci-dma-compat.h xen/include/asm-ia64/linux/asm-generic/pci.h xen/include/asm-ia64/linux/asm-generic/pgtable-nopud.h xen/include/asm-ia64/linux/asm-generic/pgtable.h xen/include/asm-ia64/linux/asm-generic/sections.h xen/include/asm-ia64/linux/asm-generic/topology.h xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h xen/include/asm-ia64/linux/asm/acpi.h xen/include/asm-ia64/linux/asm/asmmacro.h xen/include/asm-ia64/linux/asm/atomic.h xen/include/asm-ia64/linux/asm/bitops.h xen/include/asm-ia64/linux/asm/break.h xen/include/asm-ia64/linux/asm/bug.h xen/include/asm-ia64/linux/asm/byteorder.h xen/include/asm-ia64/linux/asm/cache.h xen/include/asm-ia64/linux/asm/cacheflush.h xen/include/asm-ia64/linux/asm/checksum.h xen/include/asm-ia64/linux/asm/current.h xen/include/asm-ia64/linux/asm/delay.h xen/include/asm-ia64/linux/asm/desc.h xen/include/asm-ia64/linux/asm/div64.h xen/include/asm-ia64/linux/asm/dma-mapping.h xen/include/asm-ia64/linux/asm/dma.h xen/include/asm-ia64/linux/asm/errno.h xen/include/asm-ia64/linux/asm/fpu.h xen/include/asm-ia64/linux/asm/hardirq.h 
xen/include/asm-ia64/linux/asm/hdreg.h xen/include/asm-ia64/linux/asm/hw_irq.h xen/include/asm-ia64/linux/asm/ia32.h xen/include/asm-ia64/linux/asm/intrinsics.h xen/include/asm-ia64/linux/asm/ioctl.h xen/include/asm-ia64/linux/asm/irq.h xen/include/asm-ia64/linux/asm/linkage.h xen/include/asm-ia64/linux/asm/machvec.h xen/include/asm-ia64/linux/asm/machvec_hpsim.h xen/include/asm-ia64/linux/asm/mca.h xen/include/asm-ia64/linux/asm/meminit.h xen/include/asm-ia64/linux/asm/mman.h xen/include/asm-ia64/linux/asm/module.h xen/include/asm-ia64/linux/asm/numa.h xen/include/asm-ia64/linux/asm/param.h xen/include/asm-ia64/linux/asm/patch.h xen/include/asm-ia64/linux/asm/pci.h xen/include/asm-ia64/linux/asm/pdb.h xen/include/asm-ia64/linux/asm/percpu.h xen/include/asm-ia64/linux/asm/pgtable.h xen/include/asm-ia64/linux/asm/ptrace_offsets.h xen/include/asm-ia64/linux/asm/rse.h xen/include/asm-ia64/linux/asm/rwsem.h xen/include/asm-ia64/linux/asm/sal.h xen/include/asm-ia64/linux/asm/scatterlist.h xen/include/asm-ia64/linux/asm/sections.h xen/include/asm-ia64/linux/asm/semaphore.h xen/include/asm-ia64/linux/asm/setup.h xen/include/asm-ia64/linux/asm/sigcontext.h xen/include/asm-ia64/linux/asm/signal.h xen/include/asm-ia64/linux/asm/smp.h xen/include/asm-ia64/linux/asm/sn/arch.h xen/include/asm-ia64/linux/asm/sn/geo.h xen/include/asm-ia64/linux/asm/sn/nodepda.h xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h xen/include/asm-ia64/linux/asm/spinlock.h xen/include/asm-ia64/linux/asm/string.h xen/include/asm-ia64/linux/asm/thread_info.h xen/include/asm-ia64/linux/asm/timex.h xen/include/asm-ia64/linux/asm/tlbflush.h xen/include/asm-ia64/linux/asm/topology.h xen/include/asm-ia64/linux/asm/unaligned.h xen/include/asm-ia64/linux/asm/unistd.h xen/include/asm-ia64/linux/asm/unwind.h xen/include/asm-ia64/linux/asm/ustack.h xen/include/asm-ia64/linux/bcd.h xen/include/asm-ia64/linux/bitmap.h xen/include/asm-ia64/linux/bitops.h xen/include/asm-ia64/linux/bootmem.h xen/include/asm-ia64/linux/byteorder/generic.h xen/include/asm-ia64/linux/byteorder/little_endian.h xen/include/asm-ia64/linux/byteorder/swab.h xen/include/asm-ia64/linux/cpu.h xen/include/asm-ia64/linux/device.h xen/include/asm-ia64/linux/dma-mapping.h xen/include/asm-ia64/linux/efi.h xen/include/asm-ia64/linux/err.h xen/include/asm-ia64/linux/file.h xen/include/asm-ia64/linux/gfp.h xen/include/asm-ia64/linux/initrd.h xen/include/asm-ia64/linux/ioport.h xen/include/asm-ia64/linux/jiffies.h xen/include/asm-ia64/linux/kernel_stat.h xen/include/asm-ia64/linux/kmalloc_sizes.h xen/include/asm-ia64/linux/linkage.h xen/include/asm-ia64/linux/linuxtime.h xen/include/asm-ia64/linux/mmzone.h xen/include/asm-ia64/linux/module.h xen/include/asm-ia64/linux/numa.h xen/include/asm-ia64/linux/page-flags.h xen/include/asm-ia64/linux/percpu.h xen/include/asm-ia64/linux/preempt.h xen/include/asm-ia64/linux/proc_fs.h xen/include/asm-ia64/linux/profile.h xen/include/asm-ia64/linux/ptrace.h xen/include/asm-ia64/linux/random.h xen/include/asm-ia64/linux/rbtree.h xen/include/asm-ia64/linux/rtc.h xen/include/asm-ia64/linux/rwsem.h xen/include/asm-ia64/linux/seq_file.h xen/include/asm-ia64/linux/seqlock.h xen/include/asm-ia64/linux/serial.h xen/include/asm-ia64/linux/serial_core.h xen/include/asm-ia64/linux/signal.h xen/include/asm-ia64/linux/slab.h xen/include/asm-ia64/linux/smp_lock.h xen/include/asm-ia64/linux/stddef.h xen/include/asm-ia64/linux/swap.h xen/include/asm-ia64/linux/thread_info.h xen/include/asm-ia64/linux/threads.h xen/include/asm-ia64/linux/timex.h 
xen/include/asm-ia64/linux/topology.h xen/include/asm-ia64/linux/tty.h xen/include/asm-ia64/linux/wait.h xen/include/asm-ia64/mmu_context.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/regs.h xen/include/asm-ia64/serial.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_uaccess.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenprocessor.h xen/include/asm-ia64/xensystem.h xen/include/asm-x86/apicdef.h xen/include/asm-x86/config.h xen/include/asm-x86/e820.h xen/include/asm-x86/event.h xen/include/asm-x86/fixmap.h xen/include/asm-x86/genapic.h xen/include/asm-x86/hpet.h xen/include/asm-x86/io.h xen/include/asm-x86/mach-bigsmp/mach_apic.h xen/include/asm-x86/mach-default/mach_apic.h xen/include/asm-x86/mach-es7000/mach_apic.h xen/include/asm-x86/mach-generic/mach_apic.h xen/include/asm-x86/mach-summit/mach_apic.h xen/include/asm-x86/mach-summit/mach_mpparse.h xen/include/asm-x86/mm.h xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_public.h xen/include/asm-x86/time.h xen/include/asm-x86/types.h xen/include/asm-x86/uaccess.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_virpit.h xen/include/asm-x86/vmx_vmcs.h xen/include/asm-x86/x86_32/page-3level.h xen/include/asm-x86/x86_32/uaccess.h xen/include/asm-x86/x86_64/page.h xen/include/public/arch-ia64.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/dom0_ops.h xen/include/public/grant_table.h xen/include/public/io/blkif.h xen/include/public/io/domain_controller.h xen/include/public/io/netif.h xen/include/public/physdev.h xen/include/public/trace.h xen/include/public/xen.h xen/include/xen/ac_timer.h xen/include/xen/domain.h xen/include/xen/event.h xen/include/xen/grant_table.h xen/include/xen/mm.h xen/include/xen/perfc_defn.h xen/include/xen/sched.h xen/include/xen/serial.h xen/include/xen/symbols.h xen/include/xen/time.h xen/include/xen/trace.h xen/tools/Makefile xen/tools/symbols.c
   617.1 --- a/xen/arch/ia64/Makefile	Fri Aug 26 08:50:31 2005 +0000
   617.2 +++ b/xen/arch/ia64/Makefile	Fri Aug 26 09:05:43 2005 +0000
   617.3 @@ -1,5 +1,7 @@
   617.4  include $(BASEDIR)/Rules.mk
   617.5  
   617.6 +VPATH = linux linux-xen
   617.7 +
   617.8  # libs-y	+= arch/ia64/lib/lib.a
   617.9  
  617.10  OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
  617.11 @@ -12,8 +14,11 @@ OBJS = xensetup.o setup.o time.o irq.o i
  617.12  	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
  617.13  	grant_table.o sn_console.o
  617.14  
  617.15 +# TMP holder to contain *.o moved out of CONFIG_VTI
  617.16 +OBJS += vmx_init.o
  617.17 +
  617.18  ifeq ($(CONFIG_VTI),y)
  617.19 -OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
  617.20 +OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
  617.21  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
  617.22  	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
  617.23  endif
  617.24 @@ -75,7 +80,7 @@ xen.lds.s: xen.lds.S
  617.25  		-o xen.lds.s xen.lds.S
  617.26  
  617.27  ia64lib.o:
  617.28 -	$(MAKE) -C lib && cp lib/ia64lib.o .
  617.29 +	$(MAKE) -C linux/lib && cp linux/lib/ia64lib.o .
  617.30  
  617.31  clean:
  617.32  	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
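
The VPATH addition above is what lets OBJS keep naming bare objects like setup.o while their sources move into the linux/ and linux-xen/ subdirectories: make searches the VPATH directories for any prerequisite it cannot find in the current directory. A minimal sketch of the same mechanism, with hypothetical file names that are not part of this changeset:

	# Sketch: sources live in linux/ and linux-xen/, objects stay local.
	VPATH = linux linux-xen

	OBJS = foo.o bar.o        # make finds linux/foo.c, linux-xen/bar.c

	prog: $(OBJS)
		$(CC) -o $@ $(OBJS)

	%.o: %.c                  # $< expands to the VPATH-resolved path
		$(CC) $(CFLAGS) -c -o $@ $<
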
   618.1 --- a/xen/arch/ia64/Rules.mk	Fri Aug 26 08:50:31 2005 +0000
   618.2 +++ b/xen/arch/ia64/Rules.mk	Fri Aug 26 09:05:43 2005 +0000
   618.3 @@ -6,14 +6,21 @@ ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
   618.4  CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
   618.5  endif
   618.6  AFLAGS  += -D__ASSEMBLY__
   618.7 -CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64
   618.8 +CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64 	\
   618.9 +             -I$(BASEDIR)/include/asm-ia64/linux 			\
  618.10 +	     -I$(BASEDIR)/include/asm-ia64/linux-xen 			\
  618.11 +             -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
  618.12 +
  618.13  CFLAGS  := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
  618.14  #CFLAGS  += -O3		# -O3 over-inlines making debugging tough!
  618.15  CFLAGS  += -O2		# but no optimization causes compile errors!
  618.16  #CFLAGS  += -iwithprefix include -Wall -DMONITOR_BASE=$(MONITOR_BASE)
  618.17  CFLAGS  += -iwithprefix include -Wall
  618.18  CFLAGS  += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
  618.19 -CFLAGS  += -I$(BASEDIR)/include/asm-ia64
  618.20 +CFLAGS  += -I$(BASEDIR)/include/asm-ia64 -I$(BASEDIR)/include/asm-ia64/linux \
  618.21 +           -I$(BASEDIR)/include/asm-ia64/linux 				\
  618.22 +           -I$(BASEDIR)/include/asm-ia64/linux-xen 			\
  618.23 +           -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
  618.24  CFLAGS  += -Wno-pointer-arith -Wredundant-decls
  618.25  CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
  618.26  CFLAGS	+= -ffixed-r13 -mfixed-range=f12-f15,f32-f127
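
The widened -I lists matter because gcc searches include directories in command-line order and stops at the first match, so a header present under both asm-ia64/linux and asm-ia64/linux-xen resolves to whichever directory is named first. In this tree a given header normally lives in only one of the two, so the order is rarely observable, but the rule is worth keeping in mind. A sketch, with a hypothetical header name:

	/* Assume both include/asm-ia64/linux/foo.h and
	 * include/asm-ia64/linux-xen/foo.h exist.  Compiled with
	 *   -I$(BASEDIR)/include/asm-ia64/linux
	 *   -I$(BASEDIR)/include/asm-ia64/linux-xen
	 * the line below picks up the linux/ copy, because gcc tries
	 * -I directories left to right. */
	#include <foo.h>
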
   619.1 --- a/xen/arch/ia64/asm-offsets.c	Fri Aug 26 08:50:31 2005 +0000
   619.2 +++ b/xen/arch/ia64/asm-offsets.c	Fri Aug 26 09:05:43 2005 +0000
   619.3 @@ -139,14 +139,14 @@ void foo(void)
   619.4  	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
   619.5  	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
   619.6  #ifdef CONFIG_VTI
   619.7 -	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct xen_regs, r4));
   619.8 -	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct xen_regs, r5));
   619.9 -	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct xen_regs, r6));
  619.10 -	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct xen_regs, r7));
  619.11 -	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct xen_regs, cr_iipa));
  619.12 -	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
  619.13 -	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
  619.14 -	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
  619.15 +	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
  619.16 +	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
  619.17 +	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
  619.18 +	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
  619.19 +	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct pt_regs, cr_iipa));
  619.20 +	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
  619.21 +	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
  619.22 +	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
  619.23  	DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
  619.24  	DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
  619.25  	DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
  619.26 @@ -296,4 +296,11 @@ void foo(void)
  619.27  	//DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
  619.28  	//DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
  619.29  	//DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
  619.30 +	DEFINE(IA64_KR_CURRENT_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT]));
  619.31 +	DEFINE(IA64_KR_PT_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PT_BASE]));
  619.32 +	DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
  619.33 +	DEFINE(IA64_KR_PERCPU_DATA_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PER_CPU_DATA]));
  619.34 +	DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
  619.35 +	DEFINE(IA64_KR_CURRENT_STACK_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT_STACK]));
  619.36 +
  619.37  }
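
asm-offsets.c is never linked into the hypervisor; it exists so that structure offsets computed by the C compiler can be handed to assembly code, and the hunk above simply repoints the CONFIG_VTI offsets from the removed struct xen_regs to the common struct pt_regs. The conventional mechanism behind DEFINE, sketched from the usual kernel idiom rather than quoted from this tree:

	/* Sketch of the asm-offsets idiom: the file is compiled with -S
	 * and a script greps the "->" markers out of the generated
	 * assembly to build asm-offsets.h. */
	#include <stddef.h>

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	struct pt_regs { unsigned long r2, r3, r4; };

	void foo(void)
	{
		DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof(struct pt_regs, r4));
	}
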
   622.1 --- a/xen/arch/ia64/domain.c	Fri Aug 26 08:50:31 2005 +0000
   622.2 +++ b/xen/arch/ia64/domain.c	Fri Aug 26 09:05:43 2005 +0000
   622.3 @@ -38,25 +38,17 @@
   622.4  
   622.5  #include <asm/vcpu.h>   /* for function declarations */
   622.6  #include <public/arch-ia64.h>
   622.7 -#ifdef CONFIG_VTI
   622.8  #include <asm/vmx.h>
   622.9  #include <asm/vmx_vcpu.h>
  622.10  #include <asm/vmx_vpd.h>
  622.11  #include <asm/pal.h>
  622.12  #include <public/io/ioreq.h>
  622.13 -#endif // CONFIG_VTI
  622.14  
  622.15  #define CONFIG_DOMAIN0_CONTIGUOUS
  622.16  unsigned long dom0_start = -1L;
  622.17 -#ifdef CONFIG_VTI
  622.18  unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
  622.19  //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
  622.20  unsigned long dom0_align = 256*1024*1024;
  622.21 -#else // CONFIG_VTI
  622.22 -unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
  622.23 -//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
  622.24 -unsigned long dom0_align = 64*1024*1024;
  622.25 -#endif // CONFIG_VTI
  622.26  #ifdef DOMU_BUILD_STAGING
  622.27  unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
  622.28  unsigned long domU_staging_start;
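
With the #else branch gone, dom0_align is 256MB for every build, matching the comment's warning that Linux may use a 256MB page (granule) size. Honouring that alignment is the usual power-of-two round-up; a sketch with hypothetical names, not code from this changeset:

	/* Round addr up to align; align must be a power of two
	 * (256MB == 1UL << 28). */
	static unsigned long align_up(unsigned long addr, unsigned long align)
	{
		return (addr + align - 1) & ~(align - 1);
	}
	/* e.g. start = align_up(start, dom0_align); */
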
  622.29 @@ -187,60 +179,6 @@ static void init_switch_stack(struct vcp
  622.30  	memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
  622.31  }
  622.32  
  622.33 -#ifdef CONFIG_VTI
  622.34 -void arch_do_createdomain(struct vcpu *v)
  622.35 -{
  622.36 -	struct domain *d = v->domain;
  622.37 -	struct thread_info *ti = alloc_thread_info(v);
  622.38 -
  622.39 -	/* Clear thread_info to clear some important fields, like preempt_count */
  622.40 -	memset(ti, 0, sizeof(struct thread_info));
  622.41 -	init_switch_stack(v);
  622.42 -
  622.43 - 	/* Shared info area is required to be allocated at domain
  622.44 - 	 * creation, since control panel will write some I/O info
  622.45 - 	 * between front end and back end to that area. However for
  622.46 - 	 * vmx domain, our design is to let domain itself to allcoate
  622.47 - 	 * shared info area, to keep machine page contiguous. So this
  622.48 - 	 * page will be released later when domainN issues request
  622.49 - 	 * after up.
  622.50 - 	 */
  622.51 - 	d->shared_info = (void *)alloc_xenheap_page();
  622.52 -	/* Now assume all vcpu info and event indicators can be
  622.53 -	 * held in one shared page. Definitely later we need to
  622.54 -	 * consider more about it
  622.55 -	 */
  622.56 -
  622.57 -	memset(d->shared_info, 0, PAGE_SIZE);
  622.58 -	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
  622.59 -			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
  622.60 -	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
  622.61 -	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
  622.62 -	v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
  622.63 -	/* Mask all events, and specific port will be unmasked
  622.64 -	 * when customer subscribes to it.
  622.65 -	 */
  622.66 -	if(v == d->vcpu[0]) {
  622.67 -	    memset(&d->shared_info->evtchn_mask[0], 0xff,
  622.68 -		sizeof(d->shared_info->evtchn_mask));
  622.69 -	}
  622.70 -
  622.71 -	/* Allocate per-domain vTLB and vhpt */
  622.72 -	v->arch.vtlb = init_domain_tlb(v);
  622.73 -
  622.74 -	/* Physical->machine page table will be allocated when 
  622.75 -	 * final setup, since we have no the maximum pfn number in 
  622.76 -	 * this stage
  622.77 -	 */
  622.78 -
  622.79 -	/* FIXME: This is identity mapped address for xenheap. 
  622.80 -	 * Do we need it at all?
  622.81 -	 */
  622.82 -	d->xen_vastart = XEN_START_ADDR;
  622.83 -	d->xen_vaend = XEN_END_ADDR;
  622.84 -	d->arch.breakimm = 0x1000;
  622.85 -}
  622.86 -#else // CONFIG_VTI
  622.87  void arch_do_createdomain(struct vcpu *v)
  622.88  {
  622.89  	struct domain *d = v->domain;
  622.90 @@ -263,11 +201,26 @@ void arch_do_createdomain(struct vcpu *v
  622.91  	v->vcpu_info = &(d->shared_info->vcpu_data[0]);
  622.92  
  622.93  	d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
  622.94 -	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
  622.95 +
  622.96 +#ifdef CONFIG_VTI
  622.97 +	/* Per-domain vTLB and vhpt implementation. For now a vmx domain
  622.98 +	 * will stick to this solution. Maybe it can be deferred until we
  622.99 +	 * know the created domain is a vmx one. */
 622.100 +	v->arch.vtlb = init_domain_tlb(v);
 622.101 +#endif
 622.102 +
 622.103 +	/* We may also need an emulation rid for region4, though a guest is
 622.104 +	 * unlikely to issue uncacheable accesses in metaphysical mode. But
 622.105 +	 * keeping such info here may be more sane.
 622.106 +	 */
 622.107 +	if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
 622.108 +	 || ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
 622.109  		BUG();
 622.110  	VCPU(v, metaphysical_mode) = 1;
 622.111  	v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
 622.112 +	v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
 622.113  	v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
 622.114 +	v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
 622.115  #define DOMAIN_RID_BITS_DEFAULT 18
 622.116  	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
 622.117  		BUG();
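
The hunk above allocates emulation rids for rr4 as well as rr0 because an ia64 virtual address selects its region register with its top three bits and, as the new comment notes, an uncacheable access made in metaphysical mode would land in region 4 rather than region 0. The selection rule, as a sketch:

	/* ia64: bits 63..61 of a virtual address pick one of the eight
	 * region registers rr0..rr7. */
	static inline unsigned int va_region(unsigned long va)
	{
		return (unsigned int)(va >> 61);
	}
	/* va_region(0x0000000000000000UL) == 0   -> rr0 */
	/* va_region(0x8000000000000000UL) == 4   -> rr4 */
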
 622.118 @@ -292,7 +245,6 @@ void arch_do_createdomain(struct vcpu *v
 622.119  		return -ENOMEM;
 622.120  	}
 622.121  }
 622.122 -#endif // CONFIG_VTI
 622.123  
 622.124  void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
 622.125  {
 622.126 @@ -312,16 +264,28 @@ void arch_getdomaininfo_ctxt(struct vcpu
 622.127  	c->shared = v->domain->shared_info->arch;
 622.128  }
 622.129  
 622.130 -#ifndef CONFIG_VTI
 622.131  int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
 622.132  {
 622.133  	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
 622.134 +	struct domain *d = v->domain;
 622.135 +	int i, rc, ret;
 622.136 +	unsigned long progress = 0;
 622.137  
 622.138  	printf("arch_set_info_guest\n");
 622.139 +	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
 622.140 +            return 0;
 622.141 +
 622.142 +	if (c->flags & VGCF_VMX_GUEST) {
 622.143 +	    if (!vmx_enabled) {
 622.144 +		printk("No VMX hardware feature for vmx domain.\n");
 622.145 +		return -EINVAL;
 622.146 +	    }
 622.147 +
 622.148 +	    vmx_setup_platform(v, c);
 622.149 +	}
 622.150 +
 622.151  	*regs = c->regs;
 622.152 -	regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
 622.153 -	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
 622.154 -	regs->ar_rsc |= (2 << 2); /* force PL2/3 */
 622.155 +	new_thread(v, regs->cr_iip, 0, 0);
 622.156  
 622.157   	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
 622.158  	if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
 622.159 @@ -330,100 +294,13 @@ int arch_set_info_guest(struct vcpu *v, 
 622.160  	    return -EFAULT;
 622.161  	}
 622.162  
 622.163 -	init_all_rr(v);
 622.164 +	v->arch.domain_itm_last = -1L;
 622.165 +	d->shared_info->arch = c->shared;
 622.166  
 622.167 -	// this should be in userspace
 622.168 -	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
 622.169 -	v->arch.domain_itm_last = -1L;
 622.170 - 	VCPU(v, banknum) = 1;
 622.171 - 	VCPU(v, metaphysical_mode) = 1;
 622.172 -
 622.173 -	v->domain->shared_info->arch = c->shared;
 622.174 +	/* Don't redo final setup */
 622.175 +	set_bit(_VCPUF_initialised, &v->vcpu_flags);
 622.176  	return 0;
 622.177  }
 622.178 -#else // CONFIG_VTI
 622.179 -int arch_set_info_guest(
 622.180 -    struct vcpu *v, struct vcpu_guest_context *c)
 622.181 -{
 622.182 -    struct domain *d = v->domain;
 622.183 -    int i, rc, ret;
 622.184 -    unsigned long progress = 0;
 622.185 -    shared_iopage_t *sp;
 622.186 -
 622.187 -    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
 622.188 -        return 0;
 622.189 -
 622.190 -    /* Lazy FP not implemented yet */
 622.191 -    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
 622.192 -    if ( c->flags & VGCF_FPU_VALID )
 622.193 -        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
 622.194 -
 622.195 -    /* Sync d/i cache conservatively, after domain N is loaded */
 622.196 -    ret = ia64_pal_cache_flush(3, 0, &progress, NULL);
 622.197 -    if (ret != PAL_STATUS_SUCCESS)
 622.198 -            panic("PAL CACHE FLUSH failed for dom[%d].\n",
 622.199 -		v->domain->domain_id);
 622.200 -    DPRINTK("Sync i/d cache for dom%d image SUCC\n",
 622.201 -		v->domain->domain_id);
 622.202 -
 622.203 -    /* Physical mode emulation initialization, including
 622.204 -     * emulation ID allcation and related memory request
 622.205 -     */
 622.206 -    physical_mode_init(v);
 622.207 -
 622.208 -    /* FIXME: only support PMT table continuously by far */
 622.209 -    d->arch.pmt = __va(c->pt_base);
 622.210 -    d->arch.max_pfn = c->pt_max_pfn;
 622.211 -    d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
 622.212 -    sp = get_sp(d);
 622.213 -    memset((char *)sp,0,PAGE_SIZE);
 622.214 -    /* FIXME: temp due to old CP */
 622.215 -    sp->sp_global.eport = 2;
 622.216 -#ifdef V_IOSAPIC_READY
 622.217 -    sp->vcpu_number = 1;
 622.218 -#endif
 622.219 -    /* TEMP */
 622.220 -    d->arch.vmx_platform.pib_base = 0xfee00000UL;
 622.221 -    
 622.222 -
 622.223 -    if (c->flags & VGCF_VMX_GUEST) {
 622.224 -	if (!vmx_enabled)
 622.225 -	    panic("No VMX hardware feature for vmx domain.\n");
 622.226 -
 622.227 -	vmx_final_setup_domain(d);
 622.228 -
 622.229 -	/* One more step to enable interrupt assist */
 622.230 -	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
 622.231 -    }
 622.232 -
 622.233 -    vlsapic_reset(v);
 622.234 -    vtm_init(v);
 622.235 -
 622.236 -    /* Only open one port for I/O and interrupt emulation */
 622.237 -    if (v == d->vcpu[0]) {
 622.238 -	memset(&d->shared_info->evtchn_mask[0], 0xff,
 622.239 -		sizeof(d->shared_info->evtchn_mask));
 622.240 -	clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
 622.241 -    }
 622.242 -    /* Setup domain context. Actually IA-64 is a bit different with
 622.243 -     * x86, with almost all system resources better managed by HV
 622.244 -     * directly. CP only needs to provide start IP of guest, which
 622.245 -     * ideally is the load address of guest Firmware.
 622.246 -     */
 622.247 -    new_thread(v, c->guest_iip, 0, 0);
 622.248 -
 622.249 -
 622.250 -    d->xen_vastart = XEN_START_ADDR;
 622.251 -    d->xen_vaend = XEN_END_ADDR;
 622.252 -    d->arch.breakimm = 0x1000 + d->domain_id;
 622.253 -    v->arch._thread.on_ustack = 0;
 622.254 -
 622.255 -    /* Don't redo final setup */
 622.256 -    set_bit(_VCPUF_initialised, &v->vcpu_flags);
 622.257 -
 622.258 -    return 0;
 622.259 -}
 622.260 -#endif // CONFIG_VTI
 622.261  
 622.262  void arch_do_boot_vcpu(struct vcpu *v)
 622.263  {
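
Merging the two arch_set_info_guest variants also changes the failure mode: the deleted CONFIG_VTI version panicked when VMX hardware was absent, while the unified code above rejects the request with -EINVAL and leaves the hypervisor running. That gating pattern, as a self-contained sketch (names are illustrative, not from this tree):

	#include <errno.h>

	static int vmx_enabled;   /* stands in for the real capability flag */

	int set_info(int wants_vmx)
	{
		/* Refuse a bad control-plane request instead of panicking
		 * the whole host. */
		if (wants_vmx && !vmx_enabled)
			return -EINVAL;
		/* ... proceed with normal setup ... */
		return 0;
	}
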
 622.264 @@ -443,17 +320,17 @@ void domain_relinquish_resources(struct 
 622.265  	printf("domain_relinquish_resources: not implemented\n");
 622.266  }
 622.267  
 622.268 -#ifdef CONFIG_VTI
 622.269 +// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
 622.270 +// and linux/arch/ia64/kernel/process.c:kernel_thread()
 622.271  void new_thread(struct vcpu *v,
 622.272                  unsigned long start_pc,
 622.273                  unsigned long start_stack,
 622.274                  unsigned long start_info)
 622.275  {
 622.276  	struct domain *d = v->domain;
 622.277 -	struct xen_regs *regs;
 622.278 +	struct pt_regs *regs;
 622.279  	struct ia64_boot_param *bp;
 622.280  	extern char saved_command_line[];
 622.281 -	//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
 622.282  
 622.283  
 622.284  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
 622.285 @@ -471,61 +348,31 @@ void new_thread(struct vcpu *v,
 622.286  		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
 622.287  	}
 622.288  	regs->cr_iip = start_pc;
 622.289 -	regs->cr_ifs = 0; /* why? - matthewc */
 622.290 +	regs->cr_ifs = 1UL << 63; /* or clear? */
 622.291  	regs->ar_fpsr = FPSR_DEFAULT;
 622.292 -	if (VMX_DOMAIN(v)) {
 622.293 -		vmx_init_all_rr(v);
 622.294 -	} else
 622.295 -		init_all_rr(v);
 622.296  
 622.297  	if (VMX_DOMAIN(v)) {
 622.298 -		if (d == dom0) {
 622.299 +#ifdef CONFIG_VTI
 622.300 +		vmx_init_all_rr(v);
 622.301 +		if (d == dom0)
 622.302  		    VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
 622.303 -		    printk("new_thread, done with dom_fw_setup\n");
 622.304 -		}
 622.305  		/* Virtual processor context setup */
 622.306  		VMX_VPD(v, vpsr) = IA64_PSR_BN;
 622.307  		VPD_CR(v, dcr) = 0;
 622.308 +#endif
 622.309  	} else {
 622.310 -		regs->r28 = dom_fw_setup(d,saved_command_line,256L);
 622.311 +		init_all_rr(v);
 622.312 +		if (d == dom0) 
 622.313 +		    regs->r28 = dom_fw_setup(d,saved_command_line,256L);
 622.314 +		else {
 622.315 +		    regs->ar_rsc |= (2 << 2); /* force PL2/3 */
 622.316 +		    regs->r28 = dom_fw_setup(d,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
 622.317 +		}
 622.318  		VCPU(v, banknum) = 1;
 622.319  		VCPU(v, metaphysical_mode) = 1;
 622.320  		d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
 622.321  	}
 622.322  }
 622.323 -#else // CONFIG_VTI
 622.324 -
 622.325 -// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
 622.326 -// and linux/arch/ia64/kernel/process.c:kernel_thread()
 622.327 -void new_thread(struct vcpu *v,
 622.328 -	            unsigned long start_pc,
 622.329 -	            unsigned long start_stack,
 622.330 -	            unsigned long start_info)
 622.331 -{
 622.332 -	struct domain *d = v->domain;
 622.333 -	struct pt_regs *regs;
 622.334 -	struct ia64_boot_param *bp;
 622.335 -	extern char saved_command_line[];
 622.336 -
 622.337 -#ifdef CONFIG_DOMAIN0_CONTIGUOUS
 622.338 -	if (d == dom0) start_pc += dom0_start;
 622.339 -#endif
 622.340 -
 622.341 -	regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
 622.342 -	regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
 622.343 -		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
 622.344 -		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
 622.345 -	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
 622.346 -	regs->cr_iip = start_pc;
 622.347 -	regs->cr_ifs = 1UL << 63;
 622.348 -	regs->ar_fpsr = FPSR_DEFAULT;
 622.349 -	init_all_rr(v);
 622.350 -	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
 622.351 -	VCPU(v, banknum) = 1;
 622.352 -	VCPU(v, metaphysical_mode) = 1;
 622.353 -	d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
 622.354 -}
 622.355 -#endif // CONFIG_VTI
 622.356  
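
The merged code above folds the former CONFIG_VTI-only path into a single new_thread(). One subtlety worth spelling out: regs->ar_rsc |= (2 << 2) sets the two-bit privilege-level field of the Register Stack Configuration register, so a domU's register-stack accesses happen at the same PL2 that cr.ipsr grants the domain. A minimal C sketch of the bit layout (field positions per the IA-64 architecture; the macro names are illustrative, not from this tree):

	/* ar.rsc layout (relevant low fields):
	 *   bits 1:0  mode  (register stack engine mode)
	 *   bits 3:2  pl    (privilege level for register-stack accesses)
	 *   bit  4    be    (big-endian memory accesses)
	 */
	#define AR_RSC_PL_SHIFT	2
	#define AR_RSC_PL(n)	((unsigned long)(n) << AR_RSC_PL_SHIFT)

	regs->ar_rsc |= AR_RSC_PL(2);	/* same effect as the (2 << 2) above */
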
 622.357  static struct page * map_new_domain0_page(unsigned long mpaddr)
 622.358  {
 622.359 @@ -903,44 +750,6 @@ domU_staging_write_32(unsigned long at, 
 622.360  }
 622.361  #endif
 622.362  
 622.363 -#ifdef CONFIG_VTI
 622.364 -/* Depending on whether the domain is a vmx one, a different context
 622.365 - * may be set up here.
 622.366 - */
 622.367 -void
 622.368 -post_arch_do_create_domain(struct vcpu *v, int vmx_domain)
 622.369 -{
 622.370 -    struct domain *d = v->domain;
 622.371 -
 622.372 -    if (!vmx_domain) {
 622.373 -	d->shared_info = (void*)alloc_xenheap_page();
 622.374 -	if (!d->shared_info)
 622.375 -		panic("Failed to allocate shared info for non-vmx domain.\n");
 622.376 -	d->shared_info_va = 0xfffd000000000000;
 622.377 -
 622.378 -	printk("Build shared info for non-vmx domain\n");
 622.379 -	build_shared_info(d);
 622.380 -	/* Setup start info area */
 622.381 -    }
 622.382 -}
 622.383 -
 622.384 -/* For a VMX domain, this is invoked when the guest kernel
 622.385 - * actively requests it.
 622.386 - */
 622.387 -void build_shared_info(struct domain *d)
 622.388 -{
 622.389 -    int i;
 622.390 -
 622.391 -    /* Set up shared-info area. */
 622.392 -    update_dom_time(d);
 622.393 -
 622.394 -    /* Mask all upcalls... */
 622.395 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
 622.396 -        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 622.397 -
 622.398 -    /* ... */
 622.399 -}
 622.400 -
 622.401  /*
 622.402   * Domain 0 has direct access to all devices. However,
 622.403   * the major point of this stub is to allow alloc_dom_mem
 622.404 @@ -959,182 +768,12 @@ int construct_dom0(struct domain *d,
 622.405  	               unsigned long initrd_start, unsigned long initrd_len,
 622.406  	               char *cmdline)
 622.407  {
 622.408 -    char *dst;
 622.409 -    int i, rc;
 622.410 -    unsigned long pfn, mfn;
 622.411 -    unsigned long nr_pt_pages;
 622.412 -    unsigned long count;
 622.413 -    unsigned long alloc_start, alloc_end;
 622.414 -    struct pfn_info *page = NULL;
 622.415 -    start_info_t *si;
 622.416 -    struct vcpu *v = d->vcpu[0];
 622.417 -    struct domain_setup_info dsi;
 622.418 -    unsigned long p_start;
 622.419 -    unsigned long pkern_start;
 622.420 -    unsigned long pkern_entry;
 622.421 -    unsigned long pkern_end;
 622.422 -    unsigned long ret;
 622.423 -    unsigned long progress = 0;
 622.424 -
 622.425 -//printf("construct_dom0: starting\n");
 622.426 -    /* Sanity! */
 622.427 -#ifndef CLONE_DOMAIN0
 622.428 -    if ( d != dom0 ) 
 622.429 -        BUG();
 622.430 -    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
 622.431 -        BUG();
 622.432 -#endif
 622.433 -
 622.434 -    printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
 622.435 -    memset(&dsi, 0, sizeof(struct domain_setup_info));
 622.436 -
 622.437 -    printk("*** LOADING DOMAIN 0 ***\n");
 622.438 -
 622.439 -    alloc_start = dom0_start;
 622.440 -    alloc_end = dom0_start + dom0_size;
 622.441 -    d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE;
 622.442 -    image_start = __va(ia64_boot_param->initrd_start);
 622.443 -    image_len = ia64_boot_param->initrd_size;
 622.444 -
 622.445 -    dsi.image_addr = (unsigned long)image_start;
 622.446 -    dsi.image_len  = image_len;
 622.447 -    rc = parseelfimage(&dsi);
 622.448 -    if ( rc != 0 )
 622.449 -        return rc;
 622.450 -
 622.451 -    /* Temp workaround */
 622.452 -    if (running_on_sim)
 622.453 -	dsi.xen_section_string = (char *)1;
 622.454 -
 622.455 -    if ((!vmx_enabled) && !dsi.xen_section_string) {
 622.456 -	printk("Lack of hardware support for unmodified vmx dom0\n");
 622.457 -	panic("");
 622.458 -    }
 622.459 -
 622.460 -    if (vmx_enabled && !dsi.xen_section_string) {
 622.461 -	printk("Dom0 is vmx domain!\n");
 622.462 -	vmx_dom0 = 1;
 622.463 -    }
 622.464 -
 622.465 -    p_start = dsi.v_start;
 622.466 -    pkern_start = dsi.v_kernstart;
 622.467 -    pkern_end = dsi.v_kernend;
 622.468 -    pkern_entry = dsi.v_kernentry;
 622.469 -
 622.470 -    printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",
 622.471 -	p_start,pkern_start,pkern_end,pkern_entry);
 622.472 -
 622.473 -    if ( (p_start & (PAGE_SIZE-1)) != 0 )
 622.474 -    {
 622.475 -        printk("Initial guest OS must load to a page boundary.\n");
 622.476 -        return -EINVAL;
 622.477 -    }
 622.478 -
 622.479 -    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
 622.480 -           " Kernel image:  %lx->%lx\n"
 622.481 -           " Entry address: %lx\n"
 622.482 -           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
 622.483 -           pkern_start, pkern_end, pkern_entry);
 622.484 -
 622.485 -    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
 622.486 -    {
 622.487 -        printk("Initial guest OS requires too much space\n"
 622.488 -               "(%luMB is greater than %luMB limit)\n",
 622.489 -               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
 622.490 -        return -ENOMEM;
 622.491 -    }
 622.492 -
 622.493 -    // Other sanity checks on the Dom0 image
 622.494 -
 622.495 -    /* Construct a frame-allocation list for the initial domain, since these
 622.496 -     * pages are allocated by boot allocator and pfns are not set properly
 622.497 -     */
 622.498 -    for ( mfn = (alloc_start>>PAGE_SHIFT); 
 622.499 -          mfn < (alloc_end>>PAGE_SHIFT); 
 622.500 -          mfn++ )
 622.501 -    {
 622.502 -        page = &frame_table[mfn];
 622.503 -        page_set_owner(page, d);
 622.504 -        page->u.inuse.type_info = 0;
 622.505 -        page->count_info        = PGC_allocated | 1;
 622.506 -        list_add_tail(&page->list, &d->page_list);
 622.507 -
 622.508 -	/* Construct 1:1 mapping */
 622.509 -	machine_to_phys_mapping[mfn] = mfn;
 622.510 -    }
 622.511 -
 622.512 -    post_arch_do_create_domain(v, vmx_dom0);
 622.513 -
 622.514 -    /* Load Dom0 image to its own memory */
 622.515 -    loaddomainelfimage(d,image_start);
 622.516 -
 622.517 -    /* Copy the initial ramdisk. */
 622.518 -
 622.519 -    /* Sync d/i cache conservatively */
 622.520 -    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
 622.521 -    if (ret != PAL_STATUS_SUCCESS)
 622.522 -            panic("PAL CACHE FLUSH failed for dom0.\n");
 622.523 -    printk("Sync i/d cache for dom0 image SUCC\n");
 622.524 -
 622.525 -    /* Physical mode emulation initialization, including
 622.526 -     * emulation ID allocation and related memory requests
 622.527 -     */
 622.528 -    physical_mode_init(v);
 622.529 -    /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
 622.530 -     * for dom0
 622.531 -     */
 622.532 -    d->arch.pmt = NULL;
 622.533 -
 622.534 -    /* Give up the VGA console if DOM0 is configured to grab it. */
 622.535 -    if (cmdline != NULL)
 622.536 -    	console_endboot(strstr(cmdline, "tty0") != NULL);
 622.537 -
 622.538 -    /* VMX specific construction for Dom0, if hardware supports VMX
 622.539 -     * and Dom0 is unmodified image
 622.540 -     */
 622.541 -    printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
 622.542 -    if (vmx_dom0)
 622.543 -	vmx_final_setup_domain(dom0);
 622.544 -    
 622.545 -    /* vpd is ready now */
 622.546 -    vlsapic_reset(v);
 622.547 -    vtm_init(v);
 622.548 -
 622.549 -    set_bit(_DOMF_constructed, &d->domain_flags);
 622.550 -    new_thread(v, pkern_entry, 0, 0);
 622.551 -
 622.552 -    physdev_init_dom0(d);
 622.553 -    // FIXME: Hack for keyboard input
 622.554 -#ifdef CLONE_DOMAIN0
 622.555 -if (d == dom0)
 622.556 -#endif
 622.557 -    serial_input_init();
 622.558 -    if (d == dom0) {
 622.559 -    	VCPU(v, delivery_mask[0]) = -1L;
 622.560 -    	VCPU(v, delivery_mask[1]) = -1L;
 622.561 -    	VCPU(v, delivery_mask[2]) = -1L;
 622.562 -    	VCPU(v, delivery_mask[3]) = -1L;
 622.563 -    }
 622.564 -    else __set_bit(0x30,VCPU(v, delivery_mask));
 622.565 -
 622.566 -    return 0;
 622.567 -}
 622.568 -
 622.569 -
 622.570 -#else //CONFIG_VTI
 622.571 -
 622.572 -int construct_dom0(struct domain *d, 
 622.573 -	               unsigned long image_start, unsigned long image_len, 
 622.574 -	               unsigned long initrd_start, unsigned long initrd_len,
 622.575 -	               char *cmdline)
 622.576 -{
 622.577  	char *dst;
 622.578  	int i, rc;
 622.579  	unsigned long pfn, mfn;
 622.580  	unsigned long nr_pt_pages;
 622.581  	unsigned long count;
 622.582 -	//l2_pgentry_t *l2tab, *l2start;
 622.583 -	//l1_pgentry_t *l1tab = NULL, *l1start = NULL;
 622.584 +	unsigned long alloc_start, alloc_end;
 622.585  	struct pfn_info *page = NULL;
 622.586  	start_info_t *si;
 622.587  	struct vcpu *v = d->vcpu[0];
 622.588 @@ -1144,6 +783,7 @@ int construct_dom0(struct domain *d,
 622.589  	unsigned long pkern_start;
 622.590  	unsigned long pkern_entry;
 622.591  	unsigned long pkern_end;
 622.592 +	unsigned long ret, progress = 0;
 622.593  
 622.594  //printf("construct_dom0: starting\n");
 622.595  	/* Sanity! */
 622.596 @@ -1158,7 +798,9 @@ int construct_dom0(struct domain *d,
 622.597  
 622.598  	printk("*** LOADING DOMAIN 0 ***\n");
 622.599  
 622.600 -	d->max_pages = dom0_size/PAGE_SIZE;
 622.601 +	alloc_start = dom0_start;
 622.602 +	alloc_end = dom0_start + dom0_size;
 622.603 +	d->tot_pages = d->max_pages = dom0_size/PAGE_SIZE;
 622.604  	image_start = __va(ia64_boot_param->initrd_start);
 622.605  	image_len = ia64_boot_param->initrd_size;
 622.606  //printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
 622.607 @@ -1171,6 +813,23 @@ int construct_dom0(struct domain *d,
 622.608  	if ( rc != 0 )
 622.609  	    return rc;
 622.610  
 622.611 +#ifdef CONFIG_VTI
 622.612 +	/* Temp workaround */
 622.613 +	if (running_on_sim)
 622.614 +	    dsi.xen_section_string = (char *)1;
 622.615 +
 622.616 +	/* Check whether dom0 is a VTI domain */
 622.617 +	if ((!vmx_enabled) && !dsi.xen_section_string) {
 622.618 +	    printk("Lack of hardware support for unmodified vmx dom0\n");
 622.619 +	    panic("Cannot run an unmodified vmx dom0 without VMX hardware\n");
 622.620 +	}
 622.621 +
 622.622 +	if (vmx_enabled && !dsi.xen_section_string) {
 622.623 +	    printk("Dom0 is vmx domain!\n");
 622.624 +	    vmx_dom0 = 1;
 622.625 +	}
 622.626 +#endif
 622.627 +
 622.628  	p_start = dsi.v_start;
 622.629  	pkern_start = dsi.v_kernstart;
 622.630  	pkern_end = dsi.v_kernend;
 622.631 @@ -1214,14 +873,43 @@ int construct_dom0(struct domain *d,
 622.632  	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
 622.633  	    d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 622.634  
 622.635 +#ifdef CONFIG_VTI
 622.636 +	/* Construct a frame-allocation list for the initial domain, since these
 622.637 +	 * pages are allocated by the boot allocator and their pfns are not set properly
 622.638 +	 */
 622.639 +	for ( mfn = (alloc_start>>PAGE_SHIFT); 
 622.640 +	      mfn < (alloc_end>>PAGE_SHIFT); 
 622.641 +	      mfn++ )
 622.642 +	{
 622.643 +            page = &frame_table[mfn];
 622.644 +            page_set_owner(page, d);
 622.645 +            page->u.inuse.type_info = 0;
 622.646 +            page->count_info        = PGC_allocated | 1;
 622.647 +            list_add_tail(&page->list, &d->page_list);
 622.648 +
 622.649 +	    /* Construct 1:1 mapping */
 622.650 +	    machine_to_phys_mapping[mfn] = mfn;
 622.651 +	}
 622.652 +
 622.653 +	/* Dom0's pfns are equal to its mfns, so there's no need to allocate
 622.654 +	 * a pmt for dom0
 622.655 +	 */
 622.656 +	d->arch.pmt = NULL;
 622.657 +#endif
 622.658 +
 622.659  	/* Copy the OS image. */
 622.660 -	//(void)loadelfimage(image_start);
 622.661  	loaddomainelfimage(d,image_start);
 622.662  
 622.663  	/* Copy the initial ramdisk. */
 622.664  	//if ( initrd_len != 0 )
 622.665  	//    memcpy((void *)vinitrd_start, initrd_start, initrd_len);
 622.666  
 622.667 +	/* Sync d/i cache conservatively */
 622.668 +	ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
 622.669 +	if (ret != PAL_STATUS_SUCCESS)
 622.670 +	    panic("PAL CACHE FLUSH failed for dom0.\n");
 622.671 +	printk("Synced i/d cache for dom0 image\n");
 622.672 +
 622.673  #if 0
 622.674  	/* Set up start info area. */
 622.675  	//si = (start_info_t *)vstartinfo_start;
 622.676 @@ -1257,14 +945,21 @@ int construct_dom0(struct domain *d,
 622.677  #endif
 622.678  	
 622.679  	/* Give up the VGA console if DOM0 is configured to grab it. */
 622.680 -#ifdef IA64
 622.681  	if (cmdline != NULL)
 622.682 -#endif
 622.683 -	console_endboot(strstr(cmdline, "tty0") != NULL);
 622.684 +	    console_endboot(strstr(cmdline, "tty0") != NULL);
 622.685 +
 622.686 +	/* VMX-specific construction for Dom0, if the hardware supports VMX
 622.687 +	 * and Dom0 is an unmodified image
 622.688 +	 */
 622.689 +	printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
 622.690 +	if (vmx_dom0)
 622.691 +	    vmx_final_setup_domain(dom0);
 622.692  
 622.693  	set_bit(_DOMF_constructed, &d->domain_flags);
 622.694  
 622.695  	new_thread(v, pkern_entry, 0, 0);
 622.696 +	physdev_init_dom0(d);
 622.697 +
 622.698  	// FIXME: Hack for keyboard input
 622.699  #ifdef CLONE_DOMAIN0
 622.700  if (d == dom0)
 622.701 @@ -1280,7 +975,6 @@ if (d == dom0)
 622.702  
 622.703  	return 0;
 622.704  }
 622.705 -#endif // CONFIG_VTI
 622.706  
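
The unified construct_dom0() above keeps the parse-then-check load contract. Condensed into a C sketch (same names as the surrounding code; error handling abbreviated):

	struct domain_setup_info dsi;
	int rc;

	memset(&dsi, 0, sizeof(dsi));
	dsi.image_addr = (unsigned long)image_start;
	dsi.image_len  = image_len;
	rc = parseelfimage(&dsi);		/* fills in v_start/v_kern* bounds */
	if (rc != 0)
		return rc;			/* not a loadable ELF image */
	if (dsi.v_start & (PAGE_SIZE - 1))
		return -EINVAL;			/* must load on a page boundary */
	if (dsi.v_kernend - dsi.v_kernstart > d->max_pages * PAGE_SIZE)
		return -ENOMEM;			/* image exceeds dom0 allocation */
	loaddomainelfimage(d, image_start);	/* copy sections into dom0 memory */
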
 622.707  // FIXME: When dom0 can construct domains, this goes away (or is rewritten)
 622.708  int construct_domU(struct domain *d,
   625.1 --- a/xen/arch/ia64/hyperprivop.S	Fri Aug 26 08:50:31 2005 +0000
   625.2 +++ b/xen/arch/ia64/hyperprivop.S	Fri Aug 26 09:05:43 2005 +0000
   625.3 @@ -73,7 +73,8 @@ GLOBAL_ENTRY(fast_hyperprivop)
   625.4  	ld4 r20=[r20] ;;
   625.5  	cmp.eq p7,p0=r0,r20
   625.6  (p7)	br.cond.sptk.many 1f
   625.7 -	mov r20=IA64_KR(CURRENT);;
   625.8 +	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   625.9 +	ld8 r20=[r20];;
  625.10  	adds r21=IA64_VCPU_IRR0_OFFSET,r20;
  625.11  	adds r22=IA64_VCPU_IRR0_OFFSET+8,r20;;
  625.12  	ld8 r23=[r21],16; ld8 r24=[r22],16;;
  625.13 @@ -257,7 +258,8 @@ ENTRY(hyper_ssm_i)
  625.14  	st8 [r21]=r20 ;;
  625.15  	// leave cr.ifs alone for later rfi
  625.16  	// set iip to go to domain IVA break instruction vector
  625.17 -	mov r22=IA64_KR(CURRENT);;
  625.18 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.19 +	ld8 r22=[r22];;
  625.20  	adds r22=IA64_VCPU_IVA_OFFSET,r22;;
  625.21  	ld8 r23=[r22];;
  625.22  	movl r24=0x3000;;
  625.23 @@ -306,7 +308,7 @@ GLOBAL_ENTRY(fast_tick_reflect)
  625.24  	mov r28=IA64_TIMER_VECTOR;;
  625.25  	cmp.ne p6,p0=r28,r30
  625.26  (p6)	br.cond.spnt.few rp;;
  625.27 -	movl r20=(PERCPU_ADDR)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
  625.28 +	movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
  625.29  	ld8 r26=[r20];;
  625.30  	mov r27=ar.itc;;
  625.31  	adds r27=200,r27;;	// safety margin
  625.32 @@ -340,7 +342,8 @@ GLOBAL_ENTRY(fast_tick_reflect)
  625.33  (p6)	br.cond.spnt.few fast_tick_reflect_done;;
  625.34  	extr.u r27=r20,0,6	// r27 has low 6 bits of itv.vector
  625.35  	extr.u r26=r20,6,2;;	// r26 has irr index of itv.vector
  625.36 -	mov r19=IA64_KR(CURRENT);;
  625.37 +	movl r19=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.38 +	ld8 r19=[r19];;
  625.39  	adds r22=IA64_VCPU_DOMAIN_ITM_LAST_OFFSET,r19
  625.40  	adds r23=IA64_VCPU_DOMAIN_ITM_OFFSET,r19;;
  625.41  	ld8 r24=[r22];;
  625.42 @@ -581,7 +584,8 @@ ENTRY(fast_reflect)
  625.43  	st8 [r18]=r0;;
  625.44  	// FIXME: need to save iipa and isr to be arch-compliant
  625.45  	// set iip to go to domain IVA break instruction vector
  625.46 -	mov r22=IA64_KR(CURRENT);;
  625.47 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.48 +	ld8 r22=[r22];;
  625.49  	adds r22=IA64_VCPU_IVA_OFFSET,r22;;
  625.50  	ld8 r23=[r22];;
  625.51  	add r20=r20,r23;;
  625.52 @@ -803,7 +807,8 @@ GLOBAL_ENTRY(rfi_check_extint)
  625.53  
  625.54  	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
  625.55  	// make sure none of these get trashed in case going to just_do_rfi
  625.56 -	mov r30=IA64_KR(CURRENT);;
  625.57 +	movl r30=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.58 +	ld8 r30=[r30];;
  625.59  	adds r24=IA64_VCPU_INSVC3_OFFSET,r30;;
  625.60  	mov r25=192
  625.61  	adds r16=IA64_VCPU_IRR3_OFFSET,r30;;
  625.62 @@ -1010,7 +1015,8 @@ ENTRY(hyper_ssm_dt)
  625.63  	ld4 r21=[r20];;
  625.64  	cmp.eq p7,p0=r21,r0	// meta==0?
  625.65  (p7)	br.spnt.many	1f ;;	// already in virtual mode
  625.66 -	mov r22=IA64_KR(CURRENT);;
  625.67 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.68 +	ld8 r22=[r22];;
  625.69  	adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
  625.70  	ld4 r23=[r22];;
  625.71  	mov rr[r0]=r23;;
  625.72 @@ -1045,7 +1051,8 @@ ENTRY(hyper_rsm_dt)
  625.73  	ld4 r21=[r20];;
  625.74  	cmp.ne p7,p0=r21,r0	// meta==0?
  625.75  (p7)	br.spnt.many	1f ;;	// already in metaphysical mode
  625.76 -	mov r22=IA64_KR(CURRENT);;
  625.77 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.78 +	ld8 r22=[r22];;
  625.79  	adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
  625.80  	ld4 r23=[r22];;
  625.81  	mov rr[r0]=r23;;
  625.82 @@ -1137,7 +1144,8 @@ ENTRY(hyper_get_ivr)
  625.83  (p7)	adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
  625.84  (p7)	st4 [r20]=r0;;
  625.85  (p7)	br.spnt.many 1f ;;
  625.86 -	mov r30=IA64_KR(CURRENT);;
  625.87 +	movl r30=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.88 +	ld8 r30=[r30];;
  625.89  	adds r24=IA64_VCPU_INSVC3_OFFSET,r30;;
  625.90  	mov r25=192
  625.91  	adds r22=IA64_VCPU_IRR3_OFFSET,r30;;
  625.92 @@ -1242,7 +1250,8 @@ ENTRY(hyper_eoi)
  625.93  	adds r21=1,r21;;
  625.94  	st8 [r20]=r21;;
  625.95  #endif
  625.96 -	mov r22=IA64_KR(CURRENT);;
  625.97 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  625.98 +	ld8 r22=[r22];;
  625.99  	adds r22=IA64_VCPU_INSVC3_OFFSET,r22;;
 625.100  	ld8 r23=[r22];;
 625.101  	cmp.eq p6,p0=r23,r0;;
 625.102 @@ -1305,9 +1314,10 @@ ENTRY(hyper_set_itm)
 625.103  	adds r21=1,r21;;
 625.104  	st8 [r20]=r21;;
 625.105  #endif
 625.106 -	movl r20=(PERCPU_ADDR)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
 625.107 +	movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
 625.108  	ld8 r21=[r20];;
 625.109 -	mov r20=IA64_KR(CURRENT);;
 625.110 +	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 625.111 +	ld8 r20=[r20];;
 625.112  	adds r20=IA64_VCPU_DOMAIN_ITM_OFFSET,r20;;
 625.113  	st8 [r20]=r8;;
 625.114  	cmp.geu p6,p0=r21,r8;;
 625.115 @@ -1378,7 +1388,8 @@ ENTRY(hyper_set_rr)
 625.116  	st8 [r20]=r21;;
 625.117  #endif
 625.118  	extr.u r26=r9,8,24	// r26 = r9.rid
 625.119 -	mov r20=IA64_KR(CURRENT);;
 625.120 +	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 625.121 +	ld8 r20=[r20];;
 625.122  	adds r21=IA64_VCPU_STARTING_RID_OFFSET,r20;;
 625.123  	ld4 r22=[r21];;
 625.124  	adds r21=IA64_VCPU_ENDING_RID_OFFSET,r20;;
 625.125 @@ -1544,7 +1555,8 @@ 2:
 625.126  	mov ar.lc=r30 ;;
 625.127  	mov r29=cr.ipsr
 625.128  	mov r30=cr.iip;;
 625.129 -	mov r27=IA64_KR(CURRENT);;
 625.130 +	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 625.131 +	ld8 r27=[r27];;
 625.132  	adds r25=IA64_VCPU_DTLB_OFFSET,r27
 625.133  	adds r26=IA64_VCPU_ITLB_OFFSET,r27;;
 625.134  	ld8 r24=[r25]
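
Every hunk in this file applies the same transformation: the slow application-register read (mov rX=IA64_KR(CURRENT), noted elsewhere in this changeset as a 12-cycle read latency) becomes a load from a per-CPU shadow area addressed via THIS_CPU(cpu_kr). In C terms the new two-instruction sequence is roughly the following (a sketch; the struct and slot index are assumptions for exposition, the real layout comes from the generated asm offsets):

	/* Per-CPU shadow of the ar.k0..ar.k7 kernel registers; Xen keeps
	 * "current" here rather than reading ar.k* directly. */
	struct cpu_kr_shadow {
		unsigned long kr[8];
	};

	/* movl rX=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET ;; ld8 rX=[rX] */
	static inline struct vcpu *shadow_current(struct cpu_kr_shadow *cpu_kr)
	{
		return (struct vcpu *)cpu_kr->kr[6];	/* assumed: ar.k6 slot */
	}
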
   626.1 --- a/xen/arch/ia64/ivt.S	Fri Aug 26 08:50:31 2005 +0000
   626.2 +++ b/xen/arch/ia64/ivt.S	Fri Aug 26 09:05:43 2005 +0000
   626.3 @@ -136,7 +136,11 @@ ENTRY(vhpt_miss)
   626.4  	;;
   626.5  	rsm psr.dt				// use physical addressing for data
   626.6  	mov r31=pr				// save the predicate registers
   626.7 +#ifdef XEN
   626.8 +	movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
   626.9 +#else
  626.10  	mov r19=IA64_KR(PT_BASE)		// get page table base address
  626.11 +#endif
  626.12  	shl r21=r16,3				// shift bit 60 into sign bit
  626.13  	shr.u r17=r16,61			// get the region number into r17
  626.14  	;;
  626.15 @@ -503,7 +507,11 @@ ENTRY(nested_dtlb_miss)
  626.16  	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
  626.17  	 */
  626.18  	rsm psr.dt				// switch to using physical data addressing
  626.19 +#ifdef XEN
  626.20 +	movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
  626.21 +#else
  626.22  	mov r19=IA64_KR(PT_BASE)		// get the page table base address
  626.23 +#endif
  626.24  	shl r21=r16,3				// shift bit 60 into sign bit
  626.25  	;;
  626.26  	shr.u r17=r16,61			// get the region number into r17
  626.27 @@ -833,7 +841,9 @@ ENTRY(break_fault)
  626.28  	cmp4.eq p7,p0=r0,r19
  626.29  (p7)	br.sptk.many fast_hyperprivop
  626.30  	;;
  626.31 -	mov r22=IA64_KR(CURRENT);;
  626.32 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  626.33 +	ld8 r22 = [r22]
  626.34 +	;;
  626.35  	adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
  626.36  	ld4 r23=[r22];;
  626.37  	cmp4.eq p6,p7=r23,r17			// Xen-reserved breakimm?
  626.38 @@ -842,7 +852,8 @@ ENTRY(break_fault)
  626.39  	br.sptk.many fast_break_reflect
  626.40  	;;
  626.41  #endif
  626.42 -	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
  626.43 +	movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
  626.44 +	ld8 r16=[r16]
  626.45  	mov r17=cr.iim
  626.46  	mov r18=__IA64_BREAK_SYSCALL
  626.47  	mov r21=ar.fpsr
  626.48 @@ -934,7 +945,7 @@ ENTRY(interrupt)
 626.49  	// FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
 626.50  	// not used anywhere else and we need a place to stash ivr, and
 626.51  	// there are no registers left unused by SAVE_MIN/REST
  626.52 -	movl r29=(PERCPU_ADDR)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
  626.53 +	movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
  626.54  	st8 [r29]=r30;;
  626.55  	movl r28=slow_interrupt;;
  626.56  	mov r29=rp;;
  626.57 @@ -954,7 +965,7 @@ slow_interrupt:
  626.58  	;;
  626.59  	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  626.60  #ifdef XEN
  626.61 -	movl out0=(PERCPU_ADDR)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
  626.62 +	movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
  626.63  	ld8 out0=[out0];;
  626.64  #else
  626.65  	mov out0=cr.ivr		// pass cr.ivr as first arg
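
The break_fault hunks fetch the Xen-reserved break immediate through that same per-CPU current pointer before deciding where to dispatch. The logic, sketched in C (the predicates follow the assembly above; the helper function and the iim==0 reading of the first compare are assumptions):

	/* iim is cr.iim; breakimm is the per-vcpu value loaded via
	 * IA64_VCPU_BREAKIMM_OFFSET (set to 0x1000 + domain_id elsewhere). */
	void break_fault_dispatch(unsigned long iim, unsigned long breakimm)
	{
		if (iim == 0)
			fast_hyperprivop();	/* hyperprivileged operation */
		else if (iim != breakimm)
			fast_break_reflect();	/* reflect the break to the guest */
		/* else: Xen-reserved breakimm, continue to hypercall handling */
	}
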
   627.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   627.2 +++ b/xen/arch/ia64/linux-xen/efi.c	Fri Aug 26 09:05:43 2005 +0000
   627.3 @@ -0,0 +1,866 @@
   627.4 +/*
   627.5 + * Extensible Firmware Interface
   627.6 + *
   627.7 + * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
   627.8 + *
   627.9 + * Copyright (C) 1999 VA Linux Systems
  627.10 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  627.11 + * Copyright (C) 1999-2003 Hewlett-Packard Co.
  627.12 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  627.13 + *	Stephane Eranian <eranian@hpl.hp.com>
  627.14 + *
  627.15 + * All EFI Runtime Services are not implemented yet as EFI only
  627.16 + * supports physical mode addressing on SoftSDV. This is to be fixed
  627.17 + * in a future version.  --drummond 1999-07-20
  627.18 + *
  627.19 + * Implemented EFI runtime services and virtual mode calls.  --davidm
  627.20 + *
  627.21 + * Goutham Rao: <goutham.rao@intel.com>
  627.22 + *	Skip non-WB memory and ignore empty memory ranges.
  627.23 + */
  627.24 +#include <linux/config.h>
  627.25 +#include <linux/module.h>
  627.26 +#include <linux/kernel.h>
  627.27 +#include <linux/init.h>
  627.28 +#include <linux/types.h>
  627.29 +#include <linux/time.h>
  627.30 +#include <linux/efi.h>
  627.31 +
  627.32 +#include <asm/io.h>
  627.33 +#include <asm/kregs.h>
  627.34 +#include <asm/meminit.h>
  627.35 +#include <asm/pgtable.h>
  627.36 +#include <asm/processor.h>
  627.37 +#include <asm/mca.h>
  627.38 +
  627.39 +#define EFI_DEBUG	0
  627.40 +
  627.41 +extern efi_status_t efi_call_phys (void *, ...);
  627.42 +
  627.43 +struct efi efi;
  627.44 +EXPORT_SYMBOL(efi);
  627.45 +static efi_runtime_services_t *runtime;
  627.46 +static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
  627.47 +
  627.48 +#define efi_call_virt(f, args...)	(*(f))(args)
  627.49 +
  627.50 +#define STUB_GET_TIME(prefix, adjust_arg)							  \
  627.51 +static efi_status_t										  \
  627.52 +prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc)						  \
  627.53 +{												  \
  627.54 +	struct ia64_fpreg fr[6];								  \
  627.55 +	efi_time_cap_t *atc = NULL;								  \
  627.56 +	efi_status_t ret;									  \
  627.57 +												  \
  627.58 +	if (tc)											  \
  627.59 +		atc = adjust_arg(tc);								  \
  627.60 +	ia64_save_scratch_fpregs(fr);								  \
  627.61 +	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
  627.62 +	ia64_load_scratch_fpregs(fr);								  \
  627.63 +	return ret;										  \
  627.64 +}
  627.65 +
  627.66 +#define STUB_SET_TIME(prefix, adjust_arg)							\
  627.67 +static efi_status_t										\
  627.68 +prefix##_set_time (efi_time_t *tm)								\
  627.69 +{												\
  627.70 +	struct ia64_fpreg fr[6];								\
  627.71 +	efi_status_t ret;									\
  627.72 +												\
  627.73 +	ia64_save_scratch_fpregs(fr);								\
  627.74 +	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm));	\
  627.75 +	ia64_load_scratch_fpregs(fr);								\
  627.76 +	return ret;										\
  627.77 +}
  627.78 +
  627.79 +#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg)						\
  627.80 +static efi_status_t										\
  627.81 +prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)		\
  627.82 +{												\
  627.83 +	struct ia64_fpreg fr[6];								\
  627.84 +	efi_status_t ret;									\
  627.85 +												\
  627.86 +	ia64_save_scratch_fpregs(fr);								\
  627.87 +	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time),	\
  627.88 +				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm));	\
  627.89 +	ia64_load_scratch_fpregs(fr);								\
  627.90 +	return ret;										\
  627.91 +}
  627.92 +
  627.93 +#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg)						\
  627.94 +static efi_status_t										\
  627.95 +prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)					\
  627.96 +{												\
  627.97 +	struct ia64_fpreg fr[6];								\
  627.98 +	efi_time_t *atm = NULL;									\
  627.99 +	efi_status_t ret;									\
 627.100 +												\
 627.101 +	if (tm)											\
 627.102 +		atm = adjust_arg(tm);								\
 627.103 +	ia64_save_scratch_fpregs(fr);								\
 627.104 +	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time),	\
 627.105 +				enabled, atm);							\
 627.106 +	ia64_load_scratch_fpregs(fr);								\
 627.107 +	return ret;										\
 627.108 +}
 627.109 +
 627.110 +#define STUB_GET_VARIABLE(prefix, adjust_arg)						\
 627.111 +static efi_status_t									\
 627.112 +prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,		\
 627.113 +		       unsigned long *data_size, void *data)				\
 627.114 +{											\
 627.115 +	struct ia64_fpreg fr[6];							\
 627.116 +	u32 *aattr = NULL;									\
 627.117 +	efi_status_t ret;								\
 627.118 +											\
 627.119 +	if (attr)									\
 627.120 +		aattr = adjust_arg(attr);						\
 627.121 +	ia64_save_scratch_fpregs(fr);							\
 627.122 +	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable),	\
 627.123 +				adjust_arg(name), adjust_arg(vendor), aattr,		\
 627.124 +				adjust_arg(data_size), adjust_arg(data));		\
 627.125 +	ia64_load_scratch_fpregs(fr);							\
 627.126 +	return ret;									\
 627.127 +}
 627.128 +
 627.129 +#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg)						\
 627.130 +static efi_status_t										\
 627.131 +prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)	\
 627.132 +{												\
 627.133 +	struct ia64_fpreg fr[6];								\
 627.134 +	efi_status_t ret;									\
 627.135 +												\
 627.136 +	ia64_save_scratch_fpregs(fr);								\
 627.137 +	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable),	\
 627.138 +				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor));	\
 627.139 +	ia64_load_scratch_fpregs(fr);								\
 627.140 +	return ret;										\
 627.141 +}
 627.142 +
 627.143 +#define STUB_SET_VARIABLE(prefix, adjust_arg)						\
 627.144 +static efi_status_t									\
 627.145 +prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr,	\
 627.146 +		       unsigned long data_size, void *data)				\
 627.147 +{											\
 627.148 +	struct ia64_fpreg fr[6];							\
 627.149 +	efi_status_t ret;								\
 627.150 +											\
 627.151 +	ia64_save_scratch_fpregs(fr);							\
 627.152 +	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable),	\
 627.153 +				adjust_arg(name), adjust_arg(vendor), attr, data_size,	\
 627.154 +				adjust_arg(data));					\
 627.155 +	ia64_load_scratch_fpregs(fr);							\
 627.156 +	return ret;									\
 627.157 +}
 627.158 +
 627.159 +#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg)					\
 627.160 +static efi_status_t										\
 627.161 +prefix##_get_next_high_mono_count (u32 *count)							\
 627.162 +{												\
 627.163 +	struct ia64_fpreg fr[6];								\
 627.164 +	efi_status_t ret;									\
 627.165 +												\
 627.166 +	ia64_save_scratch_fpregs(fr);								\
 627.167 +	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *)				\
 627.168 +				__va(runtime->get_next_high_mono_count), adjust_arg(count));	\
 627.169 +	ia64_load_scratch_fpregs(fr);								\
 627.170 +	return ret;										\
 627.171 +}
 627.172 +
 627.173 +#define STUB_RESET_SYSTEM(prefix, adjust_arg)					\
 627.174 +static void									\
 627.175 +prefix##_reset_system (int reset_type, efi_status_t status,			\
 627.176 +		       unsigned long data_size, efi_char16_t *data)		\
 627.177 +{										\
 627.178 +	struct ia64_fpreg fr[6];						\
 627.179 +	efi_char16_t *adata = NULL;						\
 627.180 +										\
 627.181 +	if (data)								\
 627.182 +		adata = adjust_arg(data);					\
 627.183 +										\
 627.184 +	ia64_save_scratch_fpregs(fr);						\
 627.185 +	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system),	\
 627.186 +			  reset_type, status, data_size, adata);		\
 627.187 +	/* should not return, but just in case... */				\
 627.188 +	ia64_load_scratch_fpregs(fr);						\
 627.189 +}
 627.190 +
 627.191 +#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))
 627.192 +
 627.193 +STUB_GET_TIME(phys, phys_ptr)
 627.194 +STUB_SET_TIME(phys, phys_ptr)
 627.195 +STUB_GET_WAKEUP_TIME(phys, phys_ptr)
 627.196 +STUB_SET_WAKEUP_TIME(phys, phys_ptr)
 627.197 +STUB_GET_VARIABLE(phys, phys_ptr)
 627.198 +STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
 627.199 +STUB_SET_VARIABLE(phys, phys_ptr)
 627.200 +STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
 627.201 +STUB_RESET_SYSTEM(phys, phys_ptr)
 627.202 +
 627.203 +#define id(arg)	arg
 627.204 +
 627.205 +STUB_GET_TIME(virt, id)
 627.206 +STUB_SET_TIME(virt, id)
 627.207 +STUB_GET_WAKEUP_TIME(virt, id)
 627.208 +STUB_SET_WAKEUP_TIME(virt, id)
 627.209 +STUB_GET_VARIABLE(virt, id)
 627.210 +STUB_GET_NEXT_VARIABLE(virt, id)
 627.211 +STUB_SET_VARIABLE(virt, id)
 627.212 +STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
 627.213 +STUB_RESET_SYSTEM(virt, id)
 627.214 +
 627.215 +void
 627.216 +efi_gettimeofday (struct timespec *ts)
 627.217 +{
 627.218 +	efi_time_t tm;
 627.219 +
 627.220 +	memset(ts, 0, sizeof(*ts));
 627.221 +	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
 627.222 +		return;
 627.223 +
 627.224 +	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
 627.225 +	ts->tv_nsec = tm.nanosecond;
 627.226 +}
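
Because the function zeroes the result before the firmware call, a failed get_time leaves a zeroed timespec behind rather than stale stack data. A hypothetical caller sketch:

	/* Illustrative only: seeding the wall clock from EFI at boot. */
	struct timespec ts;

	efi_gettimeofday(&ts);
	if (ts.tv_sec == 0)
		printk(KERN_WARNING "EFI get_time failed; wall clock not set\n");
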
 627.227 +
 627.228 +static int
 627.229 +is_available_memory (efi_memory_desc_t *md)
 627.230 +{
 627.231 +	if (!(md->attribute & EFI_MEMORY_WB))
 627.232 +		return 0;
 627.233 +
 627.234 +	switch (md->type) {
 627.235 +	      case EFI_LOADER_CODE:
 627.236 +	      case EFI_LOADER_DATA:
 627.237 +	      case EFI_BOOT_SERVICES_CODE:
 627.238 +	      case EFI_BOOT_SERVICES_DATA:
 627.239 +	      case EFI_CONVENTIONAL_MEMORY:
 627.240 +		return 1;
 627.241 +	}
 627.242 +	return 0;
 627.243 +}
 627.244 +
 627.245 +/*
 627.246 + * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
 627.247 + * memory that is normally available to the kernel, issue a warning that some memory
 627.248 + * is being ignored.
 627.249 + */
 627.250 +static void
 627.251 +trim_bottom (efi_memory_desc_t *md, u64 start_addr)
 627.252 +{
 627.253 +	u64 num_skipped_pages;
 627.254 +
 627.255 +	if (md->phys_addr >= start_addr || !md->num_pages)
 627.256 +		return;
 627.257 +
 627.258 +	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
 627.259 +	if (num_skipped_pages > md->num_pages)
 627.260 +		num_skipped_pages = md->num_pages;
 627.261 +
 627.262 +	if (is_available_memory(md))
 627.263 +		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
 627.264 +		       "at 0x%lx\n", __FUNCTION__,
 627.265 +		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
 627.266 +		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
 627.267 +	/*
 627.268 +	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
 627.269 +	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
 627.270 +	 * zero, so the Right Thing will happen.
 627.271 +	 */
 627.272 +	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
 627.273 +	md->num_pages -= num_skipped_pages;
 627.274 +}
 627.275 +
 627.276 +static void
 627.277 +trim_top (efi_memory_desc_t *md, u64 end_addr)
 627.278 +{
 627.279 +	u64 num_dropped_pages, md_end_addr;
 627.280 +
 627.281 +	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
 627.282 +
 627.283 +	if (md_end_addr <= end_addr || !md->num_pages)
 627.284 +		return;
 627.285 +
 627.286 +	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
 627.287 +	if (num_dropped_pages > md->num_pages)
 627.288 +		num_dropped_pages = md->num_pages;
 627.289 +
 627.290 +	if (is_available_memory(md))
 627.291 +		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
 627.292 +		       "at 0x%lx\n", __FUNCTION__,
 627.293 +		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
 627.294 +		       md->phys_addr, end_addr);
 627.295 +	md->num_pages -= num_dropped_pages;
 627.296 +}
 627.297 +
 627.298 +/*
 627.299 + * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 627.300 + * has memory that is available for OS use.
 627.301 + */
 627.302 +void
 627.303 +efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 627.304 +{
 627.305 +	int prev_valid = 0;
 627.306 +	struct range {
 627.307 +		u64 start;
 627.308 +		u64 end;
 627.309 +	} prev, curr;
 627.310 +	void *efi_map_start, *efi_map_end, *p, *q;
 627.311 +	efi_memory_desc_t *md, *check_md;
 627.312 +	u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
 627.313 +	unsigned long total_mem = 0;
 627.314 +
 627.315 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.316 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.317 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.318 +
 627.319 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.320 +		md = p;
 627.321 +
 627.322 +		/* skip over non-WB memory descriptors; that's all we're interested in... */
 627.323 +		if (!(md->attribute & EFI_MEMORY_WB))
 627.324 +			continue;
 627.325 +
 627.326 +#ifdef XEN
 627.327 +		/* this works around a problem in the ski bootloader */
 627.328 +		{
 627.329 +			extern long running_on_sim;
 627.330 +			if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
 627.331 +				continue;
 627.332 +		}
 627.333 +		/* this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP */
 627.334 +		if (md->phys_addr >= 0x100000000UL) continue;
 627.335 +#endif
 627.336 +		/*
 627.337 +		 * granule_addr is the base of md's first granule.
 627.338 +		 * [granule_addr - first_non_wb_addr) is guaranteed to
 627.339 +		 * be contiguous WB memory.
 627.340 +		 */
 627.341 +		granule_addr = GRANULEROUNDDOWN(md->phys_addr);
 627.342 +		first_non_wb_addr = max(first_non_wb_addr, granule_addr);
 627.343 +
 627.344 +		if (first_non_wb_addr < md->phys_addr) {
 627.345 +			trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
 627.346 +			granule_addr = GRANULEROUNDDOWN(md->phys_addr);
 627.347 +			first_non_wb_addr = max(first_non_wb_addr, granule_addr);
 627.348 +		}
 627.349 +
 627.350 +		for (q = p; q < efi_map_end; q += efi_desc_size) {
 627.351 +			check_md = q;
 627.352 +
 627.353 +			if ((check_md->attribute & EFI_MEMORY_WB) &&
 627.354 +			    (check_md->phys_addr == first_non_wb_addr))
 627.355 +				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
 627.356 +			else
 627.357 +				break;		/* non-WB or hole */
 627.358 +		}
 627.359 +
 627.360 +		last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
 627.361 +		if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
 627.362 +			trim_top(md, last_granule_addr);
 627.363 +
 627.364 +		if (is_available_memory(md)) {
 627.365 +			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
 627.366 +				if (md->phys_addr >= max_addr)
 627.367 +					continue;
 627.368 +				md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
 627.369 +				first_non_wb_addr = max_addr;
 627.370 +			}
 627.371 +
 627.372 +			if (total_mem >= mem_limit)
 627.373 +				continue;
 627.374 +
 627.375 +			if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
 627.376 +				unsigned long limit_addr = md->phys_addr;
 627.377 +
 627.378 +				limit_addr += mem_limit - total_mem;
 627.379 +				limit_addr = GRANULEROUNDDOWN(limit_addr);
 627.380 +
 627.381 +				if (md->phys_addr > limit_addr)
 627.382 +					continue;
 627.383 +
 627.384 +				md->num_pages = (limit_addr - md->phys_addr) >>
 627.385 +				                EFI_PAGE_SHIFT;
 627.386 +				first_non_wb_addr = max_addr = md->phys_addr +
 627.387 +				              (md->num_pages << EFI_PAGE_SHIFT);
 627.388 +			}
 627.389 +			total_mem += (md->num_pages << EFI_PAGE_SHIFT);
 627.390 +
 627.391 +			if (md->num_pages == 0)
 627.392 +				continue;
 627.393 +
 627.394 +			curr.start = PAGE_OFFSET + md->phys_addr;
 627.395 +			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
 627.396 +
 627.397 +			if (!prev_valid) {
 627.398 +				prev = curr;
 627.399 +				prev_valid = 1;
 627.400 +			} else {
 627.401 +				if (curr.start < prev.start)
 627.402 +					printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
 627.403 +
 627.404 +				if (prev.end == curr.start) {
 627.405 +					/* merge two consecutive memory ranges */
 627.406 +					prev.end = curr.end;
 627.407 +				} else {
 627.408 +					start = PAGE_ALIGN(prev.start);
 627.409 +					end = prev.end & PAGE_MASK;
 627.410 +					if ((end > start) && (*callback)(start, end, arg) < 0)
 627.411 +						return;
 627.412 +					prev = curr;
 627.413 +				}
 627.414 +			}
 627.415 +		}
 627.416 +	}
 627.417 +	if (prev_valid) {
 627.418 +		start = PAGE_ALIGN(prev.start);
 627.419 +		end = prev.end & PAGE_MASK;
 627.420 +		if (end > start)
 627.421 +			(*callback)(start, end, arg);
 627.422 +	}
 627.423 +}
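
A walker callback only needs the (start, end, arg) signature and may return a negative value to stop the walk early, as the merge loop above shows. For example, totalling usable memory (a sketch; native Linux's efi.c carries an equivalent count_pages helper):

	static int
	count_pages (u64 start, u64 end, void *arg)
	{
		unsigned long *count = arg;

		*count += (end - start) >> PAGE_SHIFT;
		return 0;	/* keep walking */
	}

	/* usage: unsigned long n = 0; efi_memmap_walk(count_pages, &n); */
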
 627.424 +
 627.425 +/*
 627.426 + * Look for the PAL_CODE region reported by EFI and map it using an
 627.427 + * ITR to enable safe PAL calls in virtual mode.  See the IA-64 Processor
 627.428 + * Abstraction Layer chapter 11 in the ADAG.
 627.429 + */
 627.430 +
 627.431 +void *
 627.432 +efi_get_pal_addr (void)
 627.433 +{
 627.434 +	void *efi_map_start, *efi_map_end, *p;
 627.435 +	efi_memory_desc_t *md;
 627.436 +	u64 efi_desc_size;
 627.437 +	int pal_code_count = 0;
 627.438 +	u64 vaddr, mask;
 627.439 +
 627.440 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.441 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.442 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.443 +
 627.444 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.445 +		md = p;
 627.446 +		if (md->type != EFI_PAL_CODE)
 627.447 +			continue;
 627.448 +
 627.449 +		if (++pal_code_count > 1) {
 627.450 +			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
 627.451 +			       md->phys_addr);
 627.452 +			continue;
 627.453 +		}
 627.454 +		/*
 627.455 +		 * The only ITLB entry in region 7 that is used is the one installed by
 627.456 +		 * __start().  That entry covers a 64MB range.
 627.457 +		 */
 627.458 +		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
 627.459 +		vaddr = PAGE_OFFSET + md->phys_addr;
 627.460 +
 627.461 +		/*
 627.462 +		 * We must check that the PAL mapping won't overlap with the kernel
 627.463 +		 * mapping.
 627.464 +		 *
 627.465 +		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
 627.466 +		 * 256KB, so only one ITR is needed to map it.  This implies that the
 627.467 +		 * PAL code is always aligned on its size, i.e., the closest matching page
 627.468 +		 * size supported by the TLB.  Therefore PAL code is guaranteed never to
 627.469 +		 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).  So for
 627.470 +		 * now the following test is enough to determine whether or not we need a
 627.471 +		 * dedicated ITR for the PAL code.
 627.472 +		 */
 627.473 +		if ((vaddr & mask) == (KERNEL_START & mask)) {
 627.474 +			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
 627.475 +			       __FUNCTION__);
 627.476 +			continue;
 627.477 +		}
 627.478 +
 627.479 +		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
 627.480 +			panic("Woah!  PAL code size bigger than a granule!");
 627.481 +
 627.482 +#if EFI_DEBUG
 627.483 +		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 627.484 +
 627.485 +		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
 627.486 +			smp_processor_id(), md->phys_addr,
 627.487 +			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 627.488 +			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 627.489 +#endif
 627.490 +		return __va(md->phys_addr);
 627.491 +	}
 627.492 +	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
 627.493 +	       __FUNCTION__);
 627.494 +	return NULL;
 627.495 +}
 627.496 +
 627.497 +void
 627.498 +efi_map_pal_code (void)
 627.499 +{
 627.500 +	void *pal_vaddr = efi_get_pal_addr ();
 627.501 +	u64 psr;
 627.502 +
 627.503 +	if (!pal_vaddr)
 627.504 +		return;
 627.505 +
 627.506 +	/*
 627.507 +	 * Cannot write to CRx with PSR.ic=1
 627.508 +	 */
 627.509 +	psr = ia64_clear_ic();
 627.510 +	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 627.511 +		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 627.512 +		 IA64_GRANULE_SHIFT);
 627.513 +	ia64_set_psr(psr);		/* restore psr */
 627.514 +	ia64_srlz_i();
 627.515 +}
 627.516 +
 627.517 +void __init
 627.518 +efi_init (void)
 627.519 +{
 627.520 +	void *efi_map_start, *efi_map_end;
 627.521 +	efi_config_table_t *config_tables;
 627.522 +	efi_char16_t *c16;
 627.523 +	u64 efi_desc_size;
 627.524 +	char *cp, *end, vendor[100] = "unknown";
 627.525 +	extern char saved_command_line[];
 627.526 +	int i;
 627.527 +
 627.528 +	/* it's too early to be able to use the standard kernel command line support... */
 627.529 +	for (cp = saved_command_line; *cp; ) {
 627.530 +		if (memcmp(cp, "mem=", 4) == 0) {
 627.531 +			cp += 4;
 627.532 +			mem_limit = memparse(cp, &end);
 627.533 +			if (end != cp)
 627.534 +				break;
 627.535 +			cp = end;
 627.536 +		} else if (memcmp(cp, "max_addr=", 9) == 0) {
 627.537 +			cp += 9;
 627.538 +			max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
 627.539 +			if (end != cp)
 627.540 +				break;
 627.541 +			cp = end;
 627.542 +		} else {
 627.543 +			while (*cp != ' ' && *cp)
 627.544 +				++cp;
 627.545 +			while (*cp == ' ')
 627.546 +				++cp;
 627.547 +		}
 627.548 +	}
 627.549 +	if (max_addr != ~0UL)
 627.550 +		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
 627.551 +
 627.552 +	efi.systab = __va(ia64_boot_param->efi_systab);
 627.553 +
 627.554 +	/*
 627.555 +	 * Verify the EFI Table
 627.556 +	 */
 627.557 +	if (efi.systab == NULL)
 627.558 +		panic("Woah! Can't find EFI system table.\n");
 627.559 +	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
 627.560 +		panic("Woah! EFI system table signature incorrect\n");
 627.561 +	if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
 627.562 +		printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
 627.563 +		       "got %d.%02d, expected %d.%02d\n",
 627.564 +		       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
 627.565 +		       EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);
 627.566 +
 627.567 +	config_tables = __va(efi.systab->tables);
 627.568 +
 627.569 +	/* Show what we know for posterity */
 627.570 +	c16 = __va(efi.systab->fw_vendor);
 627.571 +	if (c16) {
 627.572 +		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
 627.573 +			vendor[i] = *c16++;
 627.574 +		vendor[i] = '\0';
 627.575 +	}
 627.576 +
 627.577 +	printk(KERN_INFO "EFI v%u.%.02u by %s:",
 627.578 +	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
 627.579 +
 627.580 +	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 627.581 +		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 627.582 +			efi.mps = __va(config_tables[i].table);
 627.583 +			printk(" MPS=0x%lx", config_tables[i].table);
 627.584 +		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
 627.585 +			efi.acpi20 = __va(config_tables[i].table);
 627.586 +			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
 627.587 +		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
 627.588 +			efi.acpi = __va(config_tables[i].table);
 627.589 +			printk(" ACPI=0x%lx", config_tables[i].table);
 627.590 +		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
 627.591 +			efi.smbios = __va(config_tables[i].table);
 627.592 +			printk(" SMBIOS=0x%lx", config_tables[i].table);
 627.593 +		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
 627.594 +			efi.sal_systab = __va(config_tables[i].table);
 627.595 +			printk(" SALsystab=0x%lx", config_tables[i].table);
 627.596 +		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
 627.597 +			efi.hcdp = __va(config_tables[i].table);
 627.598 +			printk(" HCDP=0x%lx", config_tables[i].table);
 627.599 +		}
 627.600 +	}
 627.601 +	printk("\n");
 627.602 +
 627.603 +	runtime = __va(efi.systab->runtime);
 627.604 +	efi.get_time = phys_get_time;
 627.605 +	efi.set_time = phys_set_time;
 627.606 +	efi.get_wakeup_time = phys_get_wakeup_time;
 627.607 +	efi.set_wakeup_time = phys_set_wakeup_time;
 627.608 +	efi.get_variable = phys_get_variable;
 627.609 +	efi.get_next_variable = phys_get_next_variable;
 627.610 +	efi.set_variable = phys_set_variable;
 627.611 +	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
 627.612 +	efi.reset_system = phys_reset_system;
 627.613 +
 627.614 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.615 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.616 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.617 +
 627.618 +#if EFI_DEBUG
 627.619 +	/* print EFI memory map: */
 627.620 +	{
 627.621 +		efi_memory_desc_t *md;
 627.622 +		void *p;
 627.623 +
 627.624 +		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
 627.625 +			md = p;
 627.626 +			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
 627.627 +			       i, md->type, md->attribute, md->phys_addr,
 627.628 +			       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 627.629 +			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
 627.630 +		}
 627.631 +	}
 627.632 +#endif
 627.633 +
 627.634 +	efi_map_pal_code();
 627.635 +	efi_enter_virtual_mode();
 627.636 +}
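
The hand-rolled scan at the top of efi_init() runs before the regular kernel command-line machinery is available, so it recognizes just these two options by hand. A small sketch of the helper it leans on (the boot line is hypothetical):

	/* "mem=" caps the total WB memory that efi_memmap_walk() will count;
	 * "max_addr=" is granule-rounded and used as an absolute ceiling.
	 * memparse() accepts an optional K/M/G suffix and returns bytes: */
	char *end;
	unsigned long cap = memparse("512M", &end);	/* cap == 512 << 20 */
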
 627.637 +
 627.638 +void
 627.639 +efi_enter_virtual_mode (void)
 627.640 +{
 627.641 +	void *efi_map_start, *efi_map_end, *p;
 627.642 +	efi_memory_desc_t *md;
 627.643 +	efi_status_t status;
 627.644 +	u64 efi_desc_size;
 627.645 +
 627.646 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.647 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.648 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.649 +
 627.650 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.651 +		md = p;
 627.652 +		if (md->attribute & EFI_MEMORY_RUNTIME) {
 627.653 +			/*
 627.654 +			 * Some descriptors have multiple bits set, so the order of
 627.655 +			 * the tests is relevant.
 627.656 +			 */
 627.657 +			if (md->attribute & EFI_MEMORY_WB) {
 627.658 +				md->virt_addr = (u64) __va(md->phys_addr);
 627.659 +			} else if (md->attribute & EFI_MEMORY_UC) {
 627.660 +				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
 627.661 +			} else if (md->attribute & EFI_MEMORY_WC) {
 627.662 +#if 0
 627.663 +				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
 627.664 +									   | _PAGE_D
 627.665 +									   | _PAGE_MA_WC
 627.666 +									   | _PAGE_PL_0
 627.667 +									   | _PAGE_AR_RW));
 627.668 +#else
 627.669 +				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
 627.670 +				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
 627.671 +#endif
 627.672 +			} else if (md->attribute & EFI_MEMORY_WT) {
 627.673 +#if 0
 627.674 +				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
 627.675 +									   | _PAGE_D | _PAGE_MA_WT
 627.676 +									   | _PAGE_PL_0
 627.677 +									   | _PAGE_AR_RW));
 627.678 +#else
 627.679 +				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
 627.680 +				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
 627.681 +#endif
 627.682 +			}
 627.683 +		}
 627.684 +	}
 627.685 +
 627.686 +	status = efi_call_phys(__va(runtime->set_virtual_address_map),
 627.687 +			       ia64_boot_param->efi_memmap_size,
 627.688 +			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
 627.689 +			       ia64_boot_param->efi_memmap);
 627.690 +	if (status != EFI_SUCCESS) {
 627.691 +		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
 627.692 +		       "(status=%lu)\n", status);
 627.693 +		return;
 627.694 +	}
 627.695 +
 627.696 +	/*
 627.697 +	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
 627.698 +	 */
 627.699 +	efi.get_time = virt_get_time;
 627.700 +	efi.set_time = virt_set_time;
 627.701 +	efi.get_wakeup_time = virt_get_wakeup_time;
 627.702 +	efi.set_wakeup_time = virt_set_wakeup_time;
 627.703 +	efi.get_variable = virt_get_variable;
 627.704 +	efi.get_next_variable = virt_get_next_variable;
 627.705 +	efi.set_variable = virt_set_variable;
 627.706 +	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
 627.707 +	efi.reset_system = virt_reset_system;
 627.708 +}
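
Once SetVirtualAddressMap succeeds, callers keep going through the same efi.* function table; only the stubs behind it change. A hypothetical post-switch caller (signature per the STUB_GET_WAKEUP_TIME wrapper defined earlier in this file):

	efi_time_t tm;
	efi_bool_t enabled, pending;

	if (efi.get_wakeup_time(&enabled, &pending, &tm) == EFI_SUCCESS)
		printk(KERN_INFO "EFI wakeup timer %s\n",
		       enabled ? "armed" : "disarmed");
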
 627.709 +
 627.710 +/*
 627.711 + * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
 627.712 + * this type, other I/O port ranges should be described via ACPI.
 627.713 + */
 627.714 +u64
 627.715 +efi_get_iobase (void)
 627.716 +{
 627.717 +	void *efi_map_start, *efi_map_end, *p;
 627.718 +	efi_memory_desc_t *md;
 627.719 +	u64 efi_desc_size;
 627.720 +
 627.721 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.722 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.723 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.724 +
 627.725 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.726 +		md = p;
 627.727 +		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
 627.728 +			if (md->attribute & EFI_MEMORY_UC)
 627.729 +				return md->phys_addr;
 627.730 +		}
 627.731 +	}
 627.732 +	return 0;
 627.733 +}
 627.734 +
 627.735 +#ifdef XEN
 627.736 +// variation of efi_get_iobase which returns the entire memory descriptor
 627.737 +efi_memory_desc_t *
 627.738 +efi_get_io_md (void)
 627.739 +{
 627.740 +	void *efi_map_start, *efi_map_end, *p;
 627.741 +	efi_memory_desc_t *md;
 627.742 +	u64 efi_desc_size;
 627.743 +
 627.744 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.745 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.746 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.747 +
 627.748 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.749 +		md = p;
 627.750 +		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
 627.751 +			if (md->attribute & EFI_MEMORY_UC)
 627.752 +				return md;
 627.753 +		}
 627.754 +	}
 627.755 +	return NULL;
 627.756 +}
 627.757 +#endif
 627.758 +
 627.759 +u32
 627.760 +efi_mem_type (unsigned long phys_addr)
 627.761 +{
 627.762 +	void *efi_map_start, *efi_map_end, *p;
 627.763 +	efi_memory_desc_t *md;
 627.764 +	u64 efi_desc_size;
 627.765 +
 627.766 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.767 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.768 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.769 +
 627.770 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.771 +		md = p;
 627.772 +
 627.773 +		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 627.774 +			 return md->type;
 627.775 +	}
 627.776 +	return 0;
 627.777 +}
 627.778 +
 627.779 +u64
 627.780 +efi_mem_attributes (unsigned long phys_addr)
 627.781 +{
 627.782 +	void *efi_map_start, *efi_map_end, *p;
 627.783 +	efi_memory_desc_t *md;
 627.784 +	u64 efi_desc_size;
 627.785 +
 627.786 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.787 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.788 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.789 +
 627.790 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.791 +		md = p;
 627.792 +
 627.793 +		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 627.794 +			return md->attribute;
 627.795 +	}
 627.796 +	return 0;
 627.797 +}
 627.798 +EXPORT_SYMBOL(efi_mem_attributes);
 627.799 +
 627.800 +int
 627.801 +valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
 627.802 +{
 627.803 +	void *efi_map_start, *efi_map_end, *p;
 627.804 +	efi_memory_desc_t *md;
 627.805 +	u64 efi_desc_size;
 627.806 +
 627.807 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
 627.808 +	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
 627.809 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 627.810 +
 627.811 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 627.812 +		md = p;
 627.813 +
 627.814 +		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
 627.815 +			if (!(md->attribute & EFI_MEMORY_WB))
 627.816 +				return 0;
 627.817 +
 627.818 +			if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
 627.819 +				*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
 627.820 +			return 1;
 627.821 +		}
 627.822 +	}
 627.823 +	return 0;
 627.824 +}
 627.825 +
 627.826 +int __init
 627.827 +efi_uart_console_only(void)
 627.828 +{
 627.829 +	efi_status_t status;
 627.830 +	char *s, name[] = "ConOut";
 627.831 +	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
 627.832 +	efi_char16_t *utf16, name_utf16[32];
 627.833 +	unsigned char data[1024];
 627.834 +	unsigned long size = sizeof(data);
 627.835 +	struct efi_generic_dev_path *hdr, *end_addr;
 627.836 +	int uart = 0;
 627.837 +
 627.838 +	/* Convert to UTF-16 */
 627.839 +	utf16 = name_utf16;
 627.840 +	s = name;
 627.841 +	while (*s)
 627.842 +		*utf16++ = *s++ & 0x7f;
 627.843 +	*utf16 = 0;
 627.844 +
 627.845 +	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
 627.846 +	if (status != EFI_SUCCESS) {
 627.847 +		printk(KERN_ERR "No EFI %s variable?\n", name);
 627.848 +		return 0;
 627.849 +	}
 627.850 +
 627.851 +	hdr = (struct efi_generic_dev_path *) data;
 627.852 +	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
 627.853 +	while (hdr < end_addr) {
 627.854 +		if (hdr->type == EFI_DEV_MSG &&
 627.855 +		    hdr->sub_type == EFI_DEV_MSG_UART)
 627.856 +			uart = 1;
 627.857 +		else if (hdr->type == EFI_DEV_END_PATH ||
 627.858 +			  hdr->type == EFI_DEV_END_PATH2) {
 627.859 +			if (!uart)
 627.860 +				return 0;
 627.861 +			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
 627.862 +				return 1;
 627.863 +			uart = 0;
 627.864 +		}
 627.865 +		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
 627.866 +	}
 627.867 +	printk(KERN_ERR "Malformed %s value\n", name);
 627.868 +	return 0;
 627.869 +}
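
The walk above leans on the generic node header shared by all EFI device paths: each node begins with a type, a sub-type, and a total length, and the blob is a sequence of such nodes terminated by an end-path node. For reference, the layout assumed by the casts (as declared in the EFI headers) is:

	/* Node header for EFI device paths; length is the full node size
	 * in bytes, header included, so it doubles as the stride of the
	 * walk in efi_uart_console_only() above. */
	struct efi_generic_dev_path {
		u8  type;
		u8  sub_type;
		u16 length;
	} __attribute__ ((packed));
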
   628.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   628.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Fri Aug 26 09:05:43 2005 +0000
   628.3 @@ -0,0 +1,1657 @@
   628.4 +/*
   628.5 + * ia64/kernel/entry.S
   628.6 + *
   628.7 + * Kernel entry points.
   628.8 + *
   628.9 + * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
  628.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  628.11 + * Copyright (C) 1999, 2002-2003
  628.12 + *	Asit Mallick <Asit.K.Mallick@intel.com>
  628.13 + * 	Don Dugger <Don.Dugger@intel.com>
  628.14 + *	Suresh Siddha <suresh.b.siddha@intel.com>
  628.15 + *	Fenghua Yu <fenghua.yu@intel.com>
  628.16 + * Copyright (C) 1999 VA Linux Systems
  628.17 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  628.18 + */
  628.19 +/*
  628.20 + * ia64_switch_to now places the correct virtual mapping in TR2 for the
  628.21 + * kernel stack. This allows us to handle interrupts without changing
  628.22 + * to physical mode.
  628.23 + *
  628.24 + * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
  628.25 + * Patrick O'Rourke	<orourke@missioncriticallinux.com>
  628.26 + * 11/07/2000
  628.27 + */
  628.28 +/*
  628.29 + * Global (preserved) predicate usage on syscall entry/exit path:
  628.30 + *
  628.31 + *	pKStk:		See entry.h.
  628.32 + *	pUStk:		See entry.h.
  628.33 + *	pSys:		See entry.h.
  628.34 + *	pNonSys:	!pSys
  628.35 + */
  628.36 +
  628.37 +#include <linux/config.h>
  628.38 +
  628.39 +#include <asm/asmmacro.h>
  628.40 +#include <asm/cache.h>
  628.41 +#include <asm/errno.h>
  628.42 +#include <asm/kregs.h>
  628.43 +#include <asm/offsets.h>
  628.44 +#include <asm/pgtable.h>
  628.45 +#include <asm/percpu.h>
  628.46 +#include <asm/processor.h>
  628.47 +#include <asm/thread_info.h>
  628.48 +#include <asm/unistd.h>
  628.49 +
  628.50 +#include "minstate.h"
  628.51 +
  628.52 +#ifndef XEN
  628.53 +	/*
  628.54 +	 * execve() is special because in case of success, we need to
  628.55 +	 * setup a null register window frame.
  628.56 +	 */
  628.57 +ENTRY(ia64_execve)
  628.58 +	/*
  628.59 +	 * Allocate 8 input registers since ptrace() may clobber them
  628.60 +	 */
  628.61 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
  628.62 +	alloc loc1=ar.pfs,8,2,4,0
  628.63 +	mov loc0=rp
  628.64 +	.body
  628.65 +	mov out0=in0			// filename
  628.66 +	;;				// stop bit between alloc and call
  628.67 +	mov out1=in1			// argv
  628.68 +	mov out2=in2			// envp
  628.69 +	add out3=16,sp			// regs
  628.70 +	br.call.sptk.many rp=sys_execve
  628.71 +.ret0:
  628.72 +#ifdef CONFIG_IA32_SUPPORT
  628.73 +	/*
  628.74 +	 * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
  628.75 +	 * from pt_regs.
  628.76 +	 */
  628.77 +	adds r16=PT(CR_IPSR)+16,sp
  628.78 +	;;
  628.79 +	ld8 r16=[r16]
  628.80 +#endif
  628.81 +	cmp4.ge p6,p7=r8,r0
  628.82 +	mov ar.pfs=loc1			// restore ar.pfs
  628.83 +	sxt4 r8=r8			// return 64-bit result
  628.84 +	;;
  628.85 +	stf.spill [sp]=f0
  628.86 +(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
  628.87 +	mov rp=loc0
  628.88 +(p6)	mov ar.pfs=r0			// clear ar.pfs on success
  628.89 +(p7)	br.ret.sptk.many rp
  628.90 +
  628.91 +	/*
  628.92 +	 * In theory, we'd have to zap this state only to prevent leaking of
  628.93 +	 * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
  628.94 +	 * this executes in less than 20 cycles even on Itanium, so it's not worth
  628.95 +	 * optimizing away.
  628.96 +	 */
  628.97 +	mov ar.unat=0; 		mov ar.lc=0
  628.98 +	mov r4=0;		mov f2=f0;		mov b1=r0
  628.99 +	mov r5=0;		mov f3=f0;		mov b2=r0
 628.100 +	mov r6=0;		mov f4=f0;		mov b3=r0
 628.101 +	mov r7=0;		mov f5=f0;		mov b4=r0
 628.102 +	ldf.fill f12=[sp];	mov f13=f0;		mov b5=r0
 628.103 +	ldf.fill f14=[sp];	ldf.fill f15=[sp];	mov f16=f0
 628.104 +	ldf.fill f17=[sp];	ldf.fill f18=[sp];	mov f19=f0
 628.105 +	ldf.fill f20=[sp];	ldf.fill f21=[sp];	mov f22=f0
 628.106 +	ldf.fill f23=[sp];	ldf.fill f24=[sp];	mov f25=f0
 628.107 +	ldf.fill f26=[sp];	ldf.fill f27=[sp];	mov f28=f0
 628.108 +	ldf.fill f29=[sp];	ldf.fill f30=[sp];	mov f31=f0
 628.109 +#ifdef CONFIG_IA32_SUPPORT
 628.110 +	tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
 628.111 +	movl loc0=ia64_ret_from_ia32_execve
 628.112 +	;;
 628.113 +(p6)	mov rp=loc0
 628.114 +#endif
 628.115 +	br.ret.sptk.many rp
 628.116 +END(ia64_execve)
 628.117 +
 628.118 +/*
 628.119 + * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 628.120 + *	      u64 tls)
 628.121 + */
 628.122 +GLOBAL_ENTRY(sys_clone2)
 628.123 +	/*
 628.124 +	 * Allocate 8 input registers since ptrace() may clobber them
 628.125 +	 */
 628.126 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 628.127 +	alloc r16=ar.pfs,8,2,6,0
 628.128 +	DO_SAVE_SWITCH_STACK
 628.129 +	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
 628.130 +	mov loc0=rp
 628.131 +	mov loc1=r16				// save ar.pfs across do_fork
 628.132 +	.body
 628.133 +	mov out1=in1
 628.134 +	mov out3=in2
 628.135 +	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
 628.136 +	mov out4=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 628.137 +	;;
 628.138 +(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
 628.139 +	mov out5=in4	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 628.140 +	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 628.141 +	mov out0=in0				// out0 = clone_flags
 628.142 +	br.call.sptk.many rp=do_fork
 628.143 +.ret1:	.restore sp
 628.144 +	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
 628.145 +	mov ar.pfs=loc1
 628.146 +	mov rp=loc0
 628.147 +	br.ret.sptk.many rp
 628.148 +END(sys_clone2)
 628.149 +
 628.150 +/*
 628.151 + * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 628.152 + *	Deprecated.  Use sys_clone2() instead.
 628.153 + */
 628.154 +GLOBAL_ENTRY(sys_clone)
 628.155 +	/*
 628.156 +	 * Allocate 8 input registers since ptrace() may clobber them
 628.157 +	 */
 628.158 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 628.159 +	alloc r16=ar.pfs,8,2,6,0
 628.160 +	DO_SAVE_SWITCH_STACK
 628.161 +	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
 628.162 +	mov loc0=rp
 628.163 +	mov loc1=r16				// save ar.pfs across do_fork
 628.164 +	.body
 628.165 +	mov out1=in1
 628.166 +	mov out3=16				// stacksize (compensates for 16-byte scratch area)
 628.167 +	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
 628.168 +	mov out4=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 628.169 +	;;
 628.170 +(p6)	st8 [r2]=in4				// store TLS in r16 for copy_thread()
 628.171 +	mov out5=in3	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 628.172 +	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 628.173 +	mov out0=in0				// out0 = clone_flags
 628.174 +	br.call.sptk.many rp=do_fork
 628.175 +.ret2:	.restore sp
 628.176 +	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
 628.177 +	mov ar.pfs=loc1
 628.178 +	mov rp=loc0
 628.179 +	br.ret.sptk.many rp
 628.180 +END(sys_clone)
 628.181 +#endif /* !XEN */
 628.182 +
 628.183 +/*
 628.184 + * prev_task <- ia64_switch_to(struct task_struct *next)
 628.185 + *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 628.186 + *	called.  The code starting at .map relies on this.  The rest of the code
 628.187 + *	doesn't care about the interrupt masking status.
 628.188 + */
 628.189 +GLOBAL_ENTRY(ia64_switch_to)
 628.190 +	.prologue
 628.191 +	alloc r16=ar.pfs,1,0,0,0
 628.192 +	DO_SAVE_SWITCH_STACK
 628.193 +	.body
 628.194 +
 628.195 +	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
 628.196 +	movl r25=init_task
 628.197 +	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
 628.198 +	ld8 r27=[r27]
 628.199 +	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
 628.200 +#ifdef XEN
 628.201 +	dep r20=0,in0,60,4		// physical address of "next"
 628.202 +#else
 628.203 +	dep r20=0,in0,61,3		// physical address of "next"
 628.204 +#endif
 628.205 +	;;
 628.206 +	st8 [r22]=sp			// save kernel stack pointer of old task
 628.207 +	shr.u r26=r20,IA64_GRANULE_SHIFT
 628.208 +	cmp.eq p7,p6=r25,in0
 628.209 +	;;
 628.210 +	/*
 628.211 +	 * If we've already mapped this task's page, we can skip doing it again.
 628.212 +	 */
 628.213 +(p6)	cmp.eq p7,p6=r26,r27
 628.214 +(p6)	br.cond.dpnt .map
 628.215 +	;;
 628.216 +.done:
 628.217 +(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
 628.218 +	;;
 628.219 +(p6)	srlz.d
 628.220 +	ld8 sp=[r21]			// load kernel stack pointer of new task
 628.221 +	movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 628.222 +	st8 [r8]=in0
 628.223 +	mov r8=r13			// return pointer to previously running task
 628.224 +	mov r13=in0			// set "current" pointer
 628.225 +	;;
 628.226 +	DO_LOAD_SWITCH_STACK
 628.227 +
 628.228 +#ifdef CONFIG_SMP
 628.229 +	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
 628.230 +#endif
 628.231 +	br.ret.sptk.many rp		// boogie on out in new context
 628.232 +
 628.233 +.map:
 628.234 +#ifdef XEN
 628.235 +	// avoid overlapping with kernel TR
 628.236 +	movl r25=KERNEL_START
 628.237 +	dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
 628.238 +	;;
 628.239 +	cmp.eq p7,p0=r25,r23
 628.240 +	;;
 628.241 +(p7)	movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
 628.242 +(p7)	st8 [r8]=r26
 628.243 +(p7)	br.cond.sptk .done
 628.244 +#endif
 628.245 +	rsm psr.ic			// interrupts (psr.i) are already disabled here
 628.246 +	movl r25=PAGE_KERNEL
 628.247 +	;;
 628.248 +	srlz.d
 628.249 +	or r23=r25,r20			// construct PA | page properties
 628.250 +	mov r25=IA64_GRANULE_SHIFT<<2
 628.251 +	;;
 628.252 +	mov cr.itir=r25
 628.253 +	mov cr.ifa=in0			// VA of next task...
 628.254 +	;;
 628.255 +	mov r25=IA64_TR_CURRENT_STACK
 628.256 +	movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
 628.257 +	st8 [r8]=r26
 628.258 +	itr.d dtr[r25]=r23		// wire in new mapping...
 628.259 +	br.cond.sptk .done
 628.260 +END(ia64_switch_to)
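
The XEN/!XEN dep split earlier in this routine is purely a different virtual-to-physical mask: native Linux clears the top three bits of the region-7 address, while Xen clears four because it reserves a wider virtual region. A C model of the two instruction forms (illustrative, not part of the source):

	/* dep r20=0,in0,61,3 deposits 3 zero bits at position 61,
	 * i.e. clears bits 61..63 (native Linux). */
	#define LINUX_TO_PHYS(va)	((va) & ((1UL << 61) - 1))
	/* dep r20=0,in0,60,4 deposits 4 zero bits at position 60,
	 * i.e. clears bits 60..63 (Xen). */
	#define XEN_TO_PHYS(va)		((va) & ((1UL << 60) - 1))
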
 628.261 +
 628.262 +/*
 628.263 + * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 628.264 + * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 628.265 + * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 628.266 + * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 628.267 + * problem.  Also, we don't need to specify unwind information for preserved registers
 628.268 + * that are not modified in save_switch_stack as the right unwind information is already
 628.269 + * specified at the call-site of save_switch_stack.
 628.270 + */
 628.271 +
 628.272 +/*
 628.273 + * save_switch_stack:
 628.274 + *	- r16 holds ar.pfs
 628.275 + *	- b7 holds address to return to
 628.276 + *	- rp (b0) holds return address to save
 628.277 + */
 628.278 +GLOBAL_ENTRY(save_switch_stack)
 628.279 +	.prologue
 628.280 +	.altrp b7
 628.281 +	flushrs			// flush dirty regs to backing store (must be first in insn group)
 628.282 +	.save @priunat,r17
 628.283 +	mov r17=ar.unat		// preserve caller's
 628.284 +	.body
 628.285 +#ifdef CONFIG_ITANIUM
 628.286 +	adds r2=16+128,sp
 628.287 +	adds r3=16+64,sp
 628.288 +	adds r14=SW(R4)+16,sp
 628.289 +	;;
 628.290 +	st8.spill [r14]=r4,16		// spill r4
 628.291 +	lfetch.fault.excl.nt1 [r3],128
 628.292 +	;;
 628.293 +	lfetch.fault.excl.nt1 [r2],128
 628.294 +	lfetch.fault.excl.nt1 [r3],128
 628.295 +	;;
 628.296 +	lfetch.fault.excl [r2]
 628.297 +	lfetch.fault.excl [r3]
 628.298 +	adds r15=SW(R5)+16,sp
 628.299 +#else
 628.300 +	add r2=16+3*128,sp
 628.301 +	add r3=16,sp
 628.302 +	add r14=SW(R4)+16,sp
 628.303 +	;;
 628.304 +	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
 628.305 +	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x010
 628.306 +	;;
 628.307 +	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x090
 628.308 +	lfetch.fault.excl.nt1 [r2],128	//		prefetch offset 0x190
 628.309 +	;;
 628.310 +	lfetch.fault.excl.nt1 [r3]	//		prefetch offset 0x110
 628.311 +	lfetch.fault.excl.nt1 [r2]	//		prefetch offset 0x210
 628.312 +	adds r15=SW(R5)+16,sp
 628.313 +#endif
 628.314 +	;;
 628.315 +	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
 628.316 +	mov.m ar.rsc=0			// put RSE in mode: enforced lazy, little endian, pl 0
 628.317 +	add r2=SW(F2)+16,sp		// r2 = &sw->f2
 628.318 +	;;
 628.319 +	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
 628.320 +	mov.m r18=ar.fpsr		// preserve fpsr
 628.321 +	add r3=SW(F3)+16,sp		// r3 = &sw->f3
 628.322 +	;;
 628.323 +	stf.spill [r2]=f2,32
 628.324 +	mov.m r19=ar.rnat
 628.325 +	mov r21=b0
 628.326 +
 628.327 +	stf.spill [r3]=f3,32
 628.328 +	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
 628.329 +	mov r22=b1
 628.330 +	;;
 628.331 +	// since we're done with the spills, read and save ar.unat:
 628.332 +	mov.m r29=ar.unat
 628.333 +	mov.m r20=ar.bspstore
 628.334 +	mov r23=b2
 628.335 +	stf.spill [r2]=f4,32
 628.336 +	stf.spill [r3]=f5,32
 628.337 +	mov r24=b3
 628.338 +	;;
 628.339 +	st8 [r14]=r21,SW(B1)-SW(B0)		// save b0
 628.340 +	st8 [r15]=r23,SW(B3)-SW(B2)		// save b2
 628.341 +	mov r25=b4
 628.342 +	mov r26=b5
 628.343 +	;;
 628.344 +	st8 [r14]=r22,SW(B4)-SW(B1)		// save b1
 628.345 +	st8 [r15]=r24,SW(AR_PFS)-SW(B3)		// save b3
 628.346 +	mov r21=ar.lc		// I-unit
 628.347 +	stf.spill [r2]=f12,32
 628.348 +	stf.spill [r3]=f13,32
 628.349 +	;;
 628.350 +	st8 [r14]=r25,SW(B5)-SW(B4)		// save b4
 628.351 +	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs
 628.352 +	stf.spill [r2]=f14,32
 628.353 +	stf.spill [r3]=f15,32
 628.354 +	;;
 628.355 +	st8 [r14]=r26				// save b5
 628.356 +	st8 [r15]=r21				// save ar.lc
 628.357 +	stf.spill [r2]=f16,32
 628.358 +	stf.spill [r3]=f17,32
 628.359 +	;;
 628.360 +	stf.spill [r2]=f18,32
 628.361 +	stf.spill [r3]=f19,32
 628.362 +	;;
 628.363 +	stf.spill [r2]=f20,32
 628.364 +	stf.spill [r3]=f21,32
 628.365 +	;;
 628.366 +	stf.spill [r2]=f22,32
 628.367 +	stf.spill [r3]=f23,32
 628.368 +	;;
 628.369 +	stf.spill [r2]=f24,32
 628.370 +	stf.spill [r3]=f25,32
 628.371 +	;;
 628.372 +	stf.spill [r2]=f26,32
 628.373 +	stf.spill [r3]=f27,32
 628.374 +	;;
 628.375 +	stf.spill [r2]=f28,32
 628.376 +	stf.spill [r3]=f29,32
 628.377 +	;;
 628.378 +	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
 628.379 +	stf.spill [r3]=f31,SW(PR)-SW(F31)
 628.380 +	add r14=SW(CALLER_UNAT)+16,sp
 628.381 +	;;
 628.382 +	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)	// save ar.unat
 628.383 +	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
 628.384 +	mov r21=pr
 628.385 +	;;
 628.386 +	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
 628.387 +	st8 [r3]=r21				// save predicate registers
 628.388 +	;;
 628.389 +	st8 [r2]=r20				// save ar.bspstore
 628.390 +	st8 [r14]=r18				// save fpsr
 628.391 +	mov ar.rsc=3		// put RSE back into eager mode, pl 0
 628.392 +	br.cond.sptk.many b7
 628.393 +END(save_switch_stack)
 628.394 +
 628.395 +/*
 628.396 + * load_switch_stack:
 628.397 + *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 628.398 + *	- b7 holds address to return to
 628.399 + *	- must not touch r8-r11
 628.400 + */
 628.401 +#ifdef XEN
 628.402 +GLOBAL_ENTRY(load_switch_stack)
 628.403 +#else
 628.404 +ENTRY(load_switch_stack)
 628.405 +#endif
 628.406 +	.prologue
 628.407 +	.altrp b7
 628.408 +
 628.409 +	.body
 628.410 +	lfetch.fault.nt1 [sp]
 628.411 +	adds r2=SW(AR_BSPSTORE)+16,sp
 628.412 +	adds r3=SW(AR_UNAT)+16,sp
 628.413 +	mov ar.rsc=0						// put RSE into enforced lazy mode
 628.414 +	adds r14=SW(CALLER_UNAT)+16,sp
 628.415 +	adds r15=SW(AR_FPSR)+16,sp
 628.416 +	;;
 628.417 +	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
 628.418 +	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
 628.419 +	;;
 628.420 +	ld8 r21=[r2],16		// restore b0
 628.421 +	ld8 r22=[r3],16		// restore b1
 628.422 +	;;
 628.423 +	ld8 r23=[r2],16		// restore b2
 628.424 +	ld8 r24=[r3],16		// restore b3
 628.425 +	;;
 628.426 +	ld8 r25=[r2],16		// restore b4
 628.427 +	ld8 r26=[r3],16		// restore b5
 628.428 +	;;
 628.429 +	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
 628.430 +	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
 628.431 +	;;
 628.432 +	ld8 r28=[r2]		// restore pr
 628.433 +	ld8 r30=[r3]		// restore rnat
 628.434 +	;;
 628.435 +	ld8 r18=[r14],16	// restore caller's unat
 628.436 +	ld8 r19=[r15],24	// restore fpsr
 628.437 +	;;
 628.438 +	ldf.fill f2=[r14],32
 628.439 +	ldf.fill f3=[r15],32
 628.440 +	;;
 628.441 +	ldf.fill f4=[r14],32
 628.442 +	ldf.fill f5=[r15],32
 628.443 +	;;
 628.444 +	ldf.fill f12=[r14],32
 628.445 +	ldf.fill f13=[r15],32
 628.446 +	;;
 628.447 +	ldf.fill f14=[r14],32
 628.448 +	ldf.fill f15=[r15],32
 628.449 +	;;
 628.450 +	ldf.fill f16=[r14],32
 628.451 +	ldf.fill f17=[r15],32
 628.452 +	;;
 628.453 +	ldf.fill f18=[r14],32
 628.454 +	ldf.fill f19=[r15],32
 628.455 +	mov b0=r21
 628.456 +	;;
 628.457 +	ldf.fill f20=[r14],32
 628.458 +	ldf.fill f21=[r15],32
 628.459 +	mov b1=r22
 628.460 +	;;
 628.461 +	ldf.fill f22=[r14],32
 628.462 +	ldf.fill f23=[r15],32
 628.463 +	mov b2=r23
 628.464 +	;;
 628.465 +	mov ar.bspstore=r27
 628.466 +	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
 628.467 +	mov b3=r24
 628.468 +	;;
 628.469 +	ldf.fill f24=[r14],32
 628.470 +	ldf.fill f25=[r15],32
 628.471 +	mov b4=r25
 628.472 +	;;
 628.473 +	ldf.fill f26=[r14],32
 628.474 +	ldf.fill f27=[r15],32
 628.475 +	mov b5=r26
 628.476 +	;;
 628.477 +	ldf.fill f28=[r14],32
 628.478 +	ldf.fill f29=[r15],32
 628.479 +	mov ar.pfs=r16
 628.480 +	;;
 628.481 +	ldf.fill f30=[r14],32
 628.482 +	ldf.fill f31=[r15],24
 628.483 +	mov ar.lc=r17
 628.484 +	;;
 628.485 +	ld8.fill r4=[r14],16
 628.486 +	ld8.fill r5=[r15],16
 628.487 +	mov pr=r28,-1
 628.488 +	;;
 628.489 +	ld8.fill r6=[r14],16
 628.490 +	ld8.fill r7=[r15],16
 628.491 +
 628.492 +	mov ar.unat=r18				// restore caller's unat
 628.493 +	mov ar.rnat=r30				// must restore after bspstore but before rsc!
 628.494 +	mov ar.fpsr=r19				// restore fpsr
 628.495 +	mov ar.rsc=3				// put RSE back into eager mode, pl 0
 628.496 +	br.cond.sptk.many b7
 628.497 +END(load_switch_stack)
 628.498 +
 628.499 +#ifndef XEN
 628.500 +GLOBAL_ENTRY(__ia64_syscall)
 628.501 +	.regstk 6,0,0,0
 628.502 +	mov r15=in5				// put syscall number in place
 628.503 +	break __BREAK_SYSCALL
 628.504 +	movl r2=errno
 628.505 +	cmp.eq p6,p7=-1,r10
 628.506 +	;;
 628.507 +(p6)	st4 [r2]=r8
 628.508 +(p6)	mov r8=-1
 628.509 +	br.ret.sptk.many rp
 628.510 +END(__ia64_syscall)
 628.511 +
 628.512 +GLOBAL_ENTRY(execve)
 628.513 +	mov r15=__NR_execve			// put syscall number in place
 628.514 +	break __BREAK_SYSCALL
 628.515 +	br.ret.sptk.many rp
 628.516 +END(execve)
 628.517 +
 628.518 +GLOBAL_ENTRY(clone)
 628.519 +	mov r15=__NR_clone			// put syscall number in place
 628.520 +	break __BREAK_SYSCALL
 628.521 +	br.ret.sptk.many rp
 628.522 +END(clone)
 628.523 +
 628.524 +	/*
 628.525 +	 * Invoke a system call, but do some tracing before and after the call.
 628.526 +	 * We MUST preserve the current register frame throughout this routine
 628.527 +	 * because some system calls (such as ia64_execve) directly
 628.528 +	 * manipulate ar.pfs.
 628.529 +	 */
 628.530 +GLOBAL_ENTRY(ia64_trace_syscall)
 628.531 +	PT_REGS_UNWIND_INFO(0)
 628.532 +	/*
 628.533 +	 * We need to preserve the scratch registers f6-f11 in case the system
 628.534 +	 * call is sigreturn.
 628.535 +	 */
 628.536 +	adds r16=PT(F6)+16,sp
 628.537 +	adds r17=PT(F7)+16,sp
 628.538 +	;;
 628.539 + 	stf.spill [r16]=f6,32
 628.540 + 	stf.spill [r17]=f7,32
 628.541 +	;;
 628.542 + 	stf.spill [r16]=f8,32
 628.543 + 	stf.spill [r17]=f9,32
 628.544 +	;;
 628.545 + 	stf.spill [r16]=f10
 628.546 + 	stf.spill [r17]=f11
 628.547 +	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
 628.548 +	adds r16=PT(F6)+16,sp
 628.549 +	adds r17=PT(F7)+16,sp
 628.550 +	;;
 628.551 +	ldf.fill f6=[r16],32
 628.552 +	ldf.fill f7=[r17],32
 628.553 +	;;
 628.554 +	ldf.fill f8=[r16],32
 628.555 +	ldf.fill f9=[r17],32
 628.556 +	;;
 628.557 +	ldf.fill f10=[r16]
 628.558 +	ldf.fill f11=[r17]
 628.559 +	// the syscall number may have changed, so re-load it and re-calculate the
 628.560 +	// syscall entry-point:
 628.561 +	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
 628.562 +	;;
 628.563 +	ld8 r15=[r15]
 628.564 +	mov r3=NR_syscalls - 1
 628.565 +	;;
 628.566 +	adds r15=-1024,r15
 628.567 +	movl r16=sys_call_table
 628.568 +	;;
 628.569 +	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
 628.570 +	cmp.leu p6,p7=r15,r3
 628.571 +	;;
 628.572 +(p6)	ld8 r20=[r20]				// load address of syscall entry point
 628.573 +(p7)	movl r20=sys_ni_syscall
 628.574 +	;;
 628.575 +	mov b6=r20
 628.576 +	br.call.sptk.many rp=b6			// do the syscall
 628.577 +.strace_check_retval:
 628.578 +	cmp.lt p6,p0=r8,r0			// syscall failed?
 628.579 +	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 628.580 +	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
 628.581 +	mov r10=0
 628.582 +(p6)	br.cond.sptk strace_error		// syscall failed ->
 628.583 +	;;					// avoid RAW on r10
 628.584 +.strace_save_retval:
 628.585 +.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
 628.586 +.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
 628.587 +	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 628.588 +.ret3:	br.cond.sptk .work_pending_syscall_end
 628.589 +
 628.590 +strace_error:
 628.591 +	ld8 r3=[r2]				// load pt_regs.r8
 628.592 +	sub r9=0,r8				// negate return value to get errno value
 628.593 +	;;
 628.594 +	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
 628.595 +	adds r3=16,r2				// r3=&pt_regs.r10
 628.596 +	;;
 628.597 +(p6)	mov r10=-1
 628.598 +(p6)	mov r8=r9
 628.599 +	br.cond.sptk .strace_save_retval
 628.600 +END(ia64_trace_syscall)
 628.601 +
 628.602 +	/*
 628.603 +	 * When traced and returning from sigreturn, we invoke syscall_trace but then
 628.604 +	 * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
 628.605 +	 */
 628.606 +GLOBAL_ENTRY(ia64_strace_leave_kernel)
 628.607 +	PT_REGS_UNWIND_INFO(0)
 628.608 +{	/*
 628.609 +	 * Some versions of gas generate bad unwind info if the first instruction of a
 628.610 +	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
 628.611 +	 */
 628.612 +	nop.m 0
 628.613 +	nop.i 0
 628.614 +	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 628.615 +}
 628.616 +.ret4:	br.cond.sptk ia64_leave_kernel
 628.617 +END(ia64_strace_leave_kernel)
 628.618 +#endif
 628.619 +
 628.620 +GLOBAL_ENTRY(ia64_ret_from_clone)
 628.621 +	PT_REGS_UNWIND_INFO(0)
 628.622 +{	/*
 628.623 +	 * Some versions of gas generate bad unwind info if the first instruction of a
 628.624 +	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
 628.625 +	 */
 628.626 +	nop.m 0
 628.627 +	nop.i 0
 628.628 +	/*
 628.629 +	 * We need to call schedule_tail() to complete the scheduling process.
 628.630 +	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
 628.631 +	 * address of the previously executing task.
 628.632 +	 */
 628.633 +	br.call.sptk.many rp=ia64_invoke_schedule_tail
 628.634 +}
 628.635 +#ifdef XEN
 628.636 +	// new domains are cloned but not exec'ed, so switch to user mode here
 628.637 +	cmp.ne pKStk,pUStk=r0,r0
 628.638 +#ifdef CONFIG_VTI
 628.639 +	br.cond.spnt ia64_leave_hypervisor
 628.640 +#else // CONFIG_VTI
 628.641 +	br.cond.spnt ia64_leave_kernel
 628.642 +#endif // CONFIG_VTI
 628.643 +#else
 628.644 +.ret8:
 628.645 +	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
 628.646 +	;;
 628.647 +	ld4 r2=[r2]
 628.648 +	;;
 628.649 +	mov r8=0
 628.650 +	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
 628.651 +	;;
 628.652 +	cmp.ne p6,p0=r2,r0
 628.653 +(p6)	br.cond.spnt .strace_check_retval
 628.654 +#endif
 628.655 +	;;					// added stop bits to prevent r8 dependency
 628.656 +END(ia64_ret_from_clone)
 628.657 +	// fall through
 628.658 +GLOBAL_ENTRY(ia64_ret_from_syscall)
 628.659 +	PT_REGS_UNWIND_INFO(0)
 628.660 +	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
 628.661 +	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 628.662 +	mov r10=r0				// clear error indication in r10
 628.663 +(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
 628.664 +END(ia64_ret_from_syscall)
 628.665 +	// fall through
 628.666 +/*
 628.667 + * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 628.668 + *	need to switch to bank 0 and doesn't restore the scratch registers.
 628.669 + *	To avoid leaking kernel bits, the scratch registers are set to
 628.670 + *	the following known-to-be-safe values:
 628.671 + *
 628.672 + *		  r1: restored (global pointer)
 628.673 + *		  r2: cleared
 628.674 + *		  r3: 1 (when returning to user-level)
 628.675 + *	      r8-r11: restored (syscall return value(s))
 628.676 + *		 r12: restored (user-level stack pointer)
 628.677 + *		 r13: restored (user-level thread pointer)
 628.678 + *		 r14: cleared
 628.679 + *		 r15: restored (syscall #)
 628.680 + *	     r16-r17: cleared
 628.681 + *		 r18: user-level b6
 628.682 + *		 r19: cleared
 628.683 + *		 r20: user-level ar.fpsr
 628.684 + *		 r21: user-level b0
 628.685 + *		 r22: cleared
 628.686 + *		 r23: user-level ar.bspstore
 628.687 + *		 r24: user-level ar.rnat
 628.688 + *		 r25: user-level ar.unat
 628.689 + *		 r26: user-level ar.pfs
 628.690 + *		 r27: user-level ar.rsc
 628.691 + *		 r28: user-level ip
 628.692 + *		 r29: user-level psr
 628.693 + *		 r30: user-level cfm
 628.694 + *		 r31: user-level pr
 628.695 + *	      f6-f11: cleared
 628.696 + *		  pr: restored (user-level pr)
 628.697 + *		  b0: restored (user-level rp)
 628.698 + *	          b6: restored
 628.699 + *		  b7: cleared
 628.700 + *	     ar.unat: restored (user-level ar.unat)
 628.701 + *	      ar.pfs: restored (user-level ar.pfs)
 628.702 + *	      ar.rsc: restored (user-level ar.rsc)
 628.703 + *	     ar.rnat: restored (user-level ar.rnat)
 628.704 + *	 ar.bspstore: restored (user-level ar.bspstore)
 628.705 + *	     ar.fpsr: restored (user-level ar.fpsr)
 628.706 + *	      ar.ccv: cleared
 628.707 + *	      ar.csd: cleared
 628.708 + *	      ar.ssd: cleared
 628.709 + */
 628.710 +ENTRY(ia64_leave_syscall)
 628.711 +	PT_REGS_UNWIND_INFO(0)
 628.712 +	/*
 628.713 +	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
 628.714 +	 * user- or fsys-mode, hence we disable interrupts early on.
 628.715 +	 *
 628.716 +	 * p6 controls whether current_thread_info()->flags needs to be checked for
 628.717 +	 * extra work.  We always check for extra work when returning to user-level.
 628.718 +	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 628.719 +	 * is 0.  After extra work processing has been completed, execution
 628.720 +	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
 628.721 +	 * needs to be redone.
 628.722 +	 */
 628.723 +#ifdef CONFIG_PREEMPT
 628.724 +	rsm psr.i				// disable interrupts
 628.725 +	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
 628.726 +(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 628.727 +	;;
 628.728 +	.pred.rel.mutex pUStk,pKStk
 628.729 +(pKStk) ld4 r21=[r20]			// r21 <- preempt_count
 628.730 +(pUStk)	mov r21=0			// r21 <- 0
 628.731 +	;;
 628.732 +	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 628.733 +#else /* !CONFIG_PREEMPT */
 628.734 +(pUStk)	rsm psr.i
 628.735 +	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 628.736 +(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 628.737 +#endif
 628.738 +.work_processed_syscall:
 628.739 +	adds r2=PT(LOADRS)+16,r12
 628.740 +	adds r3=PT(AR_BSPSTORE)+16,r12
 628.741 +#ifdef XEN
 628.742 +	;;
 628.743 +#else
 628.744 +	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
 628.745 +	;;
 628.746 +(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
 628.747 +#endif
 628.748 +	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 628.749 +	mov b7=r0		// clear b7
 628.750 +	;;
 628.751 +	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
 628.752 +	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 628.753 +#ifndef XEN
 628.754 +(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
 628.755 +#endif
 628.756 +	;;
 628.757 +	mov r16=ar.bsp				// M2  get existing backing store pointer
 628.758 +#ifndef XEN
 628.759 +(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
 628.760 +(p6)	br.cond.spnt .work_pending_syscall
 628.761 +#endif
 628.762 +	;;
 628.763 +	// start restoring the state saved on the kernel stack (struct pt_regs):
 628.764 +	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
 628.765 +	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
 628.766 +	mov f6=f0		// clear f6
 628.767 +	;;
 628.768 +	invala			// M0|1 invalidate ALAT
 628.769 +	rsm psr.i | psr.ic	// M2 initiate turning off of interrupt and interruption collection
 628.770 +	mov f9=f0		// clear f9
 628.771 +
 628.772 +	ld8 r29=[r2],16		// load cr.ipsr
 628.773 +	ld8 r28=[r3],16			// load cr.iip
 628.774 +	mov f8=f0		// clear f8
 628.775 +	;;
 628.776 +	ld8 r30=[r2],16		// M0|1 load cr.ifs
 628.777 +	mov.m ar.ssd=r0		// M2 clear ar.ssd
 628.778 +	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 628.779 +	;;
 628.780 +	ld8 r25=[r3],16		// M0|1 load ar.unat
 628.781 +	mov.m ar.csd=r0		// M2 clear ar.csd
 628.782 +	mov r22=r0		// clear r22
 628.783 +	;;
 628.784 +	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 628.785 +(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
 628.786 +	mov f10=f0		// clear f10
 628.787 +	;;
 628.788 +	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
 628.789 +	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// load ar.rsc
 628.790 +	mov f11=f0		// clear f11
 628.791 +	;;
 628.792 +	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// load ar.rnat (may be garbage)
 628.793 +	ld8 r31=[r3],PT(R1)-PT(PR)		// load predicates
 628.794 +(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 628.795 +	;;
 628.796 +	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// load ar.fpsr
 628.797 +	ld8.fill r1=[r3],16	// load r1
 628.798 +(pUStk) mov r17=1
 628.799 +	;;
 628.800 +	srlz.d			// M0  ensure interruption collection is off
 628.801 +	ld8.fill r13=[r3],16
 628.802 +	mov f7=f0		// clear f7
 628.803 +	;;
 628.804 +	ld8.fill r12=[r2]	// restore r12 (sp)
 628.805 +	ld8.fill r15=[r3]	// restore r15
 628.806 +#ifdef XEN
 628.807 +	movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
 628.808 +#else
 628.809 +	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
 628.810 +#endif
 628.811 +	;;
 628.812 +(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
 628.813 +(pUStk) st1 [r14]=r17
 628.814 +	mov b6=r18		// I0  restore b6
 628.815 +	;;
 628.816 +	mov r14=r0		// clear r14
 628.817 +	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
 628.818 +(pKStk) br.cond.dpnt.many skip_rbs_switch
 628.819 +
 628.820 +	mov.m ar.ccv=r0		// clear ar.ccv
 628.821 +(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
 628.822 +	br.cond.sptk.many rbs_switch
 628.823 +END(ia64_leave_syscall)
 628.824 +
 628.825 +#ifdef CONFIG_IA32_SUPPORT
 628.826 +GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 628.827 +	PT_REGS_UNWIND_INFO(0)
 628.828 +	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 628.829 +	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
 628.830 +	;;
 628.831 +	.mem.offset 0,0
 628.832 +	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 628.833 +	.mem.offset 8,0
 628.834 +	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
 628.835 +END(ia64_ret_from_ia32_execve)
 628.836 +	// fall through
 628.837 +#endif /* CONFIG_IA32_SUPPORT */
 628.838 +GLOBAL_ENTRY(ia64_leave_kernel)
 628.839 +	PT_REGS_UNWIND_INFO(0)
 628.840 +	/*
 628.841 +	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
 628.842 +	 * user- or fsys-mode, hence we disable interrupts early on.
 628.843 +	 *
 628.844 +	 * p6 controls whether current_thread_info()->flags needs to be checked for
 628.845 +	 * extra work.  We always check for extra work when returning to user-level.
 628.846 +	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 628.847 +	 * is 0.  After extra work processing has been completed, execution
 628.848 +	 * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
 628.849 +	 * needs to be redone.
 628.850 +	 */
 628.851 +#ifdef CONFIG_PREEMPT
 628.852 +	rsm psr.i				// disable interrupts
 628.853 +	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
 628.854 +(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 628.855 +	;;
 628.856 +	.pred.rel.mutex pUStk,pKStk
 628.857 +(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
 628.858 +(pUStk)	mov r21=0			// r21 <- 0
 628.859 +	;;
 628.860 +	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 628.861 +#else
 628.862 +(pUStk)	rsm psr.i
 628.863 +	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 628.864 +(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 628.865 +#endif
 628.866 +.work_processed_kernel:
 628.867 +#ifdef XEN
 628.868 +	alloc loc0=ar.pfs,0,1,1,0
 628.869 +	adds out0=16,r12
 628.870 +	;;
 628.871 +(p6)	br.call.sptk.many b0=deliver_pending_interrupt
 628.872 +	mov ar.pfs=loc0
 628.873 +	mov r31=r0
 628.874 +#else
 628.875 +	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
 628.876 +	;;
 628.877 +(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
 628.878 +#endif
 628.879 +	adds r21=PT(PR)+16,r12
 628.880 +	;;
 628.881 +
 628.882 +	lfetch [r21],PT(CR_IPSR)-PT(PR)
 628.883 +	adds r2=PT(B6)+16,r12
 628.884 +	adds r3=PT(R16)+16,r12
 628.885 +	;;
 628.886 +	lfetch [r21]
 628.887 +	ld8 r28=[r2],8		// load b6
 628.888 +	adds r29=PT(R24)+16,r12
 628.889 +
 628.890 +	ld8.fill r16=[r3]
 628.891 +	adds r30=PT(AR_CCV)+16,r12
 628.892 +(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
 628.893 +	;;
 628.894 +	adds r3=PT(AR_CSD)-PT(R16),r3
 628.895 +	ld8.fill r24=[r29]
 628.896 +	ld8 r15=[r30]		// load ar.ccv
 628.897 +(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
 628.898 +	;;
 628.899 +	ld8 r29=[r2],16		// load b7
 628.900 +	ld8 r30=[r3],16		// load ar.csd
 628.901 +#ifndef XEN
 628.902 +(p6)	br.cond.spnt .work_pending
 628.903 +#endif
 628.904 +	;;
 628.905 +	ld8 r31=[r2],16		// load ar.ssd
 628.906 +	ld8.fill r8=[r3],16
 628.907 +	;;
 628.908 +	ld8.fill r9=[r2],16
 628.909 +	ld8.fill r10=[r3],PT(R17)-PT(R10)
 628.910 +	;;
 628.911 +	ld8.fill r11=[r2],PT(R18)-PT(R11)
 628.912 +	ld8.fill r17=[r3],16
 628.913 +	;;
 628.914 +	ld8.fill r18=[r2],16
 628.915 +	ld8.fill r19=[r3],16
 628.916 +	;;
 628.917 +	ld8.fill r20=[r2],16
 628.918 +	ld8.fill r21=[r3],16
 628.919 +	mov ar.csd=r30
 628.920 +	mov ar.ssd=r31
 628.921 +	;;
 628.922 +	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
 628.923 +	invala			// invalidate ALAT
 628.924 +	;;
 628.925 +	ld8.fill r22=[r2],24
 628.926 +	ld8.fill r23=[r3],24
 628.927 +	mov b6=r28
 628.928 +	;;
 628.929 +	ld8.fill r25=[r2],16
 628.930 +	ld8.fill r26=[r3],16
 628.931 +	mov b7=r29
 628.932 +	;;
 628.933 +	ld8.fill r27=[r2],16
 628.934 +	ld8.fill r28=[r3],16
 628.935 +	;;
 628.936 +	ld8.fill r29=[r2],16
 628.937 +	ld8.fill r30=[r3],24
 628.938 +	;;
 628.939 +	ld8.fill r31=[r2],PT(F9)-PT(R31)
 628.940 +	adds r3=PT(F10)-PT(F6),r3
 628.941 +	;;
 628.942 +	ldf.fill f9=[r2],PT(F6)-PT(F9)
 628.943 +	ldf.fill f10=[r3],PT(F8)-PT(F10)
 628.944 +	;;
 628.945 +	ldf.fill f6=[r2],PT(F7)-PT(F6)
 628.946 +	;;
 628.947 +	ldf.fill f7=[r2],PT(F11)-PT(F7)
 628.948 +	ldf.fill f8=[r3],32
 628.949 +	;;
 628.950 +	srlz.i			// ensure interruption collection is off
 628.951 +	mov ar.ccv=r15
 628.952 +	;;
 628.953 +	ldf.fill f11=[r2]
 628.954 +	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
 628.955 +	;;
 628.956 +(pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 628.957 +(pUStk) ld8 r18=[r18]
 628.958 +	adds r16=PT(CR_IPSR)+16,r12
 628.959 +	adds r17=PT(CR_IIP)+16,r12
 628.960 +
 628.961 +(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
 628.962 +	nop.i 0
 628.963 +	nop.i 0
 628.964 +	;;
 628.965 +	ld8 r29=[r16],16	// load cr.ipsr
 628.966 +	ld8 r28=[r17],16	// load cr.iip
 628.967 +	;;
 628.968 +	ld8 r30=[r16],16	// load cr.ifs
 628.969 +	ld8 r25=[r17],16	// load ar.unat
 628.970 +	;;
 628.971 +	ld8 r26=[r16],16	// load ar.pfs
 628.972 +	ld8 r27=[r17],16	// load ar.rsc
 628.973 +	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 628.974 +	;;
 628.975 +	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
 628.976 +	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
 628.977 +	;;
 628.978 +	ld8 r31=[r16],16	// load predicates
 628.979 +	ld8 r21=[r17],16	// load b0
 628.980 +	;;
 628.981 +	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
 628.982 +	ld8.fill r1=[r17],16	// load r1
 628.983 +	;;
 628.984 +	ld8.fill r12=[r16],16
 628.985 +	ld8.fill r13=[r17],16
 628.986 +(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
 628.987 +	;;
 628.988 +	ld8 r20=[r16],16	// ar.fpsr
 628.989 +	ld8.fill r15=[r17],16
 628.990 +	;;
 628.991 +	ld8.fill r14=[r16],16
 628.992 +	ld8.fill r2=[r17]
 628.993 +(pUStk)	mov r17=1
 628.994 +	;;
 628.995 +	ld8.fill r3=[r16]
 628.996 +(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
 628.997 +	shr.u r18=r19,16	// get byte size of existing "dirty" partition
 628.998 +	;;
 628.999 +	mov r16=ar.bsp		// get existing backing store pointer
628.1000 +#ifdef XEN
628.1001 +	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
628.1002 +#else
628.1003 +	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
628.1004 +#endif
628.1005 +	;;
628.1006 +	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
628.1007 +(pKStk)	br.cond.dpnt skip_rbs_switch
628.1008 +
628.1009 +	/*
628.1010 +	 * Restore user backing store.
628.1011 +	 *
628.1012 +	 * NOTE: alloc, loadrs, and cover can't be predicated.
628.1013 +	 */
628.1014 +(pNonSys) br.cond.dpnt dont_preserve_current_frame
628.1015 +
628.1016 +rbs_switch:
628.1017 +	cover				// add current frame into dirty partition and set cr.ifs
628.1018 +	;;
628.1019 +	mov r19=ar.bsp			// get new backing store pointer
628.1020 +	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
628.1021 +	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
628.1022 +	;;
628.1023 +	sub r19=r19,r16			// calculate total byte size of dirty partition
628.1024 +	add r18=64,r18			// don't force in0-in7 into memory...
628.1025 +	;;
628.1026 +	shl r19=r19,16			// shift size of dirty partition into loadrs position
628.1027 +	;;
628.1028 +dont_preserve_current_frame:
628.1029 +	/*
628.1030 +	 * To prevent leaking bits between the kernel and user-space,
628.1031 +	 * we must clear the stacked registers in the "invalid" partition here.
628.1032 +	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
628.1033 +	 * 5 registers/cycle on McKinley).
628.1034 +	 */
628.1035 +#	define pRecurse	p6
628.1036 +#	define pReturn	p7
628.1037 +#ifdef CONFIG_ITANIUM
628.1038 +#	define Nregs	10
628.1039 +#else
628.1040 +#	define Nregs	14
628.1041 +#endif
628.1042 +	alloc loc0=ar.pfs,2,Nregs-2,2,0
628.1043 +	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
628.1044 +	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
628.1045 +	;;
628.1046 +	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
628.1047 +	shladd in0=loc1,3,r17
628.1048 +	mov in1=0
628.1049 +	;;
628.1050 +	TEXT_ALIGN(32)
628.1051 +rse_clear_invalid:
628.1052 +#ifdef CONFIG_ITANIUM
628.1053 +	// cycle 0
628.1054 + { .mii
628.1055 +	alloc loc0=ar.pfs,2,Nregs-2,2,0
628.1056 +	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
628.1057 +	add out0=-Nregs*8,in0
628.1058 +}{ .mfb
628.1059 +	add out1=1,in1			// increment recursion count
628.1060 +	nop.f 0
628.1061 +	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
628.1062 +	;;
628.1063 +}{ .mfi	// cycle 1
628.1064 +	mov loc1=0
628.1065 +	nop.f 0
628.1066 +	mov loc2=0
628.1067 +}{ .mib
628.1068 +	mov loc3=0
628.1069 +	mov loc4=0
628.1070 +(pRecurse) br.call.sptk.many b0=rse_clear_invalid
628.1071 +
628.1072 +}{ .mfi	// cycle 2
628.1073 +	mov loc5=0
628.1074 +	nop.f 0
628.1075 +	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
628.1076 +}{ .mib
628.1077 +	mov loc6=0
628.1078 +	mov loc7=0
628.1079 +(pReturn) br.ret.sptk.many b0
628.1080 +}
628.1081 +#else /* !CONFIG_ITANIUM */
628.1082 +	alloc loc0=ar.pfs,2,Nregs-2,2,0
628.1083 +	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
628.1084 +	add out0=-Nregs*8,in0
628.1085 +	add out1=1,in1			// increment recursion count
628.1086 +	mov loc1=0
628.1087 +	mov loc2=0
628.1088 +	;;
628.1089 +	mov loc3=0
628.1090 +	mov loc4=0
628.1091 +	mov loc5=0
628.1092 +	mov loc6=0
628.1093 +	mov loc7=0
628.1094 +(pRecurse) br.call.sptk.few b0=rse_clear_invalid
628.1095 +	;;
628.1096 +	mov loc8=0
628.1097 +	mov loc9=0
628.1098 +	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
628.1099 +	mov loc10=0
628.1100 +	mov loc11=0
628.1101 +(pReturn) br.ret.sptk.many b0
628.1102 +#endif /* !CONFIG_ITANIUM */
628.1103 +#	undef pRecurse
628.1104 +#	undef pReturn
628.1105 +	;;
628.1106 +	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
628.1107 +	;;
628.1108 +	loadrs
628.1109 +	;;
628.1110 +skip_rbs_switch:
628.1111 +	mov ar.unat=r25		// M2
628.1112 +(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
628.1113 +(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
628.1114 +	;;
628.1115 +(pUStk)	mov ar.bspstore=r23	// M2
628.1116 +(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
628.1117 +(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
628.1118 +	;;
628.1119 +	mov cr.ipsr=r29		// M2
628.1120 +	mov ar.pfs=r26		// I0
628.1121 +(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
628.1122 +
628.1123 +(p9)	mov cr.ifs=r30		// M2
628.1124 +	mov b0=r21		// I0
628.1125 +(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
628.1126 +
628.1127 +	mov ar.fpsr=r20		// M2
628.1128 +	mov cr.iip=r28		// M2
628.1129 +	nop 0
628.1130 +	;;
628.1131 +(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
628.1132 +	nop 0
628.1133 +(pLvSys)mov r2=r0
628.1134 +
628.1135 +	mov ar.rsc=r27		// M2
628.1136 +	mov pr=r31,-1		// I0
628.1137 +	rfi			// B
628.1138 +
628.1139 +#ifndef XEN
628.1140 +	/*
628.1141 +	 * On entry:
628.1142 +	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
628.1143 +	 *	r31 = current->thread_info->flags
628.1144 +	 * On exit:
628.1145 +	 *	p6 = TRUE if work-pending-check needs to be redone
628.1146 +	 */
628.1147 +.work_pending_syscall:
628.1148 +	add r2=-8,r2
628.1149 +	add r3=-8,r3
628.1150 +	;;
628.1151 +	st8 [r2]=r8
628.1152 +	st8 [r3]=r10
628.1153 +.work_pending:
628.1154 +	tbit.nz p6,p0=r31,TIF_SIGDELAYED		// signal delayed from  MCA/INIT/NMI/PMI context?
628.1155 +(p6)	br.cond.sptk.few .sigdelayed
628.1156 +	;;
628.1157 +	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
628.1158 +(p6)	br.cond.sptk.few .notify
628.1159 +#ifdef CONFIG_PREEMPT
628.1160 +(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
628.1161 +	;;
628.1162 +(pKStk) st4 [r20]=r21
628.1163 +	ssm psr.i		// enable interrupts
628.1164 +#endif
628.1165 +	br.call.spnt.many rp=schedule
628.1166 +.ret9:	cmp.eq p6,p0=r0,r0				// p6 <- 1
628.1167 +	rsm psr.i		// disable interrupts
628.1168 +	;;
628.1169 +#ifdef CONFIG_PREEMPT
628.1170 +(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
628.1171 +	;;
628.1172 +(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
628.1173 +#endif
628.1174 +(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
628.1175 +	br.cond.sptk.many .work_processed_kernel	// re-check
628.1176 +
628.1177 +.notify:
628.1178 +(pUStk)	br.call.spnt.many rp=notify_resume_user
628.1179 +.ret10:	cmp.ne p6,p0=r0,r0				// p6 <- 0
628.1180 +(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
628.1181 +	br.cond.sptk.many .work_processed_kernel	// don't re-check
628.1182 +
628.1183 +// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
628.1184 +// it could not be delivered.  Deliver it now.  The signal might be for us and
628.1185 +// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
628.1186 +// signal.
628.1187 +
628.1188 +.sigdelayed:
628.1189 +	br.call.sptk.many rp=do_sigdelayed
628.1190 +	cmp.eq p6,p0=r0,r0				// p6 <- 1, always re-check
628.1191 +(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
628.1192 +	br.cond.sptk.many .work_processed_kernel	// re-check
628.1193 +
628.1194 +.work_pending_syscall_end:
628.1195 +	adds r2=PT(R8)+16,r12
628.1196 +	adds r3=PT(R10)+16,r12
628.1197 +	;;
628.1198 +	ld8 r8=[r2]
628.1199 +	ld8 r10=[r3]
628.1200 +	br.cond.sptk.many .work_processed_syscall	// re-check
628.1201 +#endif
628.1202 +
628.1203 +END(ia64_leave_kernel)
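
To pin down the arithmetic behind dont_preserve_current_frame: r18 arrives holding the dirty-partition byte size (plus the 64 bytes added so in0-in7 stay in registers), r17 holds physStackedSize+8, and each activation of rse_clear_invalid scrubs Nregs stacked registers of 8 bytes apiece, recursing while work remains. A rough C model, under those assumptions:

	/* Model only: count rse_clear_invalid activations needed to scrub
	 * the invalid partition.  One RNaT collection slot exists per 63
	 * stacked registers, hence dirty >> 9 (== dirty / (64*8)). */
	static int rse_clear_passes(unsigned long phys_stacked_p8,
				    unsigned long dirty, int nregs)
	{
		unsigned long rnat_slots = dirty >> 9;
		unsigned long to_clear = (phys_stacked_p8 - dirty) + 8 * rnat_slots;
		int passes = 1;

		while (to_clear > (unsigned long)nregs * 8) {	/* pRecurse */
			to_clear -= (unsigned long)nregs * 8;
			passes++;
		}
		return passes;
	}
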
628.1204 +
628.1205 +ENTRY(handle_syscall_error)
628.1206 +	/*
628.1207 +	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
 628.1208 +	 * lead us to mistake a negative return value as a failed syscall.  Those syscalls
628.1209 +	 * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
628.1210 +	 * pt_regs.r8 is zero, we assume that the call completed successfully.
628.1211 +	 */
628.1212 +	PT_REGS_UNWIND_INFO(0)
628.1213 +	ld8 r3=[r2]		// load pt_regs.r8
628.1214 +	;;
628.1215 +	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
628.1216 +	;;
628.1217 +(p7)	mov r10=-1
628.1218 +(p7)	sub r8=0,r8		// negate return value to get errno
628.1219 +	br.cond.sptk ia64_leave_syscall
628.1220 +END(handle_syscall_error)
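
handle_syscall_error completes the ia64 error convention that __ia64_syscall (in the non-Xen block above) consumes on the user side: r8 carries the result, and r10 is 0 on success or -1 on failure, in which case r8 holds a positive errno. A sketch of the user-level decode, with illustrative names standing in for the registers:

	#include <errno.h>

	/* Sketch of a libc-style stub after the 'break' instruction
	 * returns; r8_val and r10_val stand in for registers r8/r10. */
	static long decode_syscall_result(long r8_val, long r10_val)
	{
		if (r10_val == -1) {		/* kernel flagged an error */
			errno = (int) r8_val;	/* r8 holds the positive errno */
			return -1;
		}
		return r8_val;
	}
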
628.1221 +
628.1222 +	/*
628.1223 +	 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
628.1224 +	 * in case a system call gets restarted.
628.1225 +	 */
628.1226 +GLOBAL_ENTRY(ia64_invoke_schedule_tail)
628.1227 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
628.1228 +	alloc loc1=ar.pfs,8,2,1,0
628.1229 +	mov loc0=rp
628.1230 +	mov out0=r8				// Address of previous task
628.1231 +	;;
628.1232 +	br.call.sptk.many rp=schedule_tail
628.1233 +.ret11:	mov ar.pfs=loc1
628.1234 +	mov rp=loc0
628.1235 +	br.ret.sptk.many rp
628.1236 +END(ia64_invoke_schedule_tail)
628.1237 +
628.1238 +#ifndef XEN
628.1239 +	/*
 628.1240 +	 * Set up the stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
628.1241 +	 * be set up by the caller.  We declare 8 input registers so the system call
628.1242 +	 * args get preserved, in case we need to restart a system call.
628.1243 +	 */
628.1244 +ENTRY(notify_resume_user)
628.1245 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
628.1246 +	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
628.1247 +	mov r9=ar.unat
628.1248 +	mov loc0=rp				// save return address
628.1249 +	mov out0=0				// there is no "oldset"
628.1250 +	adds out1=8,sp				// out1=&sigscratch->ar_pfs
628.1251 +(pSys)	mov out2=1				// out2==1 => we're in a syscall
628.1252 +	;;
628.1253 +(pNonSys) mov out2=0				// out2==0 => not a syscall
628.1254 +	.fframe 16
628.1255 +	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
628.1256 +	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
628.1257 +	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
628.1258 +	.body
628.1259 +	br.call.sptk.many rp=do_notify_resume_user
628.1260 +.ret15:	.restore sp
628.1261 +	adds sp=16,sp				// pop scratch stack space
628.1262 +	;;
628.1263 +	ld8 r9=[sp]				// load new unat from sigscratch->scratch_unat
628.1264 +	mov rp=loc0
628.1265 +	;;
628.1266 +	mov ar.unat=r9
628.1267 +	mov ar.pfs=loc1
628.1268 +	br.ret.sptk.many rp
628.1269 +END(notify_resume_user)
628.1270 +
628.1271 +GLOBAL_ENTRY(sys_rt_sigsuspend)
628.1272 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
628.1273 +	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
628.1274 +	mov r9=ar.unat
628.1275 +	mov loc0=rp				// save return address
628.1276 +	mov out0=in0				// mask
628.1277 +	mov out1=in1				// sigsetsize
628.1278 +	adds out2=8,sp				// out2=&sigscratch->ar_pfs
628.1279 +	;;
628.1280 +	.fframe 16
628.1281 +	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
628.1282 +	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
628.1283 +	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
628.1284 +	.body
628.1285 +	br.call.sptk.many rp=ia64_rt_sigsuspend
628.1286 +.ret17:	.restore sp
628.1287 +	adds sp=16,sp				// pop scratch stack space
628.1288 +	;;
628.1289 +	ld8 r9=[sp]				// load new unat from sw->caller_unat
628.1290 +	mov rp=loc0
628.1291 +	;;
628.1292 +	mov ar.unat=r9
628.1293 +	mov ar.pfs=loc1
628.1294 +	br.ret.sptk.many rp
628.1295 +END(sys_rt_sigsuspend)
628.1296 +
628.1297 +ENTRY(sys_rt_sigreturn)
628.1298 +	PT_REGS_UNWIND_INFO(0)
628.1299 +	/*
628.1300 +	 * Allocate 8 input registers since ptrace() may clobber them
628.1301 +	 */
628.1302 +	alloc r2=ar.pfs,8,0,1,0
628.1303 +	.prologue
628.1304 +	PT_REGS_SAVES(16)
628.1305 +	adds sp=-16,sp
628.1306 +	.body
628.1307 +	cmp.eq pNonSys,pSys=r0,r0		// sigreturn isn't a normal syscall...
628.1308 +	;;
628.1309 +	/*
628.1310 +	 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
628.1311 +	 * syscall-entry path does not save them we save them here instead.  Note: we
 628.1312 +	 * don't need to save any other registers that are not saved by the streamlined
628.1313 +	 * syscall path, because restore_sigcontext() restores them.
628.1314 +	 */
628.1315 +	adds r16=PT(F6)+32,sp
628.1316 +	adds r17=PT(F7)+32,sp
628.1317 +	;;
628.1318 + 	stf.spill [r16]=f6,32
628.1319 + 	stf.spill [r17]=f7,32
628.1320 +	;;
628.1321 + 	stf.spill [r16]=f8,32
628.1322 + 	stf.spill [r17]=f9,32
628.1323 +	;;
628.1324 + 	stf.spill [r16]=f10
628.1325 + 	stf.spill [r17]=f11
628.1326 +	adds out0=16,sp				// out0 = &sigscratch
628.1327 +	br.call.sptk.many rp=ia64_rt_sigreturn
628.1328 +.ret19:	.restore sp 0
628.1329 +	adds sp=16,sp
628.1330 +	;;
628.1331 +	ld8 r9=[sp]				// load new ar.unat
628.1332 +	mov.sptk b7=r8,ia64_leave_kernel
628.1333 +	;;
628.1334 +	mov ar.unat=r9
628.1335 +	br.many b7
628.1336 +END(sys_rt_sigreturn)
628.1337 +#endif
628.1338 +
628.1339 +GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
628.1340 +	.prologue
628.1341 +	/*
628.1342 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
628.1343 +	 */
628.1344 +	mov r16=r0
628.1345 +	DO_SAVE_SWITCH_STACK
628.1346 +	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
628.1347 +.ret21:	.body
628.1348 +	DO_LOAD_SWITCH_STACK
628.1349 +	br.cond.sptk.many rp				// goes to ia64_leave_kernel
628.1350 +END(ia64_prepare_handle_unaligned)
628.1351 +
628.1352 +#ifndef XEN
628.1353 +	//
628.1354 +	// unw_init_running(void (*callback)(info, arg), void *arg)
628.1355 +	//
628.1356 +#	define EXTRA_FRAME_SIZE	((UNW_FRAME_INFO_SIZE+15)&~15)
628.1357 +
628.1358 +GLOBAL_ENTRY(unw_init_running)
628.1359 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
628.1360 +	alloc loc1=ar.pfs,2,3,3,0
628.1361 +	;;
628.1362 +	ld8 loc2=[in0],8
628.1363 +	mov loc0=rp
628.1364 +	mov r16=loc1
628.1365 +	DO_SAVE_SWITCH_STACK
628.1366 +	.body
628.1367 +
628.1368 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
628.1369 +	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
628.1370 +	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
628.1371 +	adds sp=-EXTRA_FRAME_SIZE,sp
628.1372 +	.body
628.1373 +	;;
628.1374 +	adds out0=16,sp				// &info
628.1375 +	mov out1=r13				// current
628.1376 +	adds out2=16+EXTRA_FRAME_SIZE,sp	// &switch_stack
628.1377 +	br.call.sptk.many rp=unw_init_frame_info
628.1378 +1:	adds out0=16,sp				// &info
628.1379 +	mov b6=loc2
628.1380 +	mov loc2=gp				// save gp across indirect function call
628.1381 +	;;
628.1382 +	ld8 gp=[in0]
628.1383 +	mov out1=in1				// arg
628.1384 +	br.call.sptk.many rp=b6			// invoke the callback function
628.1385 +1:	mov gp=loc2				// restore gp
628.1386 +
628.1387 +	// For now, we don't allow changing registers from within
628.1388 +	// unw_init_running; if we ever want to allow that, we'd
628.1389 +	// have to do a load_switch_stack here:
628.1390 +	.restore sp
628.1391 +	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
628.1392 +
628.1393 +	mov ar.pfs=loc1
628.1394 +	mov rp=loc0
628.1395 +	br.ret.sptk.many rp
628.1396 +END(unw_init_running)
628.1397 +
628.1398 +	.rodata
628.1399 +	.align 8
628.1400 +	.globl sys_call_table
628.1401 +sys_call_table:
628.1402 +	data8 sys_ni_syscall		//  This must be sys_ni_syscall!  See ivt.S.
628.1403 +	data8 sys_exit				// 1025
628.1404 +	data8 sys_read
628.1405 +	data8 sys_write
628.1406 +	data8 sys_open
628.1407 +	data8 sys_close
628.1408 +	data8 sys_creat				// 1030
628.1409 +	data8 sys_link
628.1410 +	data8 sys_unlink
628.1411 +	data8 ia64_execve
628.1412 +	data8 sys_chdir
628.1413 +	data8 sys_fchdir			// 1035
628.1414 +	data8 sys_utimes
628.1415 +	data8 sys_mknod
628.1416 +	data8 sys_chmod
628.1417 +	data8 sys_chown
628.1418 +	data8 sys_lseek				// 1040
628.1419 +	data8 sys_getpid
628.1420 +	data8 sys_getppid
628.1421 +	data8 sys_mount
628.1422 +	data8 sys_umount
628.1423 +	data8 sys_setuid			// 1045
628.1424 +	data8 sys_getuid
628.1425 +	data8 sys_geteuid
628.1426 +	data8 sys_ptrace
628.1427 +	data8 sys_access
628.1428 +	data8 sys_sync				// 1050
628.1429 +	data8 sys_fsync
628.1430 +	data8 sys_fdatasync
628.1431 +	data8 sys_kill
628.1432 +	data8 sys_rename
628.1433 +	data8 sys_mkdir				// 1055
628.1434 +	data8 sys_rmdir
628.1435 +	data8 sys_dup
628.1436 +	data8 sys_pipe
628.1437 +	data8 sys_times
628.1438 +	data8 ia64_brk				// 1060
628.1439 +	data8 sys_setgid
628.1440 +	data8 sys_getgid
628.1441 +	data8 sys_getegid
628.1442 +	data8 sys_acct
628.1443 +	data8 sys_ioctl				// 1065
628.1444 +	data8 sys_fcntl
628.1445 +	data8 sys_umask
628.1446 +	data8 sys_chroot
628.1447 +	data8 sys_ustat
628.1448 +	data8 sys_dup2				// 1070
628.1449 +	data8 sys_setreuid
628.1450 +	data8 sys_setregid
628.1451 +	data8 sys_getresuid
628.1452 +	data8 sys_setresuid
628.1453 +	data8 sys_getresgid			// 1075
628.1454 +	data8 sys_setresgid
628.1455 +	data8 sys_getgroups
628.1456 +	data8 sys_setgroups
628.1457 +	data8 sys_getpgid
628.1458 +	data8 sys_setpgid			// 1080
628.1459 +	data8 sys_setsid
628.1460 +	data8 sys_getsid
628.1461 +	data8 sys_sethostname
628.1462 +	data8 sys_setrlimit
628.1463 +	data8 sys_getrlimit			// 1085
628.1464 +	data8 sys_getrusage
628.1465 +	data8 sys_gettimeofday
628.1466 +	data8 sys_settimeofday
628.1467 +	data8 sys_select
628.1468 +	data8 sys_poll				// 1090
628.1469 +	data8 sys_symlink
628.1470 +	data8 sys_readlink
628.1471 +	data8 sys_uselib
628.1472 +	data8 sys_swapon
628.1473 +	data8 sys_swapoff			// 1095
628.1474 +	data8 sys_reboot
628.1475 +	data8 sys_truncate
628.1476 +	data8 sys_ftruncate
628.1477 +	data8 sys_fchmod
628.1478 +	data8 sys_fchown			// 1100
628.1479 +	data8 ia64_getpriority
628.1480 +	data8 sys_setpriority
628.1481 +	data8 sys_statfs
628.1482 +	data8 sys_fstatfs
628.1483 +	data8 sys_gettid			// 1105
628.1484 +	data8 sys_semget
628.1485 +	data8 sys_semop
628.1486 +	data8 sys_semctl
628.1487 +	data8 sys_msgget
628.1488 +	data8 sys_msgsnd			// 1110
628.1489 +	data8 sys_msgrcv
628.1490 +	data8 sys_msgctl
628.1491 +	data8 sys_shmget
628.1492 +	data8 ia64_shmat
628.1493 +	data8 sys_shmdt				// 1115
628.1494 +	data8 sys_shmctl
628.1495 +	data8 sys_syslog
628.1496 +	data8 sys_setitimer
628.1497 +	data8 sys_getitimer
628.1498 +	data8 sys_ni_syscall			// 1120		/* was: ia64_oldstat */
628.1499 +	data8 sys_ni_syscall					/* was: ia64_oldlstat */
628.1500 +	data8 sys_ni_syscall					/* was: ia64_oldfstat */
628.1501 +	data8 sys_vhangup
628.1502 +	data8 sys_lchown
628.1503 +	data8 sys_remap_file_pages		// 1125
628.1504 +	data8 sys_wait4
628.1505 +	data8 sys_sysinfo
628.1506 +	data8 sys_clone
628.1507 +	data8 sys_setdomainname
628.1508 +	data8 sys_newuname			// 1130
628.1509 +	data8 sys_adjtimex
628.1510 +	data8 sys_ni_syscall					/* was: ia64_create_module */
628.1511 +	data8 sys_init_module
628.1512 +	data8 sys_delete_module
628.1513 +	data8 sys_ni_syscall			// 1135		/* was: sys_get_kernel_syms */
628.1514 +	data8 sys_ni_syscall					/* was: sys_query_module */
628.1515 +	data8 sys_quotactl
628.1516 +	data8 sys_bdflush
628.1517 +	data8 sys_sysfs
628.1518 +	data8 sys_personality			// 1140
628.1519 +	data8 sys_ni_syscall		// sys_afs_syscall
628.1520 +	data8 sys_setfsuid
628.1521 +	data8 sys_setfsgid
628.1522 +	data8 sys_getdents
628.1523 +	data8 sys_flock				// 1145
628.1524 +	data8 sys_readv
628.1525 +	data8 sys_writev
628.1526 +	data8 sys_pread64
628.1527 +	data8 sys_pwrite64
628.1528 +	data8 sys_sysctl			// 1150
628.1529 +	data8 sys_mmap
628.1530 +	data8 sys_munmap
628.1531 +	data8 sys_mlock
628.1532 +	data8 sys_mlockall
628.1533 +	data8 sys_mprotect			// 1155
628.1534 +	data8 ia64_mremap
628.1535 +	data8 sys_msync
628.1536 +	data8 sys_munlock
628.1537 +	data8 sys_munlockall
628.1538 +	data8 sys_sched_getparam		// 1160
628.1539 +	data8 sys_sched_setparam
628.1540 +	data8 sys_sched_getscheduler
628.1541 +	data8 sys_sched_setscheduler
628.1542 +	data8 sys_sched_yield
628.1543 +	data8 sys_sched_get_priority_max	// 1165
628.1544 +	data8 sys_sched_get_priority_min
628.1545 +	data8 sys_sched_rr_get_interval
628.1546 +	data8 sys_nanosleep
628.1547 +	data8 sys_nfsservctl
628.1548 +	data8 sys_prctl				// 1170
628.1549 +	data8 sys_getpagesize
628.1550 +	data8 sys_mmap2
628.1551 +	data8 sys_pciconfig_read
628.1552 +	data8 sys_pciconfig_write
628.1553 +	data8 sys_perfmonctl			// 1175
628.1554 +	data8 sys_sigaltstack
628.1555 +	data8 sys_rt_sigaction
628.1556 +	data8 sys_rt_sigpending
628.1557 +	data8 sys_rt_sigprocmask
628.1558 +	data8 sys_rt_sigqueueinfo		// 1180
628.1559 +	data8 sys_rt_sigreturn
628.1560 +	data8 sys_rt_sigsuspend
628.1561 +	data8 sys_rt_sigtimedwait
628.1562 +	data8 sys_getcwd
628.1563 +	data8 sys_capget			// 1185
628.1564 +	data8 sys_capset
628.1565 +	data8 sys_sendfile64
628.1566 +	data8 sys_ni_syscall		// sys_getpmsg (STREAMS)
628.1567 +	data8 sys_ni_syscall		// sys_putpmsg (STREAMS)
628.1568 +	data8 sys_socket			// 1190
628.1569 +	data8 sys_bind
628.1570 +	data8 sys_connect
628.1571 +	data8 sys_listen
628.1572 +	data8 sys_accept
628.1573 +	data8 sys_getsockname			// 1195
628.1574 +	data8 sys_getpeername
628.1575 +	data8 sys_socketpair
628.1576 +	data8 sys_send
628.1577 +	data8 sys_sendto
628.1578 +	data8 sys_recv				// 1200
628.1579 +	data8 sys_recvfrom
628.1580 +	data8 sys_shutdown
628.1581 +	data8 sys_setsockopt
628.1582 +	data8 sys_getsockopt
628.1583 +	data8 sys_sendmsg			// 1205
628.1584 +	data8 sys_recvmsg
628.1585 +	data8 sys_pivot_root
628.1586 +	data8 sys_mincore
628.1587 +	data8 sys_madvise
628.1588 +	data8 sys_newstat			// 1210
628.1589 +	data8 sys_newlstat
628.1590 +	data8 sys_newfstat
628.1591 +	data8 sys_clone2
628.1592 +	data8 sys_getdents64
628.1593 +	data8 sys_getunwind			// 1215
628.1594 +	data8 sys_readahead
628.1595 +	data8 sys_setxattr
628.1596 +	data8 sys_lsetxattr
628.1597 +	data8 sys_fsetxattr
628.1598 +	data8 sys_getxattr			// 1220
628.1599 +	data8 sys_lgetxattr
628.1600 +	data8 sys_fgetxattr
628.1601 +	data8 sys_listxattr
628.1602 +	data8 sys_llistxattr
628.1603 +	data8 sys_flistxattr			// 1225
628.1604 +	data8 sys_removexattr
628.1605 +	data8 sys_lremovexattr
628.1606 +	data8 sys_fremovexattr
628.1607 +	data8 sys_tkill
628.1608 +	data8 sys_futex				// 1230
628.1609 +	data8 sys_sched_setaffinity
628.1610 +	data8 sys_sched_getaffinity
628.1611 +	data8 sys_set_tid_address
628.1612 +	data8 sys_fadvise64_64
628.1613 +	data8 sys_tgkill 			// 1235
628.1614 +	data8 sys_exit_group
628.1615 +	data8 sys_lookup_dcookie
628.1616 +	data8 sys_io_setup
628.1617 +	data8 sys_io_destroy
628.1618 +	data8 sys_io_getevents			// 1240
628.1619 +	data8 sys_io_submit
628.1620 +	data8 sys_io_cancel
628.1621 +	data8 sys_epoll_create
628.1622 +	data8 sys_epoll_ctl
628.1623 +	data8 sys_epoll_wait			// 1245
628.1624 +	data8 sys_restart_syscall
628.1625 +	data8 sys_semtimedop
628.1626 +	data8 sys_timer_create
628.1627 +	data8 sys_timer_settime
628.1628 +	data8 sys_timer_gettime			// 1250
628.1629 +	data8 sys_timer_getoverrun
628.1630 +	data8 sys_timer_delete
628.1631 +	data8 sys_clock_settime
628.1632 +	data8 sys_clock_gettime
628.1633 +	data8 sys_clock_getres			// 1255
628.1634 +	data8 sys_clock_nanosleep
628.1635 +	data8 sys_fstatfs64
628.1636 +	data8 sys_statfs64
628.1637 +	data8 sys_mbind
628.1638 +	data8 sys_get_mempolicy			// 1260
628.1639 +	data8 sys_set_mempolicy
628.1640 +	data8 sys_mq_open
628.1641 +	data8 sys_mq_unlink
628.1642 +	data8 sys_mq_timedsend
628.1643 +	data8 sys_mq_timedreceive		// 1265
628.1644 +	data8 sys_mq_notify
628.1645 +	data8 sys_mq_getsetattr
628.1646 +	data8 sys_ni_syscall			// reserved for kexec_load
628.1647 +	data8 sys_ni_syscall			// reserved for vserver
628.1648 +	data8 sys_waitid			// 1270
628.1649 +	data8 sys_add_key
628.1650 +	data8 sys_request_key
628.1651 +	data8 sys_keyctl
628.1652 +	data8 sys_ni_syscall
628.1653 +	data8 sys_ni_syscall			// 1275
628.1654 +	data8 sys_ni_syscall
628.1655 +	data8 sys_ni_syscall
628.1656 +	data8 sys_ni_syscall
628.1657 +	data8 sys_ni_syscall
628.1658 +
628.1659 +	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
628.1660 +#endif
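
As the inline numbering shows, native IA64 system calls start at 1024 (sys_exit is 1025), so the break-instruction path indexes this table with the syscall number minus 1024, and the closing .org directive makes the assembler fail if the table ever exceeds 8*NR_syscalls bytes. A minimal C sketch of that indexing, assuming a hypothetical dispatcher (the names and table size below are illustrative):

#include <errno.h>

#define FIRST_SYSCALL	1024		/* native IA64 syscall numbers start here */
#define NR_SYSCALLS	256		/* stand-in for NR_syscalls */

typedef long (*syscall_t)(long, long, long);

extern syscall_t sys_call_table[NR_SYSCALLS];

/* Hypothetical dispatcher mirroring the table indexing used by the
 * break-instruction syscall path. */
static long dispatch_syscall(long num, long a0, long a1, long a2)
{
	if (num < FIRST_SYSCALL || num >= FIRST_SYSCALL + NR_SYSCALLS)
		return -ENOSYS;
	return sys_call_table[num - FIRST_SYSCALL](a0, a1, a2);
}
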
   629.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   629.2 +++ b/xen/arch/ia64/linux-xen/entry.h	Fri Aug 26 09:05:43 2005 +0000
   629.3 @@ -0,0 +1,97 @@
   629.4 +#include <linux/config.h>
   629.5 +
   629.6 +/*
   629.7 + * Preserved registers that are shared between code in ivt.S and
   629.8 + * entry.S.  Be careful not to step on these!
   629.9 + */
  629.10 +#define PRED_LEAVE_SYSCALL	1 /* TRUE iff leave from syscall */
  629.11 +#define PRED_KERNEL_STACK	2 /* returning to kernel-stacks? */
  629.12 +#define PRED_USER_STACK		3 /* returning to user-stacks? */
  629.13 +#ifdef CONFIG_VTI
  629.14 +#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
  629.15 +#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
  629.16 +#define PRED_BN0		6 /* Guest is in bank 0 */
  629.17 +#define PRED_BN1		7 /* Guest is in bank 1 */
  629.18 +#endif // CONFIG_VTI
  629.19 +#define PRED_SYSCALL		4 /* inside a system call? */
  629.20 +#define PRED_NON_SYSCALL	5 /* complement of PRED_SYSCALL */
  629.21 +
  629.22 +#ifdef __ASSEMBLY__
  629.23 +# define PASTE2(x,y)	x##y
  629.24 +# define PASTE(x,y)	PASTE2(x,y)
  629.25 +
  629.26 +# define pLvSys		PASTE(p,PRED_LEAVE_SYSCALL)
  629.27 +# define pKStk		PASTE(p,PRED_KERNEL_STACK)
  629.28 +# define pUStk		PASTE(p,PRED_USER_STACK)
  629.29 +#ifdef CONFIG_VTI
  629.30 +# define pEml		PASTE(p,PRED_EMUL)
  629.31 +# define pNonEml	PASTE(p,PRED_NON_EMUL)
  629.32 +# define pBN0		PASTE(p,PRED_BN0)
  629.33 +# define pBN1		PASTE(p,PRED_BN1)
  629.34 +#endif // CONFIG_VTI
  629.35 +# define pSys		PASTE(p,PRED_SYSCALL)
  629.36 +# define pNonSys	PASTE(p,PRED_NON_SYSCALL)
  629.37 +#endif
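
The two-level PASTE2/PASTE pair above is the usual preprocessor trick for pasting the value of a macro rather than its name: the outer macro expands its arguments before the inner ## paste happens. A stand-alone illustration (the main() harness is only for demonstration):

#include <stdio.h>

#define PASTE2(x,y)	x##y
#define PASTE(x,y)	PASTE2(x,y)

#define PRED_LEAVE_SYSCALL	1

int main(void)
{
	int p1 = 42;	/* stands in for predicate register p1 */

	/* PASTE(p, PRED_LEAVE_SYSCALL) -> PASTE2(p, 1) -> p1.
	 * A single-level p##PRED_LEAVE_SYSCALL would instead yield the
	 * undefined token pPRED_LEAVE_SYSCALL. */
	printf("%d\n", PASTE(p, PRED_LEAVE_SYSCALL));
	return 0;
}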
  629.38 +
  629.39 +#define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
  629.40 +#define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
  629.41 +#ifdef CONFIG_VTI
  629.42 +#define VPD(f)      (VPD_##f##_START_OFFSET)
  629.43 +#endif // CONFIG_VTI
  629.44 +
  629.45 +#define PT_REGS_SAVES(off)			\
  629.46 +	.unwabi 3, 'i';				\
  629.47 +	.fframe IA64_PT_REGS_SIZE+16+(off);	\
  629.48 +	.spillsp rp, PT(CR_IIP)+16+(off);	\
  629.49 +	.spillsp ar.pfs, PT(CR_IFS)+16+(off);	\
  629.50 +	.spillsp ar.unat, PT(AR_UNAT)+16+(off);	\
  629.51 +	.spillsp ar.fpsr, PT(AR_FPSR)+16+(off);	\
  629.52 +	.spillsp pr, PT(PR)+16+(off);
  629.53 +
  629.54 +#define PT_REGS_UNWIND_INFO(off)		\
  629.55 +	.prologue;				\
  629.56 +	PT_REGS_SAVES(off);			\
  629.57 +	.body
  629.58 +
  629.59 +#define SWITCH_STACK_SAVES(off)							\
  629.60 +	.savesp ar.unat,SW(CALLER_UNAT)+16+(off);				\
  629.61 +	.savesp ar.fpsr,SW(AR_FPSR)+16+(off);					\
  629.62 +	.spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off);		\
  629.63 +	.spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off);		\
  629.64 +	.spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off);		\
  629.65 +	.spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off);		\
  629.66 +	.spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off);		\
  629.67 +	.spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off);		\
  629.68 +	.spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off);		\
  629.69 +	.spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off);		\
  629.70 +	.spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off);		\
  629.71 +	.spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off);		\
  629.72 +	.spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off);		\
  629.73 +	.spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off);		\
  629.74 +	.spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off);		\
  629.75 +	.spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off);		\
  629.76 +	.spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off);		\
  629.77 +	.spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off);	\
  629.78 +	.spillsp @priunat,SW(AR_UNAT)+16+(off);					\
  629.79 +	.spillsp ar.rnat,SW(AR_RNAT)+16+(off);					\
  629.80 +	.spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);				\
  629.81 +	.spillsp pr,SW(PR)+16+(off)
  629.82 +
  629.83 +#define DO_SAVE_SWITCH_STACK			\
  629.84 +	movl r28=1f;				\
  629.85 +	;;					\
  629.86 +	.fframe IA64_SWITCH_STACK_SIZE;		\
  629.87 +	adds sp=-IA64_SWITCH_STACK_SIZE,sp;	\
  629.88 +	mov.ret.sptk b7=r28,1f;			\
  629.89 +	SWITCH_STACK_SAVES(0);			\
  629.90 +	br.cond.sptk.many save_switch_stack;	\
  629.91 +1:
  629.92 +
  629.93 +#define DO_LOAD_SWITCH_STACK			\
  629.94 +	movl r28=1f;				\
  629.95 +	;;					\
  629.96 +	invala;					\
  629.97 +	mov.ret.sptk b7=r28,1f;			\
  629.98 +	br.cond.sptk.many load_switch_stack;	\
  629.99 +1:	.restore sp;				\
 629.100 +	adds sp=IA64_SWITCH_STACK_SIZE,sp
   630.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   630.2 +++ b/xen/arch/ia64/linux-xen/head.S	Fri Aug 26 09:05:43 2005 +0000
   630.3 @@ -0,0 +1,1026 @@
   630.4 +/*
   630.5 + * Here is where the ball gets rolling as far as the kernel is concerned.
   630.6 + * When control is transferred to _start, the bootloader has already
   630.7 + * loaded us to the correct address.  All that's left to do here is
   630.8 + * to set up the kernel's global pointer and jump to the kernel
   630.9 + * entry point.
  630.10 + *
  630.11 + * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
  630.12 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  630.13 + *	Stephane Eranian <eranian@hpl.hp.com>
  630.14 + * Copyright (C) 1999 VA Linux Systems
  630.15 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  630.16 + * Copyright (C) 1999 Intel Corp.
  630.17 + * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
  630.18 + * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
  630.19 + * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
  630.20 + *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
  630.21 + */
  630.22 +
  630.23 +#include <linux/config.h>
  630.24 +
  630.25 +#include <asm/asmmacro.h>
  630.26 +#include <asm/fpu.h>
  630.27 +#include <asm/kregs.h>
  630.28 +#include <asm/mmu_context.h>
  630.29 +#include <asm/offsets.h>
  630.30 +#include <asm/pal.h>
  630.31 +#include <asm/pgtable.h>
  630.32 +#include <asm/processor.h>
  630.33 +#include <asm/ptrace.h>
  630.34 +#include <asm/system.h>
  630.35 +
  630.36 +	.section __special_page_section,"ax"
  630.37 +
  630.38 +	.global empty_zero_page
  630.39 +empty_zero_page:
  630.40 +	.skip PAGE_SIZE
  630.41 +
  630.42 +	.global swapper_pg_dir
  630.43 +swapper_pg_dir:
  630.44 +	.skip PAGE_SIZE
  630.45 +
  630.46 +	.rodata
  630.47 +halt_msg:
  630.48 +	stringz "Halting kernel\n"
  630.49 +
  630.50 +	.text
  630.51 +
  630.52 +	.global start_ap
  630.53 +
  630.54 +	/*
  630.55 +	 * Start the kernel.  When the bootloader passes control to _start(), r28
  630.56 +	 * points to the address of the boot parameter area.  Execution reaches
  630.57 +	 * here in physical mode.
  630.58 +	 */
  630.59 +GLOBAL_ENTRY(_start)
  630.60 +start_ap:
  630.61 +	.prologue
  630.62 +	.save rp, r0		// terminate unwind chain with a NULL rp
  630.63 +	.body
  630.64 +
  630.65 +	rsm psr.i | psr.ic
  630.66 +	;;
  630.67 +	srlz.i
  630.68 +	;;
  630.69 +	/*
  630.70 +	 * Initialize kernel region registers:
  630.71 +	 *	rr[0]: VHPT enabled, page size = PAGE_SHIFT
  630.72 +	 *	rr[1]: VHPT enabled, page size = PAGE_SHIFT
  630.73 +	 *	rr[2]: VHPT enabled, page size = PAGE_SHIFT
  630.74 +	 *	rr[3]: VHPT enabled, page size = PAGE_SHIFT
  630.75 +	 *	rr[4]: VHPT enabled, page size = PAGE_SHIFT
  630.76 +	 *	rr[5]: VHPT enabled, page size = PAGE_SHIFT
  630.77 +	 *	rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
  630.78 +	 *	rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
  630.79 +	 * We initialize all of them to prevent inadvertently assuming
  630.80 +	 * something about the state of address translation early in boot.
  630.81 +	 */
  630.82 +	movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.83 +	movl r7=(0<<61)
  630.84 +	movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.85 +	movl r9=(1<<61)
  630.86 +	movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.87 +	movl r11=(2<<61)
  630.88 +	movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.89 +	movl r13=(3<<61)
  630.90 +	movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.91 +	movl r15=(4<<61)
  630.92 +	movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
  630.93 +	movl r17=(5<<61)
  630.94 +	movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
  630.95 +	movl r19=(6<<61)
  630.96 +	movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
  630.97 +	movl r21=(7<<61)
  630.98 +	;;
  630.99 +	mov rr[r7]=r6
 630.100 +	mov rr[r9]=r8
 630.101 +	mov rr[r11]=r10
 630.102 +	mov rr[r13]=r12
 630.103 +	mov rr[r15]=r14
 630.104 +	mov rr[r17]=r16
 630.105 +	mov rr[r19]=r18
 630.106 +	mov rr[r21]=r20
 630.107 +	;;
 630.108 +	/*
 630.109 +	 * Now pin mappings into the TLB for kernel text and data
 630.110 +	 */
 630.111 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
 630.112 +	movl r17=KERNEL_START
 630.113 +	;;
 630.114 +	mov cr.itir=r18
 630.115 +	mov cr.ifa=r17
 630.116 +	mov r16=IA64_TR_KERNEL
 630.117 +	mov r3=ip
 630.118 +	movl r18=PAGE_KERNEL
 630.119 +	;;
 630.120 +	dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
 630.121 +	;;
 630.122 +	or r18=r2,r18
 630.123 +	;;
 630.124 +	srlz.i
 630.125 +	;;
 630.126 +	itr.i itr[r16]=r18
 630.127 +	;;
 630.128 +	itr.d dtr[r16]=r18
 630.129 +	;;
 630.130 +	srlz.i
 630.131 +
 630.132 +	/*
 630.133 +	 * Switch into virtual mode:
 630.134 +	 */
 630.135 +#ifdef CONFIG_VTI
 630.136 +	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
 630.137 +		  |IA64_PSR_DI)
 630.138 +#else // CONFIG_VTI
 630.139 +	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
 630.140 +		  |IA64_PSR_DI)
 630.141 +#endif // CONFIG_VTI
 630.142 +	;;
 630.143 +	mov cr.ipsr=r16
 630.144 +	movl r17=1f
 630.145 +	;;
 630.146 +	mov cr.iip=r17
 630.147 +	mov cr.ifs=r0
 630.148 +	;;
 630.149 +	rfi
 630.150 +	;;
 630.151 +1:	// now we are in virtual mode
 630.152 +
 630.153 +	// set IVT entry point---can't access I/O ports without it
 630.154 +#ifdef CONFIG_VTI
 630.155 +	movl r3=vmx_ia64_ivt
 630.156 +#else // CONFIG_VTI
 630.157 +	movl r3=ia64_ivt
 630.158 +#endif // CONFIG_VTI
 630.159 +	;;
 630.160 +	mov cr.iva=r3
 630.161 +	movl r2=FPSR_DEFAULT
 630.162 +	;;
 630.163 +	srlz.i
 630.164 +	movl gp=__gp
 630.165 +
 630.166 +	mov ar.fpsr=r2
 630.167 +	;;
 630.168 +
 630.169 +#define isAP	p2	// are we an Application Processor?
 630.170 +#define isBP	p3	// are we the Bootstrap Processor?
 630.171 +
 630.172 +#ifdef CONFIG_SMP
 630.173 +	/*
 630.174 +	 * Find the init_task for the currently booting CPU.  At poweron, and in
 630.175 +	 * UP mode, task_for_booting_cpu is NULL.
 630.176 +	 */
 630.177 +	movl r3=task_for_booting_cpu
 630.178 + 	;;
 630.179 +	ld8 r3=[r3]
 630.180 +	movl r2=init_task
 630.181 +	;;
 630.182 +	cmp.eq isBP,isAP=r3,r0
 630.183 +	;;
 630.184 +(isAP)	mov r2=r3
 630.185 +#else
 630.186 +	movl r2=init_task
 630.187 +	cmp.eq isBP,isAP=r0,r0
 630.188 +#endif
 630.189 +	;;
 630.190 +	tpa r3=r2		// r3 == phys addr of task struct
 630.191 +	mov r16=-1
 630.192 +(isBP)	br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
 630.193 +
 630.194 +	// load mapping for stack (virtaddr in r2, physaddr in r3)
 630.195 +	rsm psr.ic
 630.196 +	movl r17=PAGE_KERNEL
 630.197 +	;;
 630.198 +	srlz.d
 630.199 +	dep r18=0,r3,0,12
 630.200 +	;;
 630.201 +	or r18=r17,r18
 630.202 +#ifdef XEN
 630.203 +	dep r2=-1,r3,60,4	// IMVA of task
 630.204 +#else
 630.205 +	dep r2=-1,r3,61,3	// IMVA of task
 630.206 +#endif
 630.207 +	;;
 630.208 +	mov r17=rr[r2]
 630.209 +	shr.u r16=r3,IA64_GRANULE_SHIFT
 630.210 +	;;
 630.211 +	dep r17=0,r17,8,24
 630.212 +	;;
 630.213 +	mov cr.itir=r17
 630.214 +	mov cr.ifa=r2
 630.215 +
 630.216 +	mov r19=IA64_TR_CURRENT_STACK
 630.217 +	;;
 630.218 +	itr.d dtr[r19]=r18
 630.219 +	;;
 630.220 +	ssm psr.ic
 630.221 +	srlz.d
 630.222 +  	;;
 630.223 +
 630.224 +.load_current:
 630.225 +	// load the "current" pointer (r13) and ar.k6 with the current task
 630.226 +#ifdef CONFIG_VTI
 630.227 +	mov r21=r2		// virtual address
 630.228 +	;;
 630.229 +	bsw.1
 630.230 +	;;
 630.231 +#else // CONFIG_VTI
 630.232 +	mov IA64_KR(CURRENT)=r2
 630.233 +	mov IA64_KR(CURRENT_STACK)=r16
 630.234 +#endif // CONFIG_VTI
 630.235 +	mov r13=r2
 630.236 +	/*
 630.237 +	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
 630.238 +	 * don't store interesting values in that structure, but the space still needs
 630.239 +	 * to be there because time-critical code such as context switching can
 630.240 +	 * be implemented more efficiently (for example, __switch_to()
 630.241 +	 * always sets the psr.dfh bit of the task it is switching to).
 630.242 +	 */
 630.243 +	addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
 630.244 +	addl r2=IA64_RBS_OFFSET,r2	// initialize the RSE
 630.245 +	mov ar.rsc=0		// place RSE in enforced lazy mode
 630.246 +	;;
 630.247 +	loadrs			// clear the dirty partition
 630.248 +	;;
 630.249 +	mov ar.bspstore=r2	// establish the new RSE stack
 630.250 +	;;
 630.251 +	mov ar.rsc=0x3		// place RSE in eager mode
 630.252 +
 630.253 +#ifdef XEN
 630.254 +(isBP)	dep r28=-1,r28,60,4	// make address virtual
 630.255 +#else
 630.256 +(isBP)	dep r28=-1,r28,61,3	// make address virtual
 630.257 +#endif
 630.258 +(isBP)	movl r2=ia64_boot_param
 630.259 +	;;
 630.260 +(isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
 630.261 +
 630.262 +#ifdef CONFIG_SMP
 630.263 +(isAP)	br.call.sptk.many rp=start_secondary
 630.264 +.ret0:
 630.265 +(isAP)	br.cond.sptk self
 630.266 +#endif
 630.267 +
 630.268 +	// This is executed by the bootstrap processor (bsp) only:
 630.269 +
 630.270 +#ifdef CONFIG_IA64_FW_EMU
 630.271 +	// initialize PAL & SAL emulator:
 630.272 +	br.call.sptk.many rp=sys_fw_init
 630.273 +.ret1:
 630.274 +#endif
 630.275 +	br.call.sptk.many rp=start_kernel
 630.276 +.ret2:	addl r3=@ltoff(halt_msg),gp
 630.277 +	;;
 630.278 +	alloc r2=ar.pfs,8,0,2,0
 630.279 +	;;
 630.280 +	ld8 out0=[r3]
 630.281 +	br.call.sptk.many b0=console_print
 630.282 +
 630.283 +self:	hint @pause
 630.284 +	;;
 630.285 +	br.sptk.many self		// endless loop
 630.286 +	;;
 630.287 +END(_start)
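
The region-register constants built with movl near the top of _start follow the architected rr layout: bit 0 enables the VHPT walker, bits 2..7 hold the preferred page size, and bits 8..31 hold the region ID. A hedged C sketch of that encoding (the helper name is illustrative; ia64_rid() and the shift constants are taken as given from the headers):

#include <stdint.h>

/* Compose a region-register value: rr.ve (bit 0), rr.ps (bits 2..7),
 * rr.rid (bits 8..31) -- the same arithmetic as the movl constants above. */
static inline uint64_t make_rr(uint64_t rid, unsigned int page_shift,
			       int vhpt_enabled)
{
	return (rid << 8) | ((uint64_t)page_shift << 2) | (vhpt_enabled ? 1 : 0);
}

/* Regions 0..5: VHPT on, PAGE_SHIFT pages.  Regions 6..7 (identity-mapped
 * kernel space): VHPT off, IA64_GRANULE_SHIFT pages, as in the code above. */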
 630.288 +
 630.289 +GLOBAL_ENTRY(ia64_save_debug_regs)
 630.290 +	alloc r16=ar.pfs,1,0,0,0
 630.291 +	mov r20=ar.lc			// preserve ar.lc
 630.292 +	mov ar.lc=IA64_NUM_DBG_REGS-1
 630.293 +	mov r18=0
 630.294 +	add r19=IA64_NUM_DBG_REGS*8,in0
 630.295 +	;;
 630.296 +1:	mov r16=dbr[r18]
 630.297 +#ifdef CONFIG_ITANIUM
 630.298 +	;;
 630.299 +	srlz.d
 630.300 +#endif
 630.301 +	mov r17=ibr[r18]
 630.302 +	add r18=1,r18
 630.303 +	;;
 630.304 +	st8.nta [in0]=r16,8
 630.305 +	st8.nta [r19]=r17,8
 630.306 +	br.cloop.sptk.many 1b
 630.307 +	;;
 630.308 +	mov ar.lc=r20			// restore ar.lc
 630.309 +	br.ret.sptk.many rp
 630.310 +END(ia64_save_debug_regs)
 630.311 +
 630.312 +GLOBAL_ENTRY(ia64_load_debug_regs)
 630.313 +	alloc r16=ar.pfs,1,0,0,0
 630.314 +	lfetch.nta [in0]
 630.315 +	mov r20=ar.lc			// preserve ar.lc
 630.316 +	add r19=IA64_NUM_DBG_REGS*8,in0
 630.317 +	mov ar.lc=IA64_NUM_DBG_REGS-1
 630.318 +	mov r18=-1
 630.319 +	;;
 630.320 +1:	ld8.nta r16=[in0],8
 630.321 +	ld8.nta r17=[r19],8
 630.322 +	add r18=1,r18
 630.323 +	;;
 630.324 +	mov dbr[r18]=r16
 630.325 +#ifdef CONFIG_ITANIUM
 630.326 +	;;
 630.327 +	srlz.d				// Errata 132 (NoFix status)
 630.328 +#endif
 630.329 +	mov ibr[r18]=r17
 630.330 +	br.cloop.sptk.many 1b
 630.331 +	;;
 630.332 +	mov ar.lc=r20			// restore ar.lc
 630.333 +	br.ret.sptk.many rp
 630.334 +END(ia64_load_debug_regs)
 630.335 +
 630.336 +GLOBAL_ENTRY(__ia64_save_fpu)
 630.337 +	alloc r2=ar.pfs,1,4,0,0
 630.338 +	adds loc0=96*16-16,in0
 630.339 +	adds loc1=96*16-16-128,in0
 630.340 +	;;
 630.341 +	stf.spill.nta [loc0]=f127,-256
 630.342 +	stf.spill.nta [loc1]=f119,-256
 630.343 +	;;
 630.344 +	stf.spill.nta [loc0]=f111,-256
 630.345 +	stf.spill.nta [loc1]=f103,-256
 630.346 +	;;
 630.347 +	stf.spill.nta [loc0]=f95,-256
 630.348 +	stf.spill.nta [loc1]=f87,-256
 630.349 +	;;
 630.350 +	stf.spill.nta [loc0]=f79,-256
 630.351 +	stf.spill.nta [loc1]=f71,-256
 630.352 +	;;
 630.353 +	stf.spill.nta [loc0]=f63,-256
 630.354 +	stf.spill.nta [loc1]=f55,-256
 630.355 +	adds loc2=96*16-32,in0
 630.356 +	;;
 630.357 +	stf.spill.nta [loc0]=f47,-256
 630.358 +	stf.spill.nta [loc1]=f39,-256
 630.359 +	adds loc3=96*16-32-128,in0
 630.360 +	;;
 630.361 +	stf.spill.nta [loc2]=f126,-256
 630.362 +	stf.spill.nta [loc3]=f118,-256
 630.363 +	;;
 630.364 +	stf.spill.nta [loc2]=f110,-256
 630.365 +	stf.spill.nta [loc3]=f102,-256
 630.366 +	;;
 630.367 +	stf.spill.nta [loc2]=f94,-256
 630.368 +	stf.spill.nta [loc3]=f86,-256
 630.369 +	;;
 630.370 +	stf.spill.nta [loc2]=f78,-256
 630.371 +	stf.spill.nta [loc3]=f70,-256
 630.372 +	;;
 630.373 +	stf.spill.nta [loc2]=f62,-256
 630.374 +	stf.spill.nta [loc3]=f54,-256
 630.375 +	adds loc0=96*16-48,in0
 630.376 +	;;
 630.377 +	stf.spill.nta [loc2]=f46,-256
 630.378 +	stf.spill.nta [loc3]=f38,-256
 630.379 +	adds loc1=96*16-48-128,in0
 630.380 +	;;
 630.381 +	stf.spill.nta [loc0]=f125,-256
 630.382 +	stf.spill.nta [loc1]=f117,-256
 630.383 +	;;
 630.384 +	stf.spill.nta [loc0]=f109,-256
 630.385 +	stf.spill.nta [loc1]=f101,-256
 630.386 +	;;
 630.387 +	stf.spill.nta [loc0]=f93,-256
 630.388 +	stf.spill.nta [loc1]=f85,-256
 630.389 +	;;
 630.390 +	stf.spill.nta [loc0]=f77,-256
 630.391 +	stf.spill.nta [loc1]=f69,-256
 630.392 +	;;
 630.393 +	stf.spill.nta [loc0]=f61,-256
 630.394 +	stf.spill.nta [loc1]=f53,-256
 630.395 +	adds loc2=96*16-64,in0
 630.396 +	;;
 630.397 +	stf.spill.nta [loc0]=f45,-256
 630.398 +	stf.spill.nta [loc1]=f37,-256
 630.399 +	adds loc3=96*16-64-128,in0
 630.400 +	;;
 630.401 +	stf.spill.nta [loc2]=f124,-256
 630.402 +	stf.spill.nta [loc3]=f116,-256
 630.403 +	;;
 630.404 +	stf.spill.nta [loc2]=f108,-256
 630.405 +	stf.spill.nta [loc3]=f100,-256
 630.406 +	;;
 630.407 +	stf.spill.nta [loc2]=f92,-256
 630.408 +	stf.spill.nta [loc3]=f84,-256
 630.409 +	;;
 630.410 +	stf.spill.nta [loc2]=f76,-256
 630.411 +	stf.spill.nta [loc3]=f68,-256
 630.412 +	;;
 630.413 +	stf.spill.nta [loc2]=f60,-256
 630.414 +	stf.spill.nta [loc3]=f52,-256
 630.415 +	adds loc0=96*16-80,in0
 630.416 +	;;
 630.417 +	stf.spill.nta [loc2]=f44,-256
 630.418 +	stf.spill.nta [loc3]=f36,-256
 630.419 +	adds loc1=96*16-80-128,in0
 630.420 +	;;
 630.421 +	stf.spill.nta [loc0]=f123,-256
 630.422 +	stf.spill.nta [loc1]=f115,-256
 630.423 +	;;
 630.424 +	stf.spill.nta [loc0]=f107,-256
 630.425 +	stf.spill.nta [loc1]=f99,-256
 630.426 +	;;
 630.427 +	stf.spill.nta [loc0]=f91,-256
 630.428 +	stf.spill.nta [loc1]=f83,-256
 630.429 +	;;
 630.430 +	stf.spill.nta [loc0]=f75,-256
 630.431 +	stf.spill.nta [loc1]=f67,-256
 630.432 +	;;
 630.433 +	stf.spill.nta [loc0]=f59,-256
 630.434 +	stf.spill.nta [loc1]=f51,-256
 630.435 +	adds loc2=96*16-96,in0
 630.436 +	;;
 630.437 +	stf.spill.nta [loc0]=f43,-256
 630.438 +	stf.spill.nta [loc1]=f35,-256
 630.439 +	adds loc3=96*16-96-128,in0
 630.440 +	;;
 630.441 +	stf.spill.nta [loc2]=f122,-256
 630.442 +	stf.spill.nta [loc3]=f114,-256
 630.443 +	;;
 630.444 +	stf.spill.nta [loc2]=f106,-256
 630.445 +	stf.spill.nta [loc3]=f98,-256
 630.446 +	;;
 630.447 +	stf.spill.nta [loc2]=f90,-256
 630.448 +	stf.spill.nta [loc3]=f82,-256
 630.449 +	;;
 630.450 +	stf.spill.nta [loc2]=f74,-256
 630.451 +	stf.spill.nta [loc3]=f66,-256
 630.452 +	;;
 630.453 +	stf.spill.nta [loc2]=f58,-256
 630.454 +	stf.spill.nta [loc3]=f50,-256
 630.455 +	adds loc0=96*16-112,in0
 630.456 +	;;
 630.457 +	stf.spill.nta [loc2]=f42,-256
 630.458 +	stf.spill.nta [loc3]=f34,-256
 630.459 +	adds loc1=96*16-112-128,in0
 630.460 +	;;
 630.461 +	stf.spill.nta [loc0]=f121,-256
 630.462 +	stf.spill.nta [loc1]=f113,-256
 630.463 +	;;
 630.464 +	stf.spill.nta [loc0]=f105,-256
 630.465 +	stf.spill.nta [loc1]=f97,-256
 630.466 +	;;
 630.467 +	stf.spill.nta [loc0]=f89,-256
 630.468 +	stf.spill.nta [loc1]=f81,-256
 630.469 +	;;
 630.470 +	stf.spill.nta [loc0]=f73,-256
 630.471 +	stf.spill.nta [loc1]=f65,-256
 630.472 +	;;
 630.473 +	stf.spill.nta [loc0]=f57,-256
 630.474 +	stf.spill.nta [loc1]=f49,-256
 630.475 +	adds loc2=96*16-128,in0
 630.476 +	;;
 630.477 +	stf.spill.nta [loc0]=f41,-256
 630.478 +	stf.spill.nta [loc1]=f33,-256
 630.479 +	adds loc3=96*16-128-128,in0
 630.480 +	;;
 630.481 +	stf.spill.nta [loc2]=f120,-256
 630.482 +	stf.spill.nta [loc3]=f112,-256
 630.483 +	;;
 630.484 +	stf.spill.nta [loc2]=f104,-256
 630.485 +	stf.spill.nta [loc3]=f96,-256
 630.486 +	;;
 630.487 +	stf.spill.nta [loc2]=f88,-256
 630.488 +	stf.spill.nta [loc3]=f80,-256
 630.489 +	;;
 630.490 +	stf.spill.nta [loc2]=f72,-256
 630.491 +	stf.spill.nta [loc3]=f64,-256
 630.492 +	;;
 630.493 +	stf.spill.nta [loc2]=f56,-256
 630.494 +	stf.spill.nta [loc3]=f48,-256
 630.495 +	;;
 630.496 +	stf.spill.nta [loc2]=f40
 630.497 +	stf.spill.nta [loc3]=f32
 630.498 +	br.ret.sptk.many rp
 630.499 +END(__ia64_save_fpu)
 630.500 +
 630.501 +GLOBAL_ENTRY(__ia64_load_fpu)
 630.502 +	alloc r2=ar.pfs,1,2,0,0
 630.503 +	adds r3=128,in0
 630.504 +	adds r14=256,in0
 630.505 +	adds r15=384,in0
 630.506 +	mov loc0=512
 630.507 +	mov loc1=-1024+16
 630.508 +	;;
 630.509 +	ldf.fill.nta f32=[in0],loc0
 630.510 +	ldf.fill.nta f40=[ r3],loc0
 630.511 +	ldf.fill.nta f48=[r14],loc0
 630.512 +	ldf.fill.nta f56=[r15],loc0
 630.513 +	;;
 630.514 +	ldf.fill.nta f64=[in0],loc0
 630.515 +	ldf.fill.nta f72=[ r3],loc0
 630.516 +	ldf.fill.nta f80=[r14],loc0
 630.517 +	ldf.fill.nta f88=[r15],loc0
 630.518 +	;;
 630.519 +	ldf.fill.nta f96=[in0],loc1
 630.520 +	ldf.fill.nta f104=[ r3],loc1
 630.521 +	ldf.fill.nta f112=[r14],loc1
 630.522 +	ldf.fill.nta f120=[r15],loc1
 630.523 +	;;
 630.524 +	ldf.fill.nta f33=[in0],loc0
 630.525 +	ldf.fill.nta f41=[ r3],loc0
 630.526 +	ldf.fill.nta f49=[r14],loc0
 630.527 +	ldf.fill.nta f57=[r15],loc0
 630.528 +	;;
 630.529 +	ldf.fill.nta f65=[in0],loc0
 630.530 +	ldf.fill.nta f73=[ r3],loc0
 630.531 +	ldf.fill.nta f81=[r14],loc0
 630.532 +	ldf.fill.nta f89=[r15],loc0
 630.533 +	;;
 630.534 +	ldf.fill.nta f97=[in0],loc1
 630.535 +	ldf.fill.nta f105=[ r3],loc1
 630.536 +	ldf.fill.nta f113=[r14],loc1
 630.537 +	ldf.fill.nta f121=[r15],loc1
 630.538 +	;;
 630.539 +	ldf.fill.nta f34=[in0],loc0
 630.540 +	ldf.fill.nta f42=[ r3],loc0
 630.541 +	ldf.fill.nta f50=[r14],loc0
 630.542 +	ldf.fill.nta f58=[r15],loc0
 630.543 +	;;
 630.544 +	ldf.fill.nta f66=[in0],loc0
 630.545 +	ldf.fill.nta f74=[ r3],loc0
 630.546 +	ldf.fill.nta f82=[r14],loc0
 630.547 +	ldf.fill.nta f90=[r15],loc0
 630.548 +	;;
 630.549 +	ldf.fill.nta f98=[in0],loc1
 630.550 +	ldf.fill.nta f106=[ r3],loc1
 630.551 +	ldf.fill.nta f114=[r14],loc1
 630.552 +	ldf.fill.nta f122=[r15],loc1
 630.553 +	;;
 630.554 +	ldf.fill.nta f35=[in0],loc0
 630.555 +	ldf.fill.nta f43=[ r3],loc0
 630.556 +	ldf.fill.nta f51=[r14],loc0
 630.557 +	ldf.fill.nta f59=[r15],loc0
 630.558 +	;;
 630.559 +	ldf.fill.nta f67=[in0],loc0
 630.560 +	ldf.fill.nta f75=[ r3],loc0
 630.561 +	ldf.fill.nta f83=[r14],loc0
 630.562 +	ldf.fill.nta f91=[r15],loc0
 630.563 +	;;
 630.564 +	ldf.fill.nta f99=[in0],loc1
 630.565 +	ldf.fill.nta f107=[ r3],loc1
 630.566 +	ldf.fill.nta f115=[r14],loc1
 630.567 +	ldf.fill.nta f123=[r15],loc1
 630.568 +	;;
 630.569 +	ldf.fill.nta f36=[in0],loc0
 630.570 +	ldf.fill.nta f44=[ r3],loc0
 630.571 +	ldf.fill.nta f52=[r14],loc0
 630.572 +	ldf.fill.nta f60=[r15],loc0
 630.573 +	;;
 630.574 +	ldf.fill.nta f68=[in0],loc0
 630.575 +	ldf.fill.nta f76=[ r3],loc0
 630.576 +	ldf.fill.nta f84=[r14],loc0
 630.577 +	ldf.fill.nta f92=[r15],loc0
 630.578 +	;;
 630.579 +	ldf.fill.nta f100=[in0],loc1
 630.580 +	ldf.fill.nta f108=[ r3],loc1
 630.581 +	ldf.fill.nta f116=[r14],loc1
 630.582 +	ldf.fill.nta f124=[r15],loc1
 630.583 +	;;
 630.584 +	ldf.fill.nta f37=[in0],loc0
 630.585 +	ldf.fill.nta f45=[ r3],loc0
 630.586 +	ldf.fill.nta f53=[r14],loc0
 630.587 +	ldf.fill.nta f61=[r15],loc0
 630.588 +	;;
 630.589 +	ldf.fill.nta f69=[in0],loc0
 630.590 +	ldf.fill.nta f77=[ r3],loc0
 630.591 +	ldf.fill.nta f85=[r14],loc0
 630.592 +	ldf.fill.nta f93=[r15],loc0
 630.593 +	;;
 630.594 +	ldf.fill.nta f101=[in0],loc1
 630.595 +	ldf.fill.nta f109=[ r3],loc1
 630.596 +	ldf.fill.nta f117=[r14],loc1
 630.597 +	ldf.fill.nta f125=[r15],loc1
 630.598 +	;;
 630.599 +	ldf.fill.nta f38 =[in0],loc0
 630.600 +	ldf.fill.nta f46 =[ r3],loc0
 630.601 +	ldf.fill.nta f54 =[r14],loc0
 630.602 +	ldf.fill.nta f62 =[r15],loc0
 630.603 +	;;
 630.604 +	ldf.fill.nta f70 =[in0],loc0
 630.605 +	ldf.fill.nta f78 =[ r3],loc0
 630.606 +	ldf.fill.nta f86 =[r14],loc0
 630.607 +	ldf.fill.nta f94 =[r15],loc0
 630.608 +	;;
 630.609 +	ldf.fill.nta f102=[in0],loc1
 630.610 +	ldf.fill.nta f110=[ r3],loc1
 630.611 +	ldf.fill.nta f118=[r14],loc1
 630.612 +	ldf.fill.nta f126=[r15],loc1
 630.613 +	;;
 630.614 +	ldf.fill.nta f39 =[in0],loc0
 630.615 +	ldf.fill.nta f47 =[ r3],loc0
 630.616 +	ldf.fill.nta f55 =[r14],loc0
 630.617 +	ldf.fill.nta f63 =[r15],loc0
 630.618 +	;;
 630.619 +	ldf.fill.nta f71 =[in0],loc0
 630.620 +	ldf.fill.nta f79 =[ r3],loc0
 630.621 +	ldf.fill.nta f87 =[r14],loc0
 630.622 +	ldf.fill.nta f95 =[r15],loc0
 630.623 +	;;
 630.624 +	ldf.fill.nta f103=[in0]
 630.625 +	ldf.fill.nta f111=[ r3]
 630.626 +	ldf.fill.nta f119=[r14]
 630.627 +	ldf.fill.nta f127=[r15]
 630.628 +	br.ret.sptk.many rp
 630.629 +END(__ia64_load_fpu)
 630.630 +
 630.631 +GLOBAL_ENTRY(__ia64_init_fpu)
 630.632 +	stf.spill [sp]=f0		// M3
 630.633 +	mov	 f32=f0			// F
 630.634 +	nop.b	 0
 630.635 +
 630.636 +	ldfps	 f33,f34=[sp]		// M0
 630.637 +	ldfps	 f35,f36=[sp]		// M1
 630.638 +	mov      f37=f0			// F
 630.639 +	;;
 630.640 +
 630.641 +	setf.s	 f38=r0			// M2
 630.642 +	setf.s	 f39=r0			// M3
 630.643 +	mov      f40=f0			// F
 630.644 +
 630.645 +	ldfps	 f41,f42=[sp]		// M0
 630.646 +	ldfps	 f43,f44=[sp]		// M1
 630.647 +	mov      f45=f0			// F
 630.648 +
 630.649 +	setf.s	 f46=r0			// M2
 630.650 +	setf.s	 f47=r0			// M3
 630.651 +	mov      f48=f0			// F
 630.652 +
 630.653 +	ldfps	 f49,f50=[sp]		// M0
 630.654 +	ldfps	 f51,f52=[sp]		// M1
 630.655 +	mov      f53=f0			// F
 630.656 +
 630.657 +	setf.s	 f54=r0			// M2
 630.658 +	setf.s	 f55=r0			// M3
 630.659 +	mov      f56=f0			// F
 630.660 +
 630.661 +	ldfps	 f57,f58=[sp]		// M0
 630.662 +	ldfps	 f59,f60=[sp]		// M1
 630.663 +	mov      f61=f0			// F
 630.664 +
 630.665 +	setf.s	 f62=r0			// M2
 630.666 +	setf.s	 f63=r0			// M3
 630.667 +	mov      f64=f0			// F
 630.668 +
 630.669 +	ldfps	 f65,f66=[sp]		// M0
 630.670 +	ldfps	 f67,f68=[sp]		// M1
 630.671 +	mov      f69=f0			// F
 630.672 +
 630.673 +	setf.s	 f70=r0			// M2
 630.674 +	setf.s	 f71=r0			// M3
 630.675 +	mov      f72=f0			// F
 630.676 +
 630.677 +	ldfps	 f73,f74=[sp]		// M0
 630.678 +	ldfps	 f75,f76=[sp]		// M1
 630.679 +	mov      f77=f0			// F
 630.680 +
 630.681 +	setf.s	 f78=r0			// M2
 630.682 +	setf.s	 f79=r0			// M3
 630.683 +	mov      f80=f0			// F
 630.684 +
 630.685 +	ldfps	 f81,f82=[sp]		// M0
 630.686 +	ldfps	 f83,f84=[sp]		// M1
 630.687 +	mov      f85=f0			// F
 630.688 +
 630.689 +	setf.s	 f86=r0			// M2
 630.690 +	setf.s	 f87=r0			// M3
 630.691 +	mov      f88=f0			// F
 630.692 +
 630.693 +	/*
 630.694 +	 * When the instructions are cached, it would be faster to initialize
 630.695 + * the remaining registers with simple mov instructions (F-unit).
 630.696 +	 * This gets the time down to ~29 cycles.  However, this would use up
 630.697 +	 * 33 bundles, whereas continuing with the above pattern yields
 630.698 +	 * 10 bundles and ~30 cycles.
 630.699 +	 */
 630.700 +
 630.701 +	ldfps	 f89,f90=[sp]		// M0
 630.702 +	ldfps	 f91,f92=[sp]		// M1
 630.703 +	mov      f93=f0			// F
 630.704 +
 630.705 +	setf.s	 f94=r0			// M2
 630.706 +	setf.s	 f95=r0			// M3
 630.707 +	mov      f96=f0			// F
 630.708 +
 630.709 +	ldfps	 f97,f98=[sp]		// M0
 630.710 +	ldfps	 f99,f100=[sp]		// M1
 630.711 +	mov      f101=f0		// F
 630.712 +
 630.713 +	setf.s	 f102=r0		// M2
 630.714 +	setf.s	 f103=r0		// M3
 630.715 +	mov      f104=f0		// F
 630.716 +
 630.717 +	ldfps	 f105,f106=[sp]		// M0
 630.718 +	ldfps	 f107,f108=[sp]		// M1
 630.719 +	mov      f109=f0		// F
 630.720 +
 630.721 +	setf.s	 f110=r0		// M2
 630.722 +	setf.s	 f111=r0		// M3
 630.723 +	mov      f112=f0		// F
 630.724 +
 630.725 +	ldfps	 f113,f114=[sp]		// M0
 630.726 +	ldfps	 f115,f116=[sp]		// M1
 630.727 +	mov      f117=f0		// F
 630.728 +
 630.729 +	setf.s	 f118=r0		// M2
 630.730 +	setf.s	 f119=r0		// M3
 630.731 +	mov      f120=f0		// F
 630.732 +
 630.733 +	ldfps	 f121,f122=[sp]		// M0
 630.734 +	ldfps	 f123,f124=[sp]		// M1
 630.735 +	mov      f125=f0		// F
 630.736 +
 630.737 +	setf.s	 f126=r0		// M2
 630.738 +	setf.s	 f127=r0		// M3
 630.739 +	br.ret.sptk.many rp		// F
 630.740 +END(__ia64_init_fpu)
 630.741 +
 630.742 +/*
 630.743 + * Switch execution mode from virtual to physical
 630.744 + *
 630.745 + * Inputs:
 630.746 + *	r16 = new psr to establish
 630.747 + * Output:
 630.748 + *	r19 = old virtual address of ar.bsp
 630.749 + *	r20 = old virtual address of sp
 630.750 + *
 630.751 + * Note: RSE must already be in enforced lazy mode
 630.752 + */
 630.753 +GLOBAL_ENTRY(ia64_switch_mode_phys)
 630.754 + {
 630.755 +	alloc r2=ar.pfs,0,0,0,0
 630.756 +	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 630.757 +	mov r15=ip
 630.758 + }
 630.759 +	;;
 630.760 + {
 630.761 +	flushrs				// must be first insn in group
 630.762 +	srlz.i
 630.763 + }
 630.764 +	;;
 630.765 +	mov cr.ipsr=r16			// set new PSR
 630.766 +	add r3=1f-ia64_switch_mode_phys,r15
 630.767 +
 630.768 +	mov r19=ar.bsp
 630.769 +	mov r20=sp
 630.770 +	mov r14=rp			// get return address into a general register
 630.771 +	;;
 630.772 +
 630.773 +	// going to physical mode, use tpa to translate virt->phys
 630.774 +	tpa r17=r19
 630.775 +	tpa r3=r3
 630.776 +	tpa sp=sp
 630.777 +	tpa r14=r14
 630.778 +	;;
 630.779 +
 630.780 +	mov r18=ar.rnat			// save ar.rnat
 630.781 +	mov ar.bspstore=r17		// this steps on ar.rnat
 630.782 +	mov cr.iip=r3
 630.783 +	mov cr.ifs=r0
 630.784 +	;;
 630.785 +	mov ar.rnat=r18			// restore ar.rnat
 630.786 +	rfi				// must be last insn in group
 630.787 +	;;
 630.788 +1:	mov rp=r14
 630.789 +	br.ret.sptk.many rp
 630.790 +END(ia64_switch_mode_phys)
 630.791 +
 630.792 +/*
 630.793 + * Switch execution mode from physical to virtual
 630.794 + *
 630.795 + * Inputs:
 630.796 + *	r16 = new psr to establish
 630.797 + *	r19 = new bspstore to establish
 630.798 + *	r20 = new sp to establish
 630.799 + *
 630.800 + * Note: RSE must already be in enforced lazy mode
 630.801 + */
 630.802 +GLOBAL_ENTRY(ia64_switch_mode_virt)
 630.803 + {
 630.804 +	alloc r2=ar.pfs,0,0,0,0
 630.805 +	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 630.806 +	mov r15=ip
 630.807 + }
 630.808 +	;;
 630.809 + {
 630.810 +	flushrs				// must be first insn in group
 630.811 +	srlz.i
 630.812 + }
 630.813 +	;;
 630.814 +	mov cr.ipsr=r16			// set new PSR
 630.815 +	add r3=1f-ia64_switch_mode_virt,r15
 630.816 +
 630.817 +	mov r14=rp			// get return address into a general register
 630.818 +	;;
 630.819 +
 630.820 +	// going to virtual
 630.821 +	//   - for code addresses, set upper bits of addr to KERNEL_START
 630.822 +	//   - for stack addresses, copy from input argument
 630.823 +	movl r18=KERNEL_START
 630.824 +	dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
 630.825 +	dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
 630.826 +	mov sp=r20
 630.827 +	;;
 630.828 +	or r3=r3,r18
 630.829 +	or r14=r14,r18
 630.830 +	;;
 630.831 +
 630.832 +	mov r18=ar.rnat			// save ar.rnat
 630.833 +	mov ar.bspstore=r19		// this steps on ar.rnat
 630.834 +	mov cr.iip=r3
 630.835 +	mov cr.ifs=r0
 630.836 +	;;
 630.837 +	mov ar.rnat=r18			// restore ar.rnat
 630.838 +	rfi				// must be last insn in group
 630.839 +	;;
 630.840 +1:	mov rp=r14
 630.841 +	br.ret.sptk.many rp
 630.842 +END(ia64_switch_mode_virt)
 630.843 +
 630.844 +GLOBAL_ENTRY(ia64_delay_loop)
 630.845 +	.prologue
 630.846 +{	nop 0			// work around GAS unwind info generation bug...
 630.847 +	.save ar.lc,r2
 630.848 +	mov r2=ar.lc
 630.849 +	.body
 630.850 +	;;
 630.851 +	mov ar.lc=r32
 630.852 +}
 630.853 +	;;
 630.854 +	// force loop to be 32-byte aligned (GAS bug means we cannot use .align
 630.855 +	// inside function body without corrupting unwind info).
 630.856 +{	nop 0 }
 630.857 +1:	br.cloop.sptk.few 1b
 630.858 +	;;
 630.859 +	mov ar.lc=r2
 630.860 +	br.ret.sptk.many rp
 630.861 +END(ia64_delay_loop)
 630.862 +
 630.863 +/*
 630.864 + * Return a CPU-local timestamp in nanoseconds.  This timestamp is
 630.865 + * NOT synchronized across CPUs; its return value must never be
 630.866 + * compared against the values returned on another CPU.  The usage in
 630.867 + * kernel/sched.c ensures that.
 630.868 + *
 630.869 + * The return value of sched_clock() is NOT supposed to wrap around.
 630.870 + * If it did, it would cause some scheduling hiccups (at the worst).
 630.871 + * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
 630.872 + * that would happen only once every 5+ years.
 630.873 + *
 630.874 + * The code below basically calculates:
 630.875 + *
 630.876 + *   (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
 630.877 + *
 630.878 + * except that the multiplication and the shift are done with 128-bit
 630.879 + * intermediate precision so that we can produce a full 64-bit result.
 630.880 + */
 630.881 +GLOBAL_ENTRY(sched_clock)
 630.882 +#ifdef XEN
 630.883 +	movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
 630.884 +#else
 630.885 +	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 630.886 +#endif
 630.887 +	mov.m r9=ar.itc		// fetch cycle-counter				(35 cyc)
 630.888 +	;;
 630.889 +	ldf8 f8=[r8]
 630.890 +	;;
 630.891 +	setf.sig f9=r9		// certain to stall, so issue it _after_ ldf8...
 630.892 +	;;
 630.893 +	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
 630.894 +	xmpy.hu f11=f9,f8	// calculate high 64 bits of 128-bit product
 630.895 +	;;
 630.896 +	getf.sig r8=f10		//						(5 cyc)
 630.897 +	getf.sig r9=f11
 630.898 +	;;
 630.899 +	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
 630.900 +	br.ret.sptk.many rp
 630.901 +END(sched_clock)
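
The comment above sched_clock() describes a 64x64->128-bit multiply followed by a right shift; the assembly obtains the 128-bit product from the xmpy.lu/xmpy.hu pair on the FP unit. The same computation sketched with GCC's unsigned __int128 (the shift value below is an assumed stand-in for IA64_NSEC_PER_CYC_SHIFT):

#include <stdint.h>

#define NSEC_PER_CYC_SHIFT	30	/* assumed stand-in for IA64_NSEC_PER_CYC_SHIFT */

/* Cycles-to-nanoseconds with 128-bit intermediate precision, so the full
 * 64-bit result survives the fixed-point scaling. */
static uint64_t cycles_to_ns(uint64_t itc, uint64_t nsec_per_cyc)
{
	unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;

	return (uint64_t)(prod >> NSEC_PER_CYC_SHIFT);
}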
 630.902 +
 630.903 +GLOBAL_ENTRY(start_kernel_thread)
 630.904 +	.prologue
 630.905 +	.save rp, r0				// this is the end of the call-chain
 630.906 +	.body
 630.907 +	alloc r2 = ar.pfs, 0, 0, 2, 0
 630.908 +	mov out0 = r9
 630.909 +	mov out1 = r11;;
 630.910 +	br.call.sptk.many rp = kernel_thread_helper;;
 630.911 +	mov out0 = r8
 630.912 +	br.call.sptk.many rp = sys_exit;;
 630.913 +1:	br.sptk.few 1b				// not reached
 630.914 +END(start_kernel_thread)
 630.915 +
 630.916 +#ifdef CONFIG_IA64_BRL_EMU
 630.917 +
 630.918 +/*
 630.919 + *  Assembly routines used by brl_emu.c to set preserved register state.
 630.920 + */
 630.921 +
 630.922 +#define SET_REG(reg)				\
 630.923 + GLOBAL_ENTRY(ia64_set_##reg);			\
 630.924 +	alloc r16=ar.pfs,1,0,0,0;		\
 630.925 +	mov reg=r32;				\
 630.926 +	;;					\
 630.927 +	br.ret.sptk.many rp;			\
 630.928 + END(ia64_set_##reg)
 630.929 +
 630.930 +SET_REG(b1);
 630.931 +SET_REG(b2);
 630.932 +SET_REG(b3);
 630.933 +SET_REG(b4);
 630.934 +SET_REG(b5);
 630.935 +
 630.936 +#endif /* CONFIG_IA64_BRL_EMU */
 630.937 +
 630.938 +#ifdef CONFIG_SMP
 630.939 +	/*
 630.940 +	 * This routine handles spinlock contention.  It uses a non-standard calling
 630.941 +	 * convention to avoid converting leaf routines into interior routines.  Because
 630.942 +	 * of this special convention, there are several restrictions:
 630.943 +	 *
 630.944 +	 * - do not use gp-relative variables; this code is called from the kernel
 630.945 +	 *   and from modules, where r1 is undefined.
 630.946 +	 * - do not use stacked registers, the caller owns them.
 630.947 +	 * - do not use the scratch stack space, the caller owns it.
 630.948 +	 * - do not use any registers other than the ones listed below
 630.949 +	 *
 630.950 +	 * Inputs:
 630.951 +	 *   ar.pfs - saved CFM of caller
 630.952 +	 *   ar.ccv - 0 (and available for use)
 630.953 +	 *   r27    - flags from spin_lock_irqsave or 0.  Must be preserved.
 630.954 +	 *   r28    - available for use.
 630.955 +	 *   r29    - available for use.
 630.956 +	 *   r30    - available for use.
 630.957 +	 *   r31    - address of lock, available for use.
 630.958 +	 *   b6     - return address
 630.959 +	 *   p14    - available for use.
 630.960 +	 *   p15    - used to track flag status.
 630.961 +	 *
 630.962 +	 * If you patch this code to use more registers, do not forget to update
 630.963 +	 * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
 630.964 +	 */
 630.965 +
 630.966 +#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 630.967 +
 630.968 +GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
 630.969 +	.prologue
 630.970 +	.save ar.pfs, r0	// this code effectively has a zero frame size
 630.971 +	.save rp, r28
 630.972 +	.body
 630.973 +	nop 0
 630.974 +	tbit.nz p15,p0=r27,IA64_PSR_I_BIT
 630.975 +	.restore sp		// pop existing prologue after next insn
 630.976 +	mov b6 = r28
 630.977 +	.prologue
 630.978 +	.save ar.pfs, r0
 630.979 +	.altrp b6
 630.980 +	.body
 630.981 +	;;
 630.982 +(p15)	ssm psr.i		// reenable interrupts if they were on
 630.983 +				// DavidM says that srlz.d is slow and is not required in this case
 630.984 +.wait:
 630.985 +	// exponential backoff, kdb, lockmeter etc. go in here
 630.986 +	hint @pause
 630.987 +	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
 630.988 +	nop 0
 630.989 +	;;
 630.990 +	cmp4.ne p14,p0=r30,r0
 630.991 +(p14)	br.cond.sptk.few .wait
 630.992 +(p15)	rsm psr.i		// disable interrupts if we reenabled them
 630.993 +	br.cond.sptk.few b6	// lock is now free, try to acquire
 630.994 +	.global ia64_spinlock_contention_pre3_4_end	// for kernprof
 630.995 +ia64_spinlock_contention_pre3_4_end:
 630.996 +END(ia64_spinlock_contention_pre3_4)
 630.997 +
 630.998 +#else
 630.999 +
630.1000 +GLOBAL_ENTRY(ia64_spinlock_contention)
630.1001 +	.prologue
630.1002 +	.altrp b6
630.1003 +	.body
630.1004 +	tbit.nz p15,p0=r27,IA64_PSR_I_BIT
630.1005 +	;;
630.1006 +.wait:
630.1007 +(p15)	ssm psr.i		// reenable interrupts if they were on
630.1008 +				// DavidM says that srlz.d is slow and is not required in this case
630.1009 +.wait2:
630.1010 +	// exponential backoff, kdb, lockmeter etc. go in here
630.1011 +	hint @pause
630.1012 +	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
630.1013 +	;;
630.1014 +	cmp4.ne p14,p0=r30,r0
630.1015 +	mov r30 = 1
630.1016 +(p14)	br.cond.sptk.few .wait2
630.1017 +(p15)	rsm psr.i		// disable interrupts if we reenabled them
630.1018 +	;;
630.1019 +	cmpxchg4.acq r30=[r31], r30, ar.ccv
630.1020 +	;;
630.1021 +	cmp4.ne p14,p0=r0,r30
630.1022 +(p14)	br.cond.sptk.few .wait
630.1023 +
630.1024 +	br.ret.sptk.many b6	// lock is now taken
630.1025 +END(ia64_spinlock_contention)
630.1026 +
630.1027 +#endif
630.1028 +
630.1029 +#endif /* CONFIG_SMP */
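
Both contention routines above implement the same test-and-test-and-set pattern: spin on plain loads (with interrupts re-enabled if the caller had them on) until the lock word reads zero, then retry the atomic acquire — done with cmpxchg4.acq here, or back in the caller for the pre-3.4 variant. A hedged C rendering of that loop, using a GCC builtin in place of cmpxchg4.acq; the irq/relax stubs are placeholders for illustration only:

#define cpu_relax()		__asm__ __volatile__("" ::: "memory")	/* hint @pause */
#define local_irq_enable()	do { } while (0)	/* stub: (p15) ssm psr.i */
#define local_irq_disable()	do { } while (0)	/* stub: (p15) rsm psr.i */

static void spinlock_contend(volatile unsigned int *lock, int irqs_were_on)
{
	for (;;) {
		if (irqs_were_on)
			local_irq_enable();
		while (*lock != 0)	/* read-only spin: ld4, no bus locking */
			cpu_relax();
		if (irqs_were_on)
			local_irq_disable();
		/* cmpxchg4.acq: take the lock only if it is still free */
		if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
			return;		/* lock acquired */
	}
}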
   631.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   631.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Fri Aug 26 09:05:43 2005 +0000
   631.3 @@ -0,0 +1,381 @@
   631.4 +/*
   631.5 + * linux/arch/ia64/kernel/irq.c
   631.6 + *
   631.7 + * Copyright (C) 1998-2001 Hewlett-Packard Co
   631.8 + *	Stephane Eranian <eranian@hpl.hp.com>
   631.9 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  631.10 + *
  631.11 + *  6/10/99: Updated to bring in sync with x86 version to facilitate
  631.12 + *	     support for SMP and different interrupt controllers.
  631.13 + *
  631.14 + * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
  631.15 + *                      a PCI-to-vector allocation routine.
  631.16 + * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
  631.17 + *						Added CPU Hotplug handling for IPF.
  631.18 + */
  631.19 +
  631.20 +#include <linux/config.h>
  631.21 +#include <linux/module.h>
  631.22 +
  631.23 +#include <linux/jiffies.h>
  631.24 +#include <linux/errno.h>
  631.25 +#include <linux/init.h>
  631.26 +#include <linux/interrupt.h>
  631.27 +#include <linux/ioport.h>
  631.28 +#include <linux/kernel_stat.h>
  631.29 +#include <linux/slab.h>
  631.30 +#include <linux/ptrace.h>
  631.31 +#include <linux/random.h>	/* for rand_initialize_irq() */
  631.32 +#include <linux/signal.h>
  631.33 +#include <linux/smp.h>
  631.34 +#include <linux/smp_lock.h>
  631.35 +#include <linux/threads.h>
  631.36 +#include <linux/bitops.h>
  631.37 +
  631.38 +#include <asm/delay.h>
  631.39 +#include <asm/intrinsics.h>
  631.40 +#include <asm/io.h>
  631.41 +#include <asm/hw_irq.h>
  631.42 +#include <asm/machvec.h>
  631.43 +#include <asm/pgtable.h>
  631.44 +#include <asm/system.h>
  631.45 +
  631.46 +#ifdef CONFIG_PERFMON
  631.47 +# include <asm/perfmon.h>
  631.48 +#endif
  631.49 +
  631.50 +#define IRQ_DEBUG	0
  631.51 +
  631.52 +/* default base addr of IPI table */
  631.53 +void __iomem *ipi_base_addr = ((void __iomem *)
  631.54 +			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
  631.55 +
  631.56 +/*
  631.57 + * Legacy IRQ to IA-64 vector translation table.
  631.58 + */
  631.59 +__u8 isa_irq_to_vector_map[16] = {
  631.60 +	/* 8259 IRQ translation, first 16 entries */
  631.61 +	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
  631.62 +	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
  631.63 +};
  631.64 +EXPORT_SYMBOL(isa_irq_to_vector_map);
  631.65 +
  631.66 +static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
  631.67 +
  631.68 +int
  631.69 +assign_irq_vector (int irq)
  631.70 +{
  631.71 +	int pos, vector;
  631.72 + again:
  631.73 +	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
  631.74 +	vector = IA64_FIRST_DEVICE_VECTOR + pos;
  631.75 +	if (vector > IA64_LAST_DEVICE_VECTOR)
  631.76 +		/* XXX could look for sharable vectors instead of panic'ing... */
  631.77 +		panic("assign_irq_vector: out of interrupt vectors!");
  631.78 +	if (test_and_set_bit(pos, ia64_vector_mask))
  631.79 +		goto again;
  631.80 +	return vector;
  631.81 +}
  631.82 +
  631.83 +void
  631.84 +free_irq_vector (int vector)
  631.85 +{
  631.86 +	int pos;
  631.87 +
  631.88 +	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
  631.89 +		return;
  631.90 +
  631.91 +	pos = vector - IA64_FIRST_DEVICE_VECTOR;
  631.92 +	if (!test_and_clear_bit(pos, ia64_vector_mask))
  631.93 +		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
  631.94 +}
  631.95 +
  631.96 +#ifdef CONFIG_SMP
  631.97 +#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
  631.98 +#else
  631.99 +#	define IS_RESCHEDULE(vec)	(0)
 631.100 +#endif
 631.101 +/*
 631.102 + * This is where the IVT branches to when we get an external
 631.103 + * interrupt.  It then dispatches to the correct hardware IRQ
 631.104 + * handler via a function pointer.
 631.105 + */
 631.106 +void
 631.107 +ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 631.108 +{
 631.109 +	unsigned long saved_tpr;
 631.110 +
 631.111 +#if IRQ_DEBUG
 631.112 +#ifdef XEN
 631.113 +	xen_debug_irq(vector, regs);
 631.114 +#endif
 631.115 +	{
 631.116 +		unsigned long bsp, sp;
 631.117 +
 631.118 +		/*
 631.119 +		 * Note: if the interrupt happened while executing in
 631.120 +		 * the context switch routine (ia64_switch_to), we may
 631.121 +		 * get a spurious stack overflow here.  This is
 631.122 +		 * because the register and the memory stack are not
 631.123 +		 * switched atomically.
 631.124 +		 */
 631.125 +		bsp = ia64_getreg(_IA64_REG_AR_BSP);
 631.126 +		sp = ia64_getreg(_IA64_REG_SP);
 631.127 +
 631.128 +		if ((sp - bsp) < 1024) {
 631.129 +			static unsigned char count;
 631.130 +			static long last_time;
 631.131 +
 631.132 +			if (jiffies - last_time > 5*HZ)
 631.133 +				count = 0;
 631.134 +			if (++count < 5) {
 631.135 +				last_time = jiffies;
 631.136 +				printk("ia64_handle_irq: DANGER: less than "
 631.137 +				       "1KB of free stack space!!\n"
 631.138 +				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
 631.139 +			}
 631.140 +		}
 631.141 +	}
 631.142 +#endif /* IRQ_DEBUG */
 631.143 +
 631.144 +	/*
 631.145 +	 * Always set TPR to limit maximum interrupt nesting depth to
 631.146 +	 * 16 (without this, it would be ~240, which could easily lead
 631.147 +	 * to kernel stack overflows).
 631.148 +	 */
 631.149 +	irq_enter();
 631.150 +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 631.151 +	ia64_srlz_d();
 631.152 +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 631.153 +		if (!IS_RESCHEDULE(vector)) {
 631.154 +			ia64_setreg(_IA64_REG_CR_TPR, vector);
 631.155 +			ia64_srlz_d();
 631.156 +
 631.157 +#ifdef XEN
 631.158 +			if (!xen_do_IRQ(vector))
 631.159 +#endif
 631.160 +			__do_IRQ(local_vector_to_irq(vector), regs);
 631.161 +
 631.162 +			/*
 631.163 +			 * Disable interrupts and send EOI:
 631.164 +			 */
 631.165 +			local_irq_disable();
 631.166 +			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
 631.167 +		}
 631.168 +		ia64_eoi();
 631.169 +		vector = ia64_get_ivr();
 631.170 +	}
 631.171 +	/*
 631.172 +	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
 631.173 +	 * handler needs to be able to wait for further keyboard interrupts, which can't
 631.174 +	 * come through until ia64_eoi() has been done.
 631.175 +	 */
 631.176 +	irq_exit();
 631.177 +}
 631.178 +
 631.179 +#ifdef  CONFIG_VTI
 631.180 +#define vmx_irq_enter()		\
 631.181 +	add_preempt_count(HARDIRQ_OFFSET);
 631.182 +
 631.183 +/* Softirqs are now checked when leaving the hypervisor; otherwise the
 631.184 + * scheduler softirq would be executed too early.
 631.185 + */
 631.186 +#define vmx_irq_exit(void)	\
 631.187 +	sub_preempt_count(HARDIRQ_OFFSET);
 631.188 +/*
 631.189 + * This is where the IVT branches to when we get an external
 631.190 + * interrupt.  It then dispatches to the correct hardware IRQ
 631.191 + * handler via a function pointer.
 631.192 + */
 631.193 +void
 631.194 +vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 631.195 +{
 631.196 +	unsigned long saved_tpr;
 631.197 +	int	wake_dom0 = 0;
 631.198 +
 631.199 +
 631.200 +#if IRQ_DEBUG
 631.201 +	{
 631.202 +		unsigned long bsp, sp;
 631.203 +
 631.204 +		/*
 631.205 +		 * Note: if the interrupt happened while executing in
 631.206 +		 * the context switch routine (ia64_switch_to), we may
 631.207 +		 * get a spurious stack overflow here.  This is
 631.208 +		 * because the register and the memory stack are not
 631.209 +		 * switched atomically.
 631.210 +		 */
 631.211 +		bsp = ia64_getreg(_IA64_REG_AR_BSP);
 631.212 +		sp = ia64_getreg(_IA64_REG_SP);
 631.213 +
 631.214 +		if ((sp - bsp) < 1024) {
 631.215 +			static unsigned char count;
 631.216 +			static long last_time;
 631.217 +
 631.218 +			if (jiffies - last_time > 5*HZ)
 631.219 +				count = 0;
 631.220 +			if (++count < 5) {
 631.221 +				last_time = jiffies;
 631.222 +				printk("ia64_handle_irq: DANGER: less than "
 631.223 +				       "1KB of free stack space!!\n"
 631.224 +				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
 631.225 +			}
 631.226 +		}
 631.227 +	}
 631.228 +#endif /* IRQ_DEBUG */
 631.229 +
 631.230 +	/*
 631.231 +	 * Always set TPR to limit maximum interrupt nesting depth to
 631.232 +	 * 16 (without this, it would be ~240, which could easily lead
 631.233 +	 * to kernel stack overflows).
 631.234 +	 */
 631.235 +	vmx_irq_enter();
 631.236 +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 631.237 +	ia64_srlz_d();
 631.238 +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 631.239 +	    if (!IS_RESCHEDULE(vector)) {
 631.240 +		ia64_setreg(_IA64_REG_CR_TPR, vector);
 631.241 +		ia64_srlz_d();
 631.242 +
 631.243 +		if (vector != IA64_TIMER_VECTOR) {
 631.244 +			/* FIXME: Leave IRQ re-route later */
 631.245 +			vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
 631.246 +			wake_dom0 = 1;
 631.247 +		}
 631.248 +		else {	// FIXME: Handle Timer only now
 631.249 +			__do_IRQ(local_vector_to_irq(vector), regs);
 631.250 +		}
 631.251 +		
 631.252 +		/*
 631.253 +		 * Disable interrupts and send EOI:
 631.254 +		 */
 631.255 +		local_irq_disable();
 631.256 +		ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
 631.257 +	    }
 631.258 +	    else {
 631.259 +                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
 631.260 +            }
 631.261 +	    ia64_eoi();
 631.262 +	    vector = ia64_get_ivr();
 631.263 +	}
 631.264 +	/*
 631.265 +	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
 631.266 +	 * handler needs to be able to wait for further keyboard interrupts, which can't
 631.267 +	 * come through until ia64_eoi() has been done.
 631.268 +	 */
 631.269 +	vmx_irq_exit();
 631.270 +	if ( wake_dom0 && current != dom0 ) 
 631.271 +		domain_wake(dom0->vcpu[0]);
 631.272 +}
 631.273 +#endif
 631.274 +
 631.275 +
 631.276 +#ifdef CONFIG_HOTPLUG_CPU
 631.277 +/*
 631.278 + * This function emulates interrupt processing when a CPU is about to be
 631.279 + * brought down.
 631.280 + */
 631.281 +void ia64_process_pending_intr(void)
 631.282 +{
 631.283 +	ia64_vector vector;
 631.284 +	unsigned long saved_tpr;
 631.285 +	extern unsigned int vectors_in_migration[NR_IRQS];
 631.286 +
 631.287 +	vector = ia64_get_ivr();
 631.288 +
 631.289 +	irq_enter();
 631.290 +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 631.291 +	ia64_srlz_d();
 631.292 +
 631.293 +	/*
 631.294 +	 * Perform normal interrupt style processing
 631.295 +	 */
 631.296 +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 631.297 +		if (!IS_RESCHEDULE(vector)) {
 631.298 +			ia64_setreg(_IA64_REG_CR_TPR, vector);
 631.299 +			ia64_srlz_d();
 631.300 +
 631.301 +			/*
 631.302 +			 * Call the IRQ handler as it would have been called
 631.303 +			 * from a real interrupt handler, passing NULL for
 631.304 +			 * pt_regs.  This path could probably share code with
 631.305 +			 * ia64_handle_irq().
 631.306 +			 */
 631.307 +			vectors_in_migration[local_vector_to_irq(vector)]=0;
 631.308 +			__do_IRQ(local_vector_to_irq(vector), NULL);
 631.309 +
 631.310 +			/*
 631.311 +			 * Disable interrupts and send EOI
 631.312 +			 */
 631.313 +			local_irq_disable();
 631.314 +			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
 631.315 +		}
 631.316 +		ia64_eoi();
 631.317 +		vector = ia64_get_ivr();
 631.318 +	}
 631.319 +	irq_exit();
 631.320 +}
 631.321 +#endif
 631.322 +
 631.323 +
 631.324 +#ifdef CONFIG_SMP
 631.325 +extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
 631.326 +
 631.327 +static struct irqaction ipi_irqaction = {
 631.328 +	.handler =	handle_IPI,
 631.329 +	.flags =	SA_INTERRUPT,
 631.330 +	.name =		"IPI"
 631.331 +};
 631.332 +#endif
 631.333 +
 631.334 +void
 631.335 +register_percpu_irq (ia64_vector vec, struct irqaction *action)
 631.336 +{
 631.337 +	irq_desc_t *desc;
 631.338 +	unsigned int irq;
 631.339 +
 631.340 +	for (irq = 0; irq < NR_IRQS; ++irq)
 631.341 +		if (irq_to_vector(irq) == vec) {
 631.342 +			desc = irq_descp(irq);
 631.343 +			desc->status |= IRQ_PER_CPU;
 631.344 +			desc->handler = &irq_type_ia64_lsapic;
 631.345 +			if (action)
 631.346 +				setup_irq(irq, action);
 631.347 +		}
 631.348 +}
 631.349 +
 631.350 +void __init
 631.351 +init_IRQ (void)
 631.352 +{
 631.353 +	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 631.354 +#ifdef CONFIG_SMP
 631.355 +	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 631.356 +#endif
 631.357 +#ifdef CONFIG_PERFMON
 631.358 +	pfm_init_percpu();
 631.359 +#endif
 631.360 +	platform_irq_init();
 631.361 +}
 631.362 +
 631.363 +void
 631.364 +ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 631.365 +{
 631.366 +	void __iomem *ipi_addr;
 631.367 +	unsigned long ipi_data;
 631.368 +	unsigned long phys_cpu_id;
 631.369 +
 631.370 +#ifdef CONFIG_SMP
 631.371 +	phys_cpu_id = cpu_physical_id(cpu);
 631.372 +#else
 631.373 +	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 631.374 +#endif
 631.375 +
 631.376 +	/*
 631.377 +	 * The cpu number is encoded in 8-bit ID and 8-bit EID fields.
 631.378 +	 */
 631.379 +
 631.380 +	ipi_data = (delivery_mode << 8) | (vector & 0xff);
 631.381 +	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
 631.382 +
 631.383 +	writeq(ipi_data, ipi_addr);
 631.384 +}
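
ia64_send_ipi() above builds its MMIO write from two encodings: the data word packs the delivery mode above an 8-bit vector, and the address selects a 16-byte slot in the processor interrupt block from the target's physical CPU id (8-bit ID and 8-bit EID), with the redirect hint in bit 3. A stand-alone sketch of the arithmetic, using a hypothetical interrupt-block base and LID fields:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ipi_base_addr = 0xfee00000UL;	/* hypothetical block base */
	unsigned id = 0x01, eid = 0x02;		/* hypothetical LID fields */
	unsigned long phys_cpu_id = (id << 8) | eid;
	int vector = 0xd0, delivery_mode = 0, redirect = 0;

	/* delivery mode sits above the 8-bit vector in the data word */
	unsigned long ipi_data = (delivery_mode << 8) | (vector & 0xff);
	/* 16 bytes per target; bit 3 carries the redirect hint */
	uint64_t ipi_addr = ipi_base_addr +
		((phys_cpu_id << 4) | ((redirect & 1) << 3));

	printf("writeq(0x%lx, 0x%llx)\n", ipi_data,
	       (unsigned long long)ipi_addr);
	return 0;
}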
   632.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   632.2 +++ b/xen/arch/ia64/linux-xen/mm_contig.c	Fri Aug 26 09:05:43 2005 +0000
   632.3 @@ -0,0 +1,305 @@
   632.4 +/*
   632.5 + * This file is subject to the terms and conditions of the GNU General Public
   632.6 + * License.  See the file "COPYING" in the main directory of this archive
   632.7 + * for more details.
   632.8 + *
   632.9 + * Copyright (C) 1998-2003 Hewlett-Packard Co
  632.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  632.11 + *	Stephane Eranian <eranian@hpl.hp.com>
  632.12 + * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
  632.13 + * Copyright (C) 1999 VA Linux Systems
  632.14 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  632.15 + * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
  632.16 + *
  632.17 + * Routines used by ia64 machines with contiguous (or virtually contiguous)
  632.18 + * memory.
  632.19 + */
  632.20 +#include <linux/config.h>
  632.21 +#include <linux/bootmem.h>
  632.22 +#include <linux/efi.h>
  632.23 +#include <linux/mm.h>
  632.24 +#include <linux/swap.h>
  632.25 +
  632.26 +#include <asm/meminit.h>
  632.27 +#include <asm/pgalloc.h>
  632.28 +#include <asm/pgtable.h>
  632.29 +#include <asm/sections.h>
  632.30 +#include <asm/mca.h>
  632.31 +
  632.32 +#ifdef CONFIG_VIRTUAL_MEM_MAP
  632.33 +static unsigned long num_dma_physpages;
  632.34 +#endif
  632.35 +
  632.36 +/**
  632.37 + * show_mem - display a memory statistics summary
  632.38 + *
  632.39 + * Just walks the pages in the system and describes where they're allocated.
  632.40 + */
  632.41 +#ifndef XEN
  632.42 +void
  632.43 +show_mem (void)
  632.44 +{
  632.45 +	int i, total = 0, reserved = 0;
  632.46 +	int shared = 0, cached = 0;
  632.47 +
  632.48 +	printk("Mem-info:\n");
  632.49 +	show_free_areas();
  632.50 +
  632.51 +	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
  632.52 +	i = max_mapnr;
  632.53 +	while (i-- > 0) {
  632.54 +		if (!pfn_valid(i))
  632.55 +			continue;
  632.56 +		total++;
  632.57 +		if (PageReserved(mem_map+i))
  632.58 +			reserved++;
  632.59 +		else if (PageSwapCache(mem_map+i))
  632.60 +			cached++;
  632.61 +		else if (page_count(mem_map + i))
  632.62 +			shared += page_count(mem_map + i) - 1;
  632.63 +	}
  632.64 +	printk("%d pages of RAM\n", total);
  632.65 +	printk("%d reserved pages\n", reserved);
  632.66 +	printk("%d pages shared\n", shared);
  632.67 +	printk("%d pages swap cached\n", cached);
  632.68 +	printk("%ld pages in page table cache\n", pgtable_cache_size);
  632.69 +}
  632.70 +#endif
  632.71 +
  632.72 +/* physical address where the bootmem map is located */
  632.73 +unsigned long bootmap_start;
  632.74 +
  632.75 +/**
  632.76 + * find_max_pfn - adjust the maximum page number callback
  632.77 + * @start: start of range
  632.78 + * @end: end of range
  632.79 + * @arg: address of pointer to global max_pfn variable
  632.80 + *
  632.81 + * Passed as a callback function to efi_memmap_walk() to determine the highest
  632.82 + * available page frame number in the system.
  632.83 + */
  632.84 +int
  632.85 +find_max_pfn (unsigned long start, unsigned long end, void *arg)
  632.86 +{
  632.87 +	unsigned long *max_pfnp = arg, pfn;
  632.88 +
  632.89 +	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
  632.90 +	if (pfn > *max_pfnp)
  632.91 +		*max_pfnp = pfn;
  632.92 +	return 0;
  632.93 +}
  632.94 +
  632.95 +/**
  632.96 + * find_bootmap_location - callback to find a memory area for the bootmap
  632.97 + * @start: start of region
  632.98 + * @end: end of region
  632.99 + * @arg: unused callback data
 632.100 + *
 632.101 + * Find a place to put the bootmap and return its starting address in
 632.102 + * bootmap_start.  This address must be page-aligned.
 632.103 + */
 632.104 +int
 632.105 +find_bootmap_location (unsigned long start, unsigned long end, void *arg)
 632.106 +{
 632.107 +	unsigned long needed = *(unsigned long *)arg;
 632.108 +	unsigned long range_start, range_end, free_start;
 632.109 +	int i;
 632.110 +
 632.111 +#if IGNORE_PFN0
 632.112 +	if (start == PAGE_OFFSET) {
 632.113 +		start += PAGE_SIZE;
 632.114 +		if (start >= end)
 632.115 +			return 0;
 632.116 +	}
 632.117 +#endif
 632.118 +
 632.119 +	free_start = PAGE_OFFSET;
 632.120 +
 632.121 +	for (i = 0; i < num_rsvd_regions; i++) {
 632.122 +		range_start = max(start, free_start);
 632.123 +		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
 632.124 +
 632.125 +		free_start = PAGE_ALIGN(rsvd_region[i].end);
 632.126 +
 632.127 +		if (range_end <= range_start)
 632.128 +			continue; /* skip over empty range */
 632.129 +
 632.130 +		if (range_end - range_start >= needed) {
 632.131 +			bootmap_start = __pa(range_start);
 632.132 +			return -1;	/* done */
 632.133 +		}
 632.134 +
 632.135 +		/* nothing more available in this segment */
 632.136 +		if (range_end == end)
 632.137 +			return 0;
 632.138 +	}
 632.139 +	return 0;
 632.140 +}
 632.141 +
 632.142 +/**
 632.143 + * find_memory - setup memory map
 632.144 + *
 632.145 + * Walk the EFI memory map and find usable memory for the system, taking
 632.146 + * into account reserved areas.
 632.147 + */
 632.148 +#ifndef XEN
 632.149 +void
 632.150 +find_memory (void)
 632.151 +{
 632.152 +	unsigned long bootmap_size;
 632.153 +
 632.154 +	reserve_memory();
 632.155 +
 632.156 +	/* first find highest page frame number */
 632.157 +	max_pfn = 0;
 632.158 +	efi_memmap_walk(find_max_pfn, &max_pfn);
 632.159 +
 632.160 +	/* how many bytes to cover all the pages */
 632.161 +	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
 632.162 +
 632.163 +	/* look for a location to hold the bootmap */
 632.164 +	bootmap_start = ~0UL;
 632.165 +	efi_memmap_walk(find_bootmap_location, &bootmap_size);
 632.166 +	if (bootmap_start == ~0UL)
 632.167 +		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
 632.168 +
 632.169 +	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
 632.170 +
 632.171 +	/* Free all available memory, then mark bootmem-map as being in use. */
 632.172 +	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
 632.173 +	reserve_bootmem(bootmap_start, bootmap_size);
 632.174 +
 632.175 +	find_initrd();
 632.176 +}
 632.177 +#endif
 632.178 +
 632.179 +#ifdef CONFIG_SMP
 632.180 +/**
 632.181 + * per_cpu_init - setup per-cpu variables
 632.182 + *
 632.183 + * Allocate and setup per-cpu data areas.
 632.184 + */
 632.185 +void *
 632.186 +per_cpu_init (void)
 632.187 +{
 632.188 +	void *cpu_data;
 632.189 +	int cpu;
 632.190 +
 632.191 +	/*
 632.192 +	 * get_free_pages() cannot be used before cpu_init() is done.  The BSP
 632.193 +	 * allocates "NR_CPUS" pages for all CPUs so that an AP need not call
 632.194 +	 * get_zeroed_page().
 632.195 +	 */
 632.196 +	if (smp_processor_id() == 0) {
 632.197 +		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 632.198 +					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 632.199 +		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 632.200 +			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 632.201 +			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
 632.202 +			cpu_data += PERCPU_PAGE_SIZE;
 632.203 +			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 632.204 +		}
 632.205 +	}
 632.206 +	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 632.207 +}
 632.208 +#endif /* CONFIG_SMP */
 632.209 +
 632.210 +static int
 632.211 +count_pages (u64 start, u64 end, void *arg)
 632.212 +{
 632.213 +	unsigned long *count = arg;
 632.214 +
 632.215 +	*count += (end - start) >> PAGE_SHIFT;
 632.216 +	return 0;
 632.217 +}
 632.218 +
 632.219 +#ifdef CONFIG_VIRTUAL_MEM_MAP
 632.220 +static int
 632.221 +count_dma_pages (u64 start, u64 end, void *arg)
 632.222 +{
 632.223 +	unsigned long *count = arg;
 632.224 +
 632.225 +	if (start < MAX_DMA_ADDRESS)
 632.226 +		*count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
 632.227 +	return 0;
 632.228 +}
 632.229 +#endif
 632.230 +
 632.231 +/*
 632.232 + * Set up the page tables.
 632.233 + */
 632.234 +
 632.235 +#ifndef XEN
 632.236 +void
 632.237 +paging_init (void)
 632.238 +{
 632.239 +	unsigned long max_dma;
 632.240 +	unsigned long zones_size[MAX_NR_ZONES];
 632.241 +#ifdef CONFIG_VIRTUAL_MEM_MAP
 632.242 +	unsigned long zholes_size[MAX_NR_ZONES];
 632.243 +	unsigned long max_gap;
 632.244 +#endif
 632.245 +
 632.246 +	/* initialize mem_map[] */
 632.247 +
 632.248 +	memset(zones_size, 0, sizeof(zones_size));
 632.249 +
 632.250 +	num_physpages = 0;
 632.251 +	efi_memmap_walk(count_pages, &num_physpages);
 632.252 +
 632.253 +	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 632.254 +
 632.255 +#ifdef CONFIG_VIRTUAL_MEM_MAP
 632.256 +	memset(zholes_size, 0, sizeof(zholes_size));
 632.257 +
 632.258 +	num_dma_physpages = 0;
 632.259 +	efi_memmap_walk(count_dma_pages, &num_dma_physpages);
 632.260 +
 632.261 +	if (max_low_pfn < max_dma) {
 632.262 +		zones_size[ZONE_DMA] = max_low_pfn;
 632.263 +		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
 632.264 +	} else {
 632.265 +		zones_size[ZONE_DMA] = max_dma;
 632.266 +		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
 632.267 +		if (num_physpages > num_dma_physpages) {
 632.268 +			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
 632.269 +			zholes_size[ZONE_NORMAL] =
 632.270 +				((max_low_pfn - max_dma) -
 632.271 +				 (num_physpages - num_dma_physpages));
 632.272 +		}
 632.273 +	}
 632.274 +
 632.275 +	max_gap = 0;
 632.276 +	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 632.277 +	if (max_gap < LARGE_GAP) {
 632.278 +		vmem_map = (struct page *) 0;
 632.279 +		free_area_init_node(0, &contig_page_data, zones_size, 0,
 632.280 +				    zholes_size);
 632.281 +	} else {
 632.282 +		unsigned long map_size;
 632.283 +
 632.284 +		/* allocate virtual_mem_map */
 632.285 +
 632.286 +		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
 632.287 +		vmalloc_end -= map_size;
 632.288 +		vmem_map = (struct page *) vmalloc_end;
 632.289 +		efi_memmap_walk(create_mem_map_page_table, NULL);
 632.290 +
 632.291 +		mem_map = contig_page_data.node_mem_map = vmem_map;
 632.292 +		free_area_init_node(0, &contig_page_data, zones_size,
 632.293 +				    0, zholes_size);
 632.294 +
 632.295 +		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 632.296 +	}
 632.297 +#else /* !CONFIG_VIRTUAL_MEM_MAP */
 632.298 +	if (max_low_pfn < max_dma)
 632.299 +		zones_size[ZONE_DMA] = max_low_pfn;
 632.300 +	else {
 632.301 +		zones_size[ZONE_DMA] = max_dma;
 632.302 +		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
 632.303 +	}
 632.304 +	free_area_init(zones_size);
 632.305 +#endif /* !CONFIG_VIRTUAL_MEM_MAP */
 632.306 +	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 632.307 +}
 632.308 +#endif /* !XEN */
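
find_max_pfn(), find_bootmap_location() and count_pages() above all follow the efi_memmap_walk() callback contract: the walker invokes the callback once per usable memory range and stops early when the callback returns non-zero, which is how find_bootmap_location() ends the walk once it has a home for the bootmap. A self-contained sketch of that contract, with a hypothetical range table standing in for the EFI memory map:

#include <stdio.h>

typedef int (*walk_cb)(unsigned long start, unsigned long end, void *arg);

/* hypothetical usable ranges standing in for the EFI memory map */
static struct { unsigned long start, end; } ranges[] = {
	{ 0x00000000, 0x0009f000 },
	{ 0x00100000, 0x3fe00000 },
};

/* walk every range; a non-zero return from the callback stops the walk */
static void memmap_walk(walk_cb cb, void *arg)
{
	unsigned i;

	for (i = 0; i < sizeof(ranges)/sizeof(ranges[0]); i++)
		if (cb(ranges[i].start, ranges[i].end, arg))
			break;
}

static int count_bytes(unsigned long start, unsigned long end, void *arg)
{
	*(unsigned long *)arg += end - start;
	return 0;		/* keep walking */
}

int main(void)
{
	unsigned long total = 0;

	memmap_walk(count_bytes, &total);
	printf("%lu bytes usable\n", total);
	return 0;
}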
   633.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   633.2 +++ b/xen/arch/ia64/linux-xen/pal.S	Fri Aug 26 09:05:43 2005 +0000
   633.3 @@ -0,0 +1,310 @@
   633.4 +/*
   633.5 + * PAL Firmware support
   633.6 + * IA-64 Processor Programmers Reference Vol 2
   633.7 + *
   633.8 + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
   633.9 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  633.10 + * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
  633.11 + *	David Mosberger <davidm@hpl.hp.com>
  633.12 + *	Stephane Eranian <eranian@hpl.hp.com>
  633.13 + *
  633.14 + * 05/22/2000 eranian Added support for stacked register calls
  633.15 + * 05/24/2000 eranian Added support for physical mode static calls
  633.16 + */
  633.17 +
  633.18 +#include <asm/asmmacro.h>
  633.19 +#include <asm/processor.h>
  633.20 +
  633.21 +	.data
  633.22 +pal_entry_point:
  633.23 +	data8 ia64_pal_default_handler
  633.24 +	.text
  633.25 +
  633.26 +/*
  633.27 + * Set the PAL entry point address.  This could be written in C code, but we do it here
  633.28 + * to keep it all in one module (besides, it's so trivial that it's
  633.29 + * not a big deal).
  633.30 + *
  633.31 + * in0		Address of the PAL entry point (text address, NOT a function descriptor).
  633.32 + */
  633.33 +GLOBAL_ENTRY(ia64_pal_handler_init)
  633.34 +	alloc r3=ar.pfs,1,0,0,0
  633.35 +	movl r2=pal_entry_point
  633.36 +	;;
  633.37 +	st8 [r2]=in0
  633.38 +	br.ret.sptk.many rp
  633.39 +END(ia64_pal_handler_init)
  633.40 +
  633.41 +/*
  633.42 + * Default PAL call handler.  This needs to be coded in assembly because it uses
  633.43 + * the static calling convention, i.e., the RSE may not be used and calls are
  633.44 + * done via "br.cond" (not "br.call").
  633.45 + */
  633.46 +GLOBAL_ENTRY(ia64_pal_default_handler)
  633.47 +	mov r8=-1
  633.48 +	br.cond.sptk.many rp
  633.49 +END(ia64_pal_default_handler)
  633.50 +
  633.51 +/*
  633.52 + * Make a PAL call using the static calling convention.
  633.53 + *
  633.54 + * in0         Index of PAL service
  633.55 + * in1 - in3   Remaining PAL arguments
  633.56 + * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
  633.57 + *
  633.58 + */
  633.59 +GLOBAL_ENTRY(ia64_pal_call_static)
  633.60 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
  633.61 +	alloc loc1 = ar.pfs,5,5,0,0
  633.62 +	movl loc2 = pal_entry_point
  633.63 +1:	{
  633.64 +	  mov r28 = in0
  633.65 +	  mov r29 = in1
  633.66 +	  mov r8 = ip
  633.67 +	}
  633.68 +	;;
  633.69 +	ld8 loc2 = [loc2]		// loc2 <- entry point
  633.70 +	tbit.nz p6,p7 = in4, 0
  633.71 +	adds r8 = 1f-1b,r8
  633.72 +	mov loc4=ar.rsc			// save RSE configuration
  633.73 +	;;
  633.74 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
  633.75 +	mov loc3 = psr
  633.76 +	mov loc0 = rp
  633.77 +	.body
  633.78 +	mov r30 = in2
  633.79 +
  633.80 +(p6)	rsm psr.i | psr.ic
  633.81 +	mov r31 = in3
  633.82 +	mov b7 = loc2
  633.83 +
  633.84 +(p7)	rsm psr.i
  633.85 +	;;
  633.86 +(p6)	srlz.i
  633.87 +	mov rp = r8
  633.88 +	br.cond.sptk.many b7
  633.89 +1:	mov psr.l = loc3
  633.90 +	mov ar.rsc = loc4		// restore RSE configuration
  633.91 +	mov ar.pfs = loc1
  633.92 +	mov rp = loc0
  633.93 +	;;
  633.94 +	srlz.d				// serialize restoration of psr.l
  633.95 +	br.ret.sptk.many b0
  633.96 +END(ia64_pal_call_static)
  633.97 +
  633.98 +/*
  633.99 + * Make a PAL call using the stacked registers calling convention.
 633.100 + *
 633.101 + * Inputs:
 633.102 + * 	in0         Index of PAL service
 633.103 + * 	in1 - in3   Remaining PAL arguments
 633.104 + */
 633.105 +GLOBAL_ENTRY(ia64_pal_call_stacked)
 633.106 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
 633.107 +	alloc loc1 = ar.pfs,4,4,4,0
 633.108 +	movl loc2 = pal_entry_point
 633.109 +
 633.110 +	mov r28  = in0			// Index MUST be copied to r28
 633.111 +	mov out0 = in0			// AND in0 of PAL function
 633.112 +	mov loc0 = rp
 633.113 +	.body
 633.114 +	;;
 633.115 +	ld8 loc2 = [loc2]		// loc2 <- entry point
 633.116 +	mov out1 = in1
 633.117 +	mov out2 = in2
 633.118 +	mov out3 = in3
 633.119 +	mov loc3 = psr
 633.120 +	;;
 633.121 +	rsm psr.i
 633.122 +	mov b7 = loc2
 633.123 +	;;
 633.124 +	br.call.sptk.many rp=b7		// now make the call
 633.125 +.ret0:	mov psr.l  = loc3
 633.126 +	mov ar.pfs = loc1
 633.127 +	mov rp = loc0
 633.128 +	;;
 633.129 +	srlz.d				// serialize restoration of psr.l
 633.130 +	br.ret.sptk.many b0
 633.131 +END(ia64_pal_call_stacked)
 633.132 +
 633.133 +/*
 633.134 + * Make a physical mode PAL call using the static registers calling convention.
 633.135 + *
 633.136 + * Inputs:
 633.137 + * 	in0         Index of PAL service
 633.138 + * 	in1 - in3   Remaining PAL arguments
 633.139 + *
 633.140 + * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
 633.141 + * So we don't need to clear them.
 633.142 + */
 633.143 +#define PAL_PSR_BITS_TO_CLEAR							\
 633.144 +	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT  | IA64_PSR_DB | IA64_PSR_RT |	\
 633.145 +	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |		\
 633.146 +	 IA64_PSR_DFL | IA64_PSR_DFH)
 633.147 +
 633.148 +#define PAL_PSR_BITS_TO_SET							\
 633.149 +	(IA64_PSR_BN)
 633.150 +
 633.151 +
 633.152 +GLOBAL_ENTRY(ia64_pal_call_phys_static)
 633.153 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
 633.154 +	alloc loc1 = ar.pfs,4,7,0,0
 633.155 +	movl loc2 = pal_entry_point
 633.156 +1:	{
 633.157 +	  mov r28  = in0		// copy procedure index
 633.158 +	  mov r8   = ip			// save ip to compute branch
 633.159 +	  mov loc0 = rp			// save rp
 633.160 +	}
 633.161 +	.body
 633.162 +	;;
 633.163 +	ld8 loc2 = [loc2]		// loc2 <- entry point
 633.164 +	mov r29  = in1			// first argument
 633.165 +	mov r30  = in2			// copy arg2
 633.166 +	mov r31  = in3			// copy arg3
 633.167 +	;;
 633.168 +	mov loc3 = psr			// save psr
 633.169 +	adds r8  = 1f-1b,r8		// calculate return address for call
 633.170 +	;;
 633.171 +	mov loc4=ar.rsc			// save RSE configuration
 633.172 +#ifdef XEN
 633.173 +	dep.z loc2=loc2,0,60		// convert pal entry point to physical
 633.174 +#else // XEN
 633.175 +	dep.z loc2=loc2,0,61		// convert pal entry point to physical
 633.176 +#endif // XEN
 633.177 +	tpa r8=r8			// convert rp to physical
 633.178 +	;;
 633.179 +	mov b7 = loc2			// install target to branch reg
 633.180 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
 633.181 +	movl r16=PAL_PSR_BITS_TO_CLEAR
 633.182 +	movl r17=PAL_PSR_BITS_TO_SET
 633.183 +	;;
 633.184 +	or loc3=loc3,r17		// add in psr the bits to set
 633.185 +	;;
 633.186 +	andcm r16=loc3,r16		// removes bits to clear from psr
 633.187 +	br.call.sptk.many rp=ia64_switch_mode_phys
 633.188 +.ret1:	mov rp = r8			// install return address (physical)
 633.189 +	mov loc5 = r19
 633.190 +	mov loc6 = r20
 633.191 +	br.cond.sptk.many b7
 633.192 +1:
 633.193 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
 633.194 +	mov r16=loc3			// r16= original psr
 633.195 +	mov r19=loc5
 633.196 +	mov r20=loc6
 633.197 +	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
 633.198 +.ret2:
 633.199 +	mov psr.l = loc3		// restore init PSR
 633.200 +
 633.201 +	mov ar.pfs = loc1
 633.202 +	mov rp = loc0
 633.203 +	;;
 633.204 +	mov ar.rsc=loc4			// restore RSE configuration
 633.205 +	srlz.d				// serialize restoration of psr.l
 633.206 +	br.ret.sptk.many b0
 633.207 +END(ia64_pal_call_phys_static)
 633.208 +
 633.209 +/*
 633.210 + * Make a PAL call using the stacked registers in physical mode.
 633.211 + *
 633.212 + * Inputs:
 633.213 + * 	in0         Index of PAL service
 633.214 + * 	in1 - in3   Remaining PAL arguments
 633.215 + */
 633.216 +GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 633.217 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
 633.218 +	alloc	loc1 = ar.pfs,5,7,4,0
 633.219 +	movl	loc2 = pal_entry_point
 633.220 +1:	{
 633.221 +	  mov r28  = in0		// copy procedure index
 633.222 +	  mov loc0 = rp		// save rp
 633.223 +	}
 633.224 +	.body
 633.225 +	;;
 633.226 +	ld8 loc2 = [loc2]		// loc2 <- entry point
 633.227 +	mov out0 = in0		// first argument
 633.228 +	mov out1 = in1		// copy arg2
 633.229 +	mov out2 = in2		// copy arg3
 633.230 +	mov out3 = in3		// copy arg4
 633.231 +	;;
 633.232 +	mov loc3 = psr		// save psr
 633.233 +	;;
 633.234 +	mov loc4=ar.rsc			// save RSE configuration
 633.235 +#ifdef XEN
 633.236 +	dep.z loc2=loc2,0,60		// convert pal entry point to physical
 633.237 +#else // XEN
 633.238 +	dep.z loc2=loc2,0,61		// convert pal entry point to physical
 633.239 +#endif // XEN
 633.240 +	;;
 633.241 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
 633.242 +	movl r16=PAL_PSR_BITS_TO_CLEAR
 633.243 +	movl r17=PAL_PSR_BITS_TO_SET
 633.244 +	;;
 633.245 +	or loc3=loc3,r17		// add in psr the bits to set
 633.246 +	mov b7 = loc2			// install target to branch reg
 633.247 +	;;
 633.248 +	andcm r16=loc3,r16		// removes bits to clear from psr
 633.249 +	br.call.sptk.many rp=ia64_switch_mode_phys
 633.250 +.ret6:
 633.251 +	mov loc5 = r19
 633.252 +	mov loc6 = r20
 633.253 +	br.call.sptk.many rp=b7		// now make the call
 633.254 +.ret7:
 633.255 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
 633.256 +	mov r16=loc3			// r16= original psr
 633.257 +	mov r19=loc5
 633.258 +	mov r20=loc6
 633.259 +	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
 633.260 +
 633.261 +.ret8:	mov psr.l  = loc3		// restore init PSR
 633.262 +	mov ar.pfs = loc1
 633.263 +	mov rp = loc0
 633.264 +	;;
 633.265 +	mov ar.rsc=loc4			// restore RSE configuration
 633.266 +	srlz.d				// serialize restoration of psr.l
 633.267 +	br.ret.sptk.many b0
 633.268 +END(ia64_pal_call_phys_stacked)
 633.269 +
 633.270 +/*
 633.271 + * Save scratch fp regs which aren't saved in pt_regs already (fp10-fp15).
 633.272 + *
 633.273 + * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
 633.274 + * regs in the fp-low partition.
 633.275 + *
 633.276 + * Inputs:
 633.277 + *      in0	Address of stack storage for fp regs
 633.278 + */
 633.279 +GLOBAL_ENTRY(ia64_save_scratch_fpregs)
 633.280 +	alloc r3=ar.pfs,1,0,0,0
 633.281 +	add r2=16,in0
 633.282 +	;;
 633.283 +	stf.spill [in0] = f10,32
 633.284 +	stf.spill [r2]  = f11,32
 633.285 +	;;
 633.286 +	stf.spill [in0] = f12,32
 633.287 +	stf.spill [r2]  = f13,32
 633.288 +	;;
 633.289 +	stf.spill [in0] = f14,32
 633.290 +	stf.spill [r2]  = f15,32
 633.291 +	br.ret.sptk.many rp
 633.292 +END(ia64_save_scratch_fpregs)
 633.293 +
 633.294 +/*
 633.295 + * Load scratch fp regs (fp10-fp15)
 633.296 + *
 633.297 + * Inputs:
 633.298 + *      in0	Address of stack storage for fp regs
 633.299 + */
 633.300 +GLOBAL_ENTRY(ia64_load_scratch_fpregs)
 633.301 +	alloc r3=ar.pfs,1,0,0,0
 633.302 +	add r2=16,in0
 633.303 +	;;
 633.304 +	ldf.fill  f10 = [in0],32
 633.305 +	ldf.fill  f11 = [r2],32
 633.306 +	;;
 633.307 +	ldf.fill  f12 = [in0],32
 633.308 +	ldf.fill  f13 = [r2],32
 633.309 +	;;
 633.310 +	ldf.fill  f14 = [in0],32
 633.311 +	ldf.fill  f15 = [r2],32
 633.312 +	br.ret.sptk.many rp
 633.313 +END(ia64_load_scratch_fpregs)
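
Callers reach these entry points through C wrappers that bracket the call with the fp-scratch helpers above, since firmware may clobber fp10-fp15. A sketch of that bracketing, modelled on the PAL_CALL() macro from asm/pal.h of this era; the struct layouts and the dummy bodies here are assumptions for illustration:

#include <stdio.h>

struct ia64_fpreg { unsigned long u[2]; };	/* assumed 16-byte layout */
struct ia64_pal_retval { long status, v0, v1, v2; };

/* dummies standing in for the assembly entry points above */
static void ia64_save_scratch_fpregs(struct ia64_fpreg *fr) { (void)fr; }
static void ia64_load_scratch_fpregs(struct ia64_fpreg *fr) { (void)fr; }
static struct ia64_pal_retval
ia64_pal_call_static(unsigned long index, unsigned long a1,
		     unsigned long a2, unsigned long a3, unsigned long clear_ic)
{
	struct ia64_pal_retval rv = { 0, 0, 0, 0 };
	(void)index; (void)a1; (void)a2; (void)a3; (void)clear_ic;
	return rv;
}

/* bracket the PAL call: fp10-fp15 must survive the firmware call */
#define PAL_CALL(iprv, a0, a1, a2, a3) do {			\
	struct ia64_fpreg fr[6];				\
	ia64_save_scratch_fpregs(fr);				\
	(iprv) = ia64_pal_call_static(a0, a1, a2, a3, 0);	\
	ia64_load_scratch_fpregs(fr);				\
} while (0)

int main(void)
{
	struct ia64_pal_retval iprv;

	PAL_CALL(iprv, 6 /* hypothetical PAL index */, 0, 0, 0);
	printf("status = %ld\n", iprv.status);
	return 0;
}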
   634.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   634.2 +++ b/xen/arch/ia64/linux-xen/setup.c	Fri Aug 26 09:05:43 2005 +0000
   634.3 @@ -0,0 +1,772 @@
   634.4 +/*
   634.5 + * Architecture-specific setup.
   634.6 + *
   634.7 + * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
   634.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   634.9 + *	Stephane Eranian <eranian@hpl.hp.com>
  634.10 + * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
  634.11 + * Copyright (C) 1999 VA Linux Systems
  634.12 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  634.13 + *
  634.14 + * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
  634.15 + * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
  634.16 + * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
  634.17 + * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
  634.18 + * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
  634.19 + * 01/07/99 S.Eranian	added the support for command line argument
  634.20 + * 06/24/99 W.Drummond	added boot_cpu_data.
  634.21 + */
  634.22 +#include <linux/config.h>
  634.23 +#include <linux/module.h>
  634.24 +#include <linux/init.h>
  634.25 +
  634.26 +#include <linux/acpi.h>
  634.27 +#include <linux/bootmem.h>
  634.28 +#include <linux/console.h>
  634.29 +#include <linux/delay.h>
  634.30 +#include <linux/kernel.h>
  634.31 +#include <linux/reboot.h>
  634.32 +#include <linux/sched.h>
  634.33 +#include <linux/seq_file.h>
  634.34 +#include <linux/string.h>
  634.35 +#include <linux/threads.h>
  634.36 +#include <linux/tty.h>
  634.37 +#include <linux/serial.h>
  634.38 +#include <linux/serial_core.h>
  634.39 +#include <linux/efi.h>
  634.40 +#include <linux/initrd.h>
  634.41 +
  634.42 +#include <asm/ia32.h>
  634.43 +#include <asm/machvec.h>
  634.44 +#include <asm/mca.h>
  634.45 +#include <asm/meminit.h>
  634.46 +#include <asm/page.h>
  634.47 +#include <asm/patch.h>
  634.48 +#include <asm/pgtable.h>
  634.49 +#include <asm/processor.h>
  634.50 +#include <asm/sal.h>
  634.51 +#include <asm/sections.h>
  634.52 +#include <asm/serial.h>
  634.53 +#include <asm/setup.h>
  634.54 +#include <asm/smp.h>
  634.55 +#include <asm/system.h>
  634.56 +#include <asm/unistd.h>
  634.57 +#include <asm/vmx.h>
  634.58 +#include <asm/io.h>
  634.59 +
  634.60 +#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
  634.61 +# error "struct cpuinfo_ia64 too big!"
  634.62 +#endif
  634.63 +
  634.64 +#ifdef CONFIG_SMP
  634.65 +unsigned long __per_cpu_offset[NR_CPUS];
  634.66 +EXPORT_SYMBOL(__per_cpu_offset);
  634.67 +#endif
  634.68 +
  634.69 +DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  634.70 +DEFINE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
  634.71 +DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
  634.72 +DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
  634.73 +unsigned long ia64_cycles_per_usec;
  634.74 +struct ia64_boot_param *ia64_boot_param;
  634.75 +struct screen_info screen_info;
  634.76 +
  634.77 +unsigned long ia64_max_cacheline_size;
  634.78 +unsigned long ia64_iobase;	/* virtual address for I/O accesses */
  634.79 +EXPORT_SYMBOL(ia64_iobase);
  634.80 +struct io_space io_space[MAX_IO_SPACES];
  634.81 +EXPORT_SYMBOL(io_space);
  634.82 +unsigned int num_io_spaces;
  634.83 +
  634.84 +unsigned char aux_device_present = 0xaa;        /* XXX remove this when legacy I/O is gone */
  634.85 +
  634.86 +/*
  634.87 + * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
  634.88 + * mask specifies a mask of address bits that must be 0 in order for two buffers to be
  634.89 + * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
  634.90 + * address of the second buffer must be aligned to (merge_mask+1) in order to be
  634.91 + * mergeable).  By default, we assume there is no I/O MMU which can merge physically
  634.92 + * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
  634.93 + * page-size of 2^64.
  634.94 + */
  634.95 +unsigned long ia64_max_iommu_merge_mask = ~0UL;
  634.96 +EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
  634.97 +
  634.98 +/*
  634.99 + * We use a special marker for the end of memory and it uses the extra (+1) slot
 634.100 + */
 634.101 +struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
 634.102 +int num_rsvd_regions;
 634.103 +
 634.104 +
 634.105 +/*
 634.106 + * Filter incoming memory segments based on the primitive map created from the boot
 634.107 + * parameters. Segments contained in the map are removed from the memory ranges. A
 634.108 + * caller-specified function is called with the memory ranges that remain after filtering.
 634.109 + * This routine does not assume the incoming segments are sorted.
 634.110 + */
 634.111 +int
 634.112 +filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
 634.113 +{
 634.114 +	unsigned long range_start, range_end, prev_start;
 634.115 +	void (*func)(unsigned long, unsigned long, int);
 634.116 +	int i;
 634.117 +
 634.118 +#if IGNORE_PFN0
 634.119 +	if (start == PAGE_OFFSET) {
 634.120 +		printk(KERN_WARNING "warning: skipping physical page 0\n");
 634.121 +		start += PAGE_SIZE;
 634.122 +		if (start >= end) return 0;
 634.123 +	}
 634.124 +#endif
 634.125 +	/*
 634.126 +	 * lowest possible address (walker uses virtual)
 634.127 +	 */
 634.128 +	prev_start = PAGE_OFFSET;
 634.129 +	func = arg;
 634.130 +
 634.131 +	for (i = 0; i < num_rsvd_regions; ++i) {
 634.132 +		range_start = max(start, prev_start);
 634.133 +		range_end   = min(end, rsvd_region[i].start);
 634.134 +
 634.135 +		if (range_start < range_end)
 634.136 +#ifdef XEN
 634.137 +		{
 634.138 +		/* init_boot_pages requires "ps, pe" */
 634.139 +			printk("Init boot pages: 0x%lx -> 0x%lx.\n",
 634.140 +				__pa(range_start), __pa(range_end));
 634.141 +			(*func)(__pa(range_start), __pa(range_end), 0);
 634.142 +		}
 634.143 +#else
 634.144 +			call_pernode_memory(__pa(range_start), range_end - range_start, func);
 634.145 +#endif
 634.146 +
 634.147 +		/* nothing more available in this segment */
 634.148 +		if (range_end == end) return 0;
 634.149 +
 634.150 +		prev_start = rsvd_region[i].end;
 634.151 +	}
 634.152 +	/* end of memory marker allows full processing inside loop body */
 634.153 +	return 0;
 634.154 +}
 634.155 +
 634.156 +static void
 634.157 +sort_regions (struct rsvd_region *rsvd_region, int max)
 634.158 +{
 634.159 +	int j;
 634.160 +
 634.161 +	/* simple bubble sorting */
 634.162 +	while (max--) {
 634.163 +		for (j = 0; j < max; ++j) {
 634.164 +			if (rsvd_region[j].start > rsvd_region[j+1].start) {
 634.165 +				struct rsvd_region tmp;
 634.166 +				tmp = rsvd_region[j];
 634.167 +				rsvd_region[j] = rsvd_region[j + 1];
 634.168 +				rsvd_region[j + 1] = tmp;
 634.169 +			}
 634.170 +		}
 634.171 +	}
 634.172 +}
 634.173 +
 634.174 +/**
 634.175 + * reserve_memory - setup reserved memory areas
 634.176 + *
 634.177 + * Setup the reserved memory areas set aside for the boot parameters,
 634.178 + * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 634.179 + * see include/asm-ia64/meminit.h if you need to define more.
 634.180 + */
 634.181 +void
 634.182 +reserve_memory (void)
 634.183 +{
 634.184 +	int n = 0;
 634.185 +
 634.186 +	/*
 634.187 +	 * none of the entries in this table overlap
 634.188 +	 */
 634.189 +	rsvd_region[n].start = (unsigned long) ia64_boot_param;
 634.190 +	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
 634.191 +	n++;
 634.192 +
 634.193 +	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
 634.194 +	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
 634.195 +	n++;
 634.196 +
 634.197 +	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
 634.198 +	rsvd_region[n].end   = (rsvd_region[n].start
 634.199 +				+ strlen(__va(ia64_boot_param->command_line)) + 1);
 634.200 +	n++;
 634.201 +
 634.202 +	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
 634.203 +#ifdef XEN
 634.204 +	/* Reserve xen image/bitmap/xen-heap */
 634.205 +	rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
 634.206 +#else
 634.207 +	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
 634.208 +#endif
 634.209 +	n++;
 634.210 +
 634.211 +#ifdef CONFIG_BLK_DEV_INITRD
 634.212 +	if (ia64_boot_param->initrd_start) {
 634.213 +		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
 634.214 +		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
 634.215 +		n++;
 634.216 +	}
 634.217 +#endif
 634.218 +
 634.219 +	/* end of memory marker */
 634.220 +	rsvd_region[n].start = ~0UL;
 634.221 +	rsvd_region[n].end   = ~0UL;
 634.222 +	n++;
 634.223 +
 634.224 +	num_rsvd_regions = n;
 634.225 +
 634.226 +	sort_regions(rsvd_region, num_rsvd_regions);
 634.227 +}
 634.228 +
 634.229 +/**
 634.230 + * find_initrd - get initrd parameters from the boot parameter structure
 634.231 + *
 634.232 + * Grab the initrd start and end from the boot parameter struct given us by
 634.233 + * the boot loader.
 634.234 + */
 634.235 +void
 634.236 +find_initrd (void)
 634.237 +{
 634.238 +#ifdef CONFIG_BLK_DEV_INITRD
 634.239 +	if (ia64_boot_param->initrd_start) {
 634.240 +		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
 634.241 +		initrd_end   = initrd_start+ia64_boot_param->initrd_size;
 634.242 +
 634.243 +		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
 634.244 +		       initrd_start, ia64_boot_param->initrd_size);
 634.245 +	}
 634.246 +#endif
 634.247 +}
 634.248 +
 634.249 +static void __init
 634.250 +io_port_init (void)
 634.251 +{
 634.252 +	extern unsigned long ia64_iobase;
 634.253 +	unsigned long phys_iobase;
 634.254 +
 634.255 +	/*
 634.256 +	 *  Set `iobase' to the appropriate address in region 6 (uncached access range).
 634.257 +	 *
 634.258 +	 *  The EFI memory map is the "preferred" location to get the I/O port space base,
 634.259 +	 *  rather than relying on AR.KR0. This should become clearer in future SAL
 634.260 +	 *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
 634.261 +	 *  found in the memory map.
 634.262 +	 */
 634.263 +	phys_iobase = efi_get_iobase();
 634.264 +	if (phys_iobase)
 634.265 +		/* set AR.KR0 since this is all we use it for anyway */
 634.266 +		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
 634.267 +	else {
 634.268 +		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
 634.269 +		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
 634.270 +		       "to AR.KR0\n");
 634.271 +		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
 634.272 +	}
 634.273 +	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
 634.274 +
 634.275 +	/* setup legacy IO port space */
 634.276 +	io_space[0].mmio_base = ia64_iobase;
 634.277 +	io_space[0].sparse = 1;
 634.278 +	num_io_spaces = 1;
 634.279 +}
 634.280 +
 634.281 +/**
 634.282 + * early_console_setup - setup debugging console
 634.283 + *
 634.284 + * Consoles started here require little enough setup that we can start using
 634.285 + * them very early in the boot process, either right after the machine
 634.286 + * vector initialization, or even before if the drivers can detect their hw.
 634.287 + *
 634.288 + * Returns non-zero if a console couldn't be setup.
 634.289 + */
 634.290 +static inline int __init
 634.291 +early_console_setup (char *cmdline)
 634.292 +{
 634.293 +#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
 634.294 +	{
 634.295 +		extern int sn_serial_console_early_setup(void);
 634.296 +		if (!sn_serial_console_early_setup())
 634.297 +			return 0;
 634.298 +	}
 634.299 +#endif
 634.300 +#ifdef CONFIG_EFI_PCDP
 634.301 +	if (!efi_setup_pcdp_console(cmdline))
 634.302 +		return 0;
 634.303 +#endif
 634.304 +#ifdef CONFIG_SERIAL_8250_CONSOLE
 634.305 +	if (!early_serial_console_init(cmdline))
 634.306 +		return 0;
 634.307 +#endif
 634.308 +
 634.309 +	return -1;
 634.310 +}
 634.311 +
 634.312 +static inline void
 634.313 +mark_bsp_online (void)
 634.314 +{
 634.315 +#ifdef CONFIG_SMP
 634.316 +	/* If we register an early console, allow CPU 0 to printk */
 634.317 +	cpu_set(smp_processor_id(), cpu_online_map);
 634.318 +#endif
 634.319 +}
 634.320 +
 634.321 +void __init
 634.322 +#ifdef XEN
 634.323 +early_setup_arch (char **cmdline_p)
 634.324 +#else
 634.325 +setup_arch (char **cmdline_p)
 634.326 +#endif
 634.327 +{
 634.328 +	unw_init();
 634.329 +
 634.330 +	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 634.331 +
 634.332 +	*cmdline_p = __va(ia64_boot_param->command_line);
 634.333 +#ifdef XEN
 634.334 +	efi_init();
 634.335 +#else
 634.336 +	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 634.337 +
 634.338 +	efi_init();
 634.339 +	io_port_init();
 634.340 +#endif
 634.341 +
 634.342 +#ifdef CONFIG_IA64_GENERIC
 634.343 +	{
 634.344 +		const char *mvec_name = strstr (*cmdline_p, "machvec=");
 634.345 +		char str[64];
 634.346 +
 634.347 +		if (mvec_name) {
 634.348 +			const char *end;
 634.349 +			size_t len;
 634.350 +
 634.351 +			mvec_name += 8;
 634.352 +			end = strchr (mvec_name, ' ');
 634.353 +			if (end)
 634.354 +				len = end - mvec_name;
 634.355 +			else
 634.356 +				len = strlen (mvec_name);
 634.357 +			len = min(len, sizeof (str) - 1);
 634.358 +			strncpy (str, mvec_name, len);
 634.359 +			str[len] = '\0';
 634.360 +			mvec_name = str;
 634.361 +		} else
 634.362 +			mvec_name = acpi_get_sysname();
 634.363 +		machvec_init(mvec_name);
 634.364 +	}
 634.365 +#endif
 634.366 +
 634.367 +#ifdef XEN
 634.368 +	early_cmdline_parse(cmdline_p);
 634.369 +	cmdline_parse(*cmdline_p);
 634.370 +#undef CONFIG_ACPI_BOOT
 634.371 +#endif
 634.372 +	if (early_console_setup(*cmdline_p) == 0)
 634.373 +		mark_bsp_online();
 634.374 +
 634.375 +#ifdef CONFIG_ACPI_BOOT
 634.376 +	/* Initialize the ACPI boot-time table parser */
 634.377 +	acpi_table_init();
 634.378 +# ifdef CONFIG_ACPI_NUMA
 634.379 +	acpi_numa_init();
 634.380 +# endif
 634.381 +#else
 634.382 +# ifdef CONFIG_SMP
 634.383 +	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
 634.384 +# endif
 634.385 +#endif /* CONFIG_ACPI_BOOT */
 634.386 +
 634.387 +#ifndef XEN
 634.388 +	find_memory();
 634.389 +#else
 634.390 +	io_port_init();
 634.391 +}
 634.392 +
 634.393 +void __init
 634.394 +late_setup_arch (char **cmdline_p)
 634.395 +{
 634.396 +#undef CONFIG_ACPI_BOOT
 634.397 +	acpi_table_init();
 634.398 +#endif
 634.399 +	/* process SAL system table: */
 634.400 +	ia64_sal_init(efi.sal_systab);
 634.401 +
 634.402 +#ifdef CONFIG_SMP
 634.403 +	cpu_physical_id(0) = hard_smp_processor_id();
 634.404 +#endif
 634.405 +
 634.406 +#ifdef XEN
 634.407 +	identify_vmx_feature();
 634.408 +#endif
 634.409 +
 634.410 +	cpu_init();	/* initialize the bootstrap CPU */
 634.411 +
 634.412 +#ifdef CONFIG_ACPI_BOOT
 634.413 +	acpi_boot_init();
 634.414 +#endif
 634.415 +
 634.416 +#ifdef CONFIG_VT
 634.417 +	if (!conswitchp) {
 634.418 +# if defined(CONFIG_DUMMY_CONSOLE)
 634.419 +		conswitchp = &dummy_con;
 634.420 +# endif
 634.421 +# if defined(CONFIG_VGA_CONSOLE)
 634.422 +		/*
 634.423 +		 * Non-legacy systems may route legacy VGA MMIO range to system
 634.424 +		 * memory.  vga_con probes the MMIO hole, so memory looks like
 634.425 +		 * a VGA device to it.  The EFI memory map can tell us if it's
 634.426 +		 * memory so we can avoid this problem.
 634.427 +		 */
 634.428 +		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
 634.429 +			conswitchp = &vga_con;
 634.430 +# endif
 634.431 +	}
 634.432 +#endif
 634.433 +
 634.434 +	/* enable IA-64 Machine Check Abort Handling unless disabled */
 634.435 +	if (!strstr(saved_command_line, "nomca"))
 634.436 +		ia64_mca_init();
 634.437 +
 634.438 +	platform_setup(cmdline_p);
 634.439 +	paging_init();
 634.440 +}
 634.441 +
 634.442 +/*
 634.443 + * Display cpu info for all cpu's.
 634.444 + */
 634.445 +static int
 634.446 +show_cpuinfo (struct seq_file *m, void *v)
 634.447 +{
 634.448 +#ifdef CONFIG_SMP
 634.449 +#	define lpj	c->loops_per_jiffy
 634.450 +#	define cpunum	c->cpu
 634.451 +#else
 634.452 +#	define lpj	loops_per_jiffy
 634.453 +#	define cpunum	0
 634.454 +#endif
 634.455 +	static struct {
 634.456 +		unsigned long mask;
 634.457 +		const char *feature_name;
 634.458 +	} feature_bits[] = {
 634.459 +		{ 1UL << 0, "branchlong" },
 634.460 +		{ 1UL << 1, "spontaneous deferral"},
 634.461 +		{ 1UL << 2, "16-byte atomic ops" }
 634.462 +	};
 634.463 +	char family[32], features[128], *cp, sep;
 634.464 +	struct cpuinfo_ia64 *c = v;
 634.465 +	unsigned long mask;
 634.466 +	int i;
 634.467 +
 634.468 +	mask = c->features;
 634.469 +
 634.470 +	switch (c->family) {
 634.471 +	      case 0x07:	memcpy(family, "Itanium", 8); break;
 634.472 +	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
 634.473 +	      default:		sprintf(family, "%u", c->family); break;
 634.474 +	}
 634.475 +
 634.476 +	/* build the feature string: */
 634.477 +	memcpy(features, " standard", 10);
 634.478 +	cp = features;
 634.479 +	sep = 0;
 634.480 +	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
 634.481 +		if (mask & feature_bits[i].mask) {
 634.482 +			if (sep)
 634.483 +				*cp++ = sep;
 634.484 +			sep = ',';
 634.485 +			*cp++ = ' ';
 634.486 +			strcpy(cp, feature_bits[i].feature_name);
 634.487 +			cp += strlen(feature_bits[i].feature_name);
 634.488 +			mask &= ~feature_bits[i].mask;
 634.489 +		}
 634.490 +	}
 634.491 +	if (mask) {
 634.492 +		/* print unknown features as a hex value: */
 634.493 +		if (sep)
 634.494 +			*cp++ = sep;
 634.495 +		sprintf(cp, " 0x%lx", mask);
 634.496 +	}
 634.497 +
 634.498 +	seq_printf(m,
 634.499 +		   "processor  : %d\n"
 634.500 +		   "vendor     : %s\n"
 634.501 +		   "arch       : IA-64\n"
 634.502 +		   "family     : %s\n"
 634.503 +		   "model      : %u\n"
 634.504 +		   "revision   : %u\n"
 634.505 +		   "archrev    : %u\n"
 634.506 +		   "features   :%s\n"	/* don't change this---it _is_ right! */
 634.507 +		   "cpu number : %lu\n"
 634.508 +		   "cpu regs   : %u\n"
 634.509 +		   "cpu MHz    : %lu.%06lu\n"
 634.510 +		   "itc MHz    : %lu.%06lu\n"
 634.511 +		   "BogoMIPS   : %lu.%02lu\n\n",
 634.512 +		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
 634.513 +		   features, c->ppn, c->number,
 634.514 +		   c->proc_freq / 1000000, c->proc_freq % 1000000,
 634.515 +		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 634.516 +		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 634.517 +	return 0;
 634.518 +}
 634.519 +
 634.520 +static void *
 634.521 +c_start (struct seq_file *m, loff_t *pos)
 634.522 +{
 634.523 +#ifdef CONFIG_SMP
 634.524 +	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
 634.525 +		++*pos;
 634.526 +#endif
 634.527 +	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
 634.528 +}
 634.529 +
 634.530 +static void *
 634.531 +c_next (struct seq_file *m, void *v, loff_t *pos)
 634.532 +{
 634.533 +	++*pos;
 634.534 +	return c_start(m, pos);
 634.535 +}
 634.536 +
 634.537 +static void
 634.538 +c_stop (struct seq_file *m, void *v)
 634.539 +{
 634.540 +}
 634.541 +
 634.542 +#ifndef XEN
 634.543 +struct seq_operations cpuinfo_op = {
 634.544 +	.start =	c_start,
 634.545 +	.next =		c_next,
 634.546 +	.stop =		c_stop,
 634.547 +	.show =		show_cpuinfo
 634.548 +};
 634.549 +#endif
 634.550 +
 634.551 +void
 634.552 +identify_cpu (struct cpuinfo_ia64 *c)
 634.553 +{
 634.554 +	union {
 634.555 +		unsigned long bits[5];
 634.556 +		struct {
 634.557 +			/* id 0 & 1: */
 634.558 +			char vendor[16];
 634.559 +
 634.560 +			/* id 2 */
 634.561 +			u64 ppn;		/* processor serial number */
 634.562 +
 634.563 +			/* id 3: */
 634.564 +			unsigned number		:  8;
 634.565 +			unsigned revision	:  8;
 634.566 +			unsigned model		:  8;
 634.567 +			unsigned family		:  8;
 634.568 +			unsigned archrev	:  8;
 634.569 +			unsigned reserved	: 24;
 634.570 +
 634.571 +			/* id 4: */
 634.572 +			u64 features;
 634.573 +		} field;
 634.574 +	} cpuid;
 634.575 +	pal_vm_info_1_u_t vm1;
 634.576 +	pal_vm_info_2_u_t vm2;
 634.577 +	pal_status_t status;
 634.578 +	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
 634.579 +	int i;
 634.580 +
 634.581 +	for (i = 0; i < 5; ++i)
 634.582 +		cpuid.bits[i] = ia64_get_cpuid(i);
 634.583 +
 634.584 +	memcpy(c->vendor, cpuid.field.vendor, 16);
 634.585 +#ifdef CONFIG_SMP
 634.586 +	c->cpu = smp_processor_id();
 634.587 +#endif
 634.588 +	c->ppn = cpuid.field.ppn;
 634.589 +	c->number = cpuid.field.number;
 634.590 +	c->revision = cpuid.field.revision;
 634.591 +	c->model = cpuid.field.model;
 634.592 +	c->family = cpuid.field.family;
 634.593 +	c->archrev = cpuid.field.archrev;
 634.594 +	c->features = cpuid.field.features;
 634.595 +
 634.596 +	status = ia64_pal_vm_summary(&vm1, &vm2);
 634.597 +	if (status == PAL_STATUS_SUCCESS) {
 634.598 +		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
 634.599 +		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
 634.600 +	}
 634.601 +	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
 634.602 +	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 634.603 +
 634.604 +#ifdef XEN
 634.605 +	/* If vmx feature is on, do necessary initialization for vmx */
 634.606 +	if (vmx_enabled)
 634.607 +		vmx_init_env();
 634.608 +#endif
 634.609 +}
 634.610 +
 634.611 +void
 634.612 +setup_per_cpu_areas (void)
 634.613 +{
 634.614 +	/* start_kernel() requires this... */
 634.615 +}
 634.616 +
 634.617 +static void
 634.618 +get_max_cacheline_size (void)
 634.619 +{
 634.620 +	unsigned long line_size, max = 1;
 634.621 +	u64 l, levels, unique_caches;
 634.622 +	pal_cache_config_info_t cci;
 634.623 +	s64 status;
 634.624 +
 634.625 +	status = ia64_pal_cache_summary(&levels, &unique_caches);
 634.626 +	if (status != 0) {
 634.627 +		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
 634.628 +		       __FUNCTION__, status);
 634.629 +		max = SMP_CACHE_BYTES;
 634.630 +		goto out;
 634.631 +	}
 634.632 +
 634.633 +	for (l = 0; l < levels; ++l) {
 634.634 +		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
 634.635 +						    &cci);
 634.636 +		if (status != 0) {
 634.637 +			printk(KERN_ERR
 634.638 +			       "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
 634.639 +			       __FUNCTION__, l, status);
 634.640 +			max = SMP_CACHE_BYTES;
 634.641 +		}
 634.642 +		line_size = 1 << cci.pcci_line_size;
 634.643 +		if (line_size > max)
 634.644 +			max = line_size;
 634.645 +	}
 634.646 +  out:
 634.647 +	if (max > ia64_max_cacheline_size)
 634.648 +		ia64_max_cacheline_size = max;
 634.649 +}
 634.650 +
 634.651 +/*
 634.652 + * cpu_init() initializes state that is per-CPU.  This function acts
 634.653 + * as a 'CPU state barrier', nothing should get across.
 634.654 + */
 634.655 +void
 634.656 +cpu_init (void)
 634.657 +{
 634.658 +	extern void __devinit ia64_mmu_init (void *);
 634.659 +	unsigned long num_phys_stacked;
 634.660 +	pal_vm_info_2_u_t vmi;
 634.661 +	unsigned int max_ctx;
 634.662 +	struct cpuinfo_ia64 *cpu_info;
 634.663 +	void *cpu_data;
 634.664 +
 634.665 +	cpu_data = per_cpu_init();
 634.666 +
 634.667 +	/*
 634.668 +	 * We set ar.k3 so that assembly code in MCA handler can compute
 634.669 +	 * physical addresses of per cpu variables with a simple:
 634.670 +	 *   phys = ar.k3 + &per_cpu_var
 634.671 +	 */
 634.672 +	ia64_set_kr(IA64_KR_PER_CPU_DATA,
 634.673 +		    ia64_tpa(cpu_data) - (long) __per_cpu_start);
 634.674 +
 634.675 +	get_max_cacheline_size();
 634.676 +
 634.677 +	/*
 634.678 +	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
 634.679 +	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
 634.680 +	 * depends on the data returned by identify_cpu().  We break the dependency by
 634.681 +	 * accessing cpu_data() through the canonical per-CPU address.
 634.682 +	 */
 634.683 +	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
 634.684 +	identify_cpu(cpu_info);
 634.685 +
 634.686 +#ifdef CONFIG_MCKINLEY
 634.687 +	{
 634.688 +#		define FEATURE_SET 16
 634.689 +		struct ia64_pal_retval iprv;
 634.690 +
 634.691 +		if (cpu_info->family == 0x1f) {
 634.692 +			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
 634.693 +			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
 634.694 +				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
 634.695 +				              (iprv.v1 | 0x80), FEATURE_SET, 0);
 634.696 +		}
 634.697 +	}
 634.698 +#endif
 634.699 +
 634.700 +	/* Clear the stack memory reserved for pt_regs: */
 634.701 +	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
 634.702 +
 634.703 +	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 634.704 +
 634.705 +	/*
 634.706 +	 * Initialize default control register to defer all speculative faults.  The
 634.707 +	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
 634.708 +	 * the kernel must have recovery code for all speculative accesses).  Turn on
 634.709 +	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
 634.710 +	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
 634.711 +	 * be fine).
 634.712 +	 */
 634.713 +	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
 634.714 +					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
 634.715 +	atomic_inc(&init_mm.mm_count);
 634.716 +	current->active_mm = &init_mm;
 634.717 +#ifdef XEN
 634.718 +	if (current->domain->arch.mm)
 634.719 +#else
 634.720 +	if (current->mm)
 634.721 +#endif
 634.722 +		BUG();
 634.723 +
 634.724 +	ia64_mmu_init(ia64_imva(cpu_data));
 634.725 +	ia64_mca_cpu_init(ia64_imva(cpu_data));
 634.726 +
 634.727 +#ifdef CONFIG_IA32_SUPPORT
 634.728 +	ia32_cpu_init();
 634.729 +#endif
 634.730 +
 634.731 +	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
 634.732 +	ia64_set_itc(0);
 634.733 +
 634.734 +	/* disable all local interrupt sources: */
 634.735 +	ia64_set_itv(1 << 16);
 634.736 +	ia64_set_lrr0(1 << 16);
 634.737 +	ia64_set_lrr1(1 << 16);
 634.738 +	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
 634.739 +	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
 634.740 +
 634.741 +	/* clear TPR & XTP to enable all interrupt classes: */
 634.742 +	ia64_setreg(_IA64_REG_CR_TPR, 0);
 634.743 +#ifdef CONFIG_SMP
 634.744 +	normal_xtp();
 634.745 +#endif
 634.746 +
 634.747 +	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
 634.748 +	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
 634.749 +		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
 634.750 +	else {
 634.751 +		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
 634.752 +		max_ctx = (1U << 15) - 1;	/* use architected minimum */
 634.753 +	}
 634.754 +	while (max_ctx < ia64_ctx.max_ctx) {
 634.755 +		unsigned int old = ia64_ctx.max_ctx;
 634.756 +		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
 634.757 +			break;
 634.758 +	}
 634.759 +
 634.760 +	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
 634.761 +		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
 634.762 +		       "stacked regs\n");
 634.763 +		num_phys_stacked = 96;
 634.764 +	}
 634.765 +	/* size of physical stacked register partition plus 8 bytes: */
 634.766 +	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
 634.767 +	platform_cpu_init();
 634.768 +}
 634.769 +
 634.770 +void
 634.771 +check_bugs (void)
 634.772 +{
 634.773 +	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
 634.774 +			       (unsigned long) __end___mckinley_e9_bundles);
 634.775 +}
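
cpu_init() above lowers the global ia64_ctx.max_ctx to the smallest RID limit reported by any CPU with a lock-free compare-and-swap loop: keep retrying only while this CPU's limit is still below the current global value. A stand-alone sketch of the idiom, with GCC's __sync builtin standing in for the kernel's cmpxchg():

#include <stdio.h>

static unsigned int max_ctx_global = (1U << 24) - 1;	/* hypothetical start */

static void lower_max_ctx(unsigned int this_cpu_max)
{
	while (this_cpu_max < max_ctx_global) {
		unsigned int old = max_ctx_global;

		/* install our value only if nobody raced us; else re-check */
		if (__sync_val_compare_and_swap(&max_ctx_global, old,
						this_cpu_max) == old)
			break;
	}
}

int main(void)
{
	lower_max_ctx((1U << 18) - 1);	/* hypothetical per-CPU limit */
	lower_max_ctx((1U << 15) - 1);	/* a more limited CPU wins */
	printf("max_ctx = 0x%x\n", max_ctx_global);
	return 0;
}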
   635.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   635.2 +++ b/xen/arch/ia64/linux-xen/time.c	Fri Aug 26 09:05:43 2005 +0000
   635.3 @@ -0,0 +1,264 @@
   635.4 +/*
   635.5 + * linux/arch/ia64/kernel/time.c
   635.6 + *
   635.7 + * Copyright (C) 1998-2003 Hewlett-Packard Co
   635.8 + *	Stephane Eranian <eranian@hpl.hp.com>
   635.9 + *	David Mosberger <davidm@hpl.hp.com>
  635.10 + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  635.11 + * Copyright (C) 1999-2000 VA Linux Systems
  635.12 + * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
  635.13 + */
  635.14 +#include <linux/config.h>
  635.15 +
  635.16 +#include <linux/cpu.h>
  635.17 +#include <linux/init.h>
  635.18 +#include <linux/kernel.h>
  635.19 +#include <linux/module.h>
  635.20 +#include <linux/profile.h>
  635.21 +#include <linux/sched.h>
  635.22 +#include <linux/time.h>
  635.23 +#include <linux/interrupt.h>
  635.24 +#include <linux/efi.h>
  635.25 +#include <linux/profile.h>
  635.26 +#include <linux/timex.h>
  635.27 +
  635.28 +#include <asm/machvec.h>
  635.29 +#include <asm/delay.h>
  635.30 +#include <asm/hw_irq.h>
  635.31 +#include <asm/ptrace.h>
  635.32 +#include <asm/sal.h>
  635.33 +#include <asm/sections.h>
  635.34 +#include <asm/system.h>
  635.35 +#ifdef XEN
  635.36 +#include <linux/jiffies.h>	// not included by xen/sched.h
  635.37 +#endif
  635.38 +
  635.39 +extern unsigned long wall_jiffies;
  635.40 +
  635.41 +u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  635.42 +
  635.43 +EXPORT_SYMBOL(jiffies_64);
  635.44 +
  635.45 +#define TIME_KEEPER_ID	0	/* smp_processor_id() of time-keeper */
  635.46 +
  635.47 +#ifdef CONFIG_IA64_DEBUG_IRQ
  635.48 +
  635.49 +unsigned long last_cli_ip;
  635.50 +EXPORT_SYMBOL(last_cli_ip);
  635.51 +
  635.52 +#endif
  635.53 +
  635.54 +#ifndef XEN
  635.55 +static struct time_interpolator itc_interpolator = {
  635.56 +	.shift = 16,
  635.57 +	.mask = 0xffffffffffffffffLL,
  635.58 +	.source = TIME_SOURCE_CPU
  635.59 +};
  635.60 +
  635.61 +static irqreturn_t
  635.62 +timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
  635.63 +{
  635.64 +	unsigned long new_itm;
  635.65 +
  635.66 +	if (unlikely(cpu_is_offline(smp_processor_id()))) {
  635.67 +		return IRQ_HANDLED;
  635.68 +	}
  635.69 +
  635.70 +	platform_timer_interrupt(irq, dev_id, regs);
  635.71 +
  635.72 +	new_itm = local_cpu_data->itm_next;
  635.73 +
  635.74 +	if (!time_after(ia64_get_itc(), new_itm))
  635.75 +		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
  635.76 +		       ia64_get_itc(), new_itm);
  635.77 +
  635.78 +	profile_tick(CPU_PROFILING, regs);
  635.79 +
  635.80 +	while (1) {
  635.81 +		update_process_times(user_mode(regs));
  635.82 +
  635.83 +		new_itm += local_cpu_data->itm_delta;
  635.84 +
  635.85 +		if (smp_processor_id() == TIME_KEEPER_ID) {
  635.86 +			/*
  635.87 +			 * Here we are in the timer irq handler. We have irqs locally
  635.88 +			 * disabled, but we don't know if the timer_bh is running on
  635.89 +			 * another CPU. We need to avoid an SMP race by acquiring the
  635.90 +			 * xtime_lock.
  635.91 +			 */
  635.92 +			write_seqlock(&xtime_lock);
  635.93 +			do_timer(regs);
  635.94 +			local_cpu_data->itm_next = new_itm;
  635.95 +			write_sequnlock(&xtime_lock);
  635.96 +		} else
  635.97 +			local_cpu_data->itm_next = new_itm;
  635.98 +
  635.99 +		if (time_after(new_itm, ia64_get_itc()))
 635.100 +			break;
 635.101 +	}
 635.102 +
 635.103 +	do {
 635.104 +		/*
 635.105 +		 * If we're too close to the next clock tick for
 635.106 +		 * comfort, we increase the safety margin by
 635.107 +		 * intentionally dropping the next tick(s).  We do NOT
 635.108 +		 * update itm.next because that would force us to call
 635.109 +		 * do_timer() which in turn would let our clock run
 635.110 +		 * too fast (with the potentially devastating effect
 635.111 +		 * of losing monotonicity of time).
 635.112 +		 */
 635.113 +		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
 635.114 +			new_itm += local_cpu_data->itm_delta;
 635.115 +		ia64_set_itm(new_itm);
 635.116 +		/* double check, in case we got hit by a (slow) PMI: */
 635.117 +	} while (time_after_eq(ia64_get_itc(), new_itm));
 635.118 +	return IRQ_HANDLED;
 635.119 +}
 635.120 +#endif
 635.121 +
 635.122 +/*
 635.123 + * Encapsulate access to the itm structure for SMP.
 635.124 + */
 635.125 +void
 635.126 +ia64_cpu_local_tick (void)
 635.127 +{
 635.128 +	int cpu = smp_processor_id();
 635.129 +	unsigned long shift = 0, delta;
 635.130 +
 635.131 +	/* arrange for the cycle counter to generate a timer interrupt: */
 635.132 +	ia64_set_itv(IA64_TIMER_VECTOR);
 635.133 +
 635.134 +	delta = local_cpu_data->itm_delta;
 635.135 +	/*
 635.136 +	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
 635.137 +	 * same time:
 635.138 +	 */
 635.139 +	if (cpu) {
 635.140 +		unsigned long hi = 1UL << ia64_fls(cpu);
 635.141 +		shift = (2*(cpu - hi) + 1) * delta/hi/2;
 635.142 +	}
 635.143 +	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
 635.144 +	ia64_set_itm(local_cpu_data->itm_next);
 635.145 +}
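
A worked example of the staggering formula above may help (a standalone sketch, not part of the patch): for cpu > 0, hi is the largest power of two not exceeding cpu, and the shift lands each CPU's tick on an odd multiple of delta/(2*hi), so successive CPUs keep bisecting the gaps left by earlier ones. fls_ul() is a portable stand-in for ia64_fls(), and the delta value is made up.

	#include <stdio.h>

	/* portable stand-in for ia64_fls(): 0-based index of the highest set bit */
	static unsigned long fls_ul(unsigned long x)
	{
		unsigned long r = 0;
		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long delta = 1000000;	/* hypothetical cycles per tick */

		for (unsigned long cpu = 0; cpu < 8; cpu++) {
			unsigned long shift = 0;
			if (cpu) {
				unsigned long hi = 1UL << fls_ul(cpu);
				shift = (2*(cpu - hi) + 1) * delta/hi/2;
			}
			printf("cpu%lu: tick offset = %lu\n", cpu, shift);
		}
		return 0;
	}

This prints offsets 0, delta/2, delta/4, 3*delta/4, delta/8, ...: a binary subdivision of the tick interval.
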
 635.146 +
 635.147 +static int nojitter;
 635.148 +
 635.149 +static int __init nojitter_setup(char *str)
 635.150 +{
 635.151 +	nojitter = 1;
 635.152 +	printk("Jitter checking for ITC timers disabled\n");
 635.153 +	return 1;
 635.154 +}
 635.155 +
 635.156 +__setup("nojitter", nojitter_setup);
 635.157 +
 635.158 +
 635.159 +void __devinit
 635.160 +ia64_init_itm (void)
 635.161 +{
 635.162 +	unsigned long platform_base_freq, itc_freq;
 635.163 +	struct pal_freq_ratio itc_ratio, proc_ratio;
 635.164 +	long status, platform_base_drift, itc_drift;
 635.165 +
 635.166 +	/*
 635.167 +	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
 635.168 +	 * frequency and then a PAL call to determine the frequency ratio between the ITC
 635.169 +	 * and the base frequency.
 635.170 +	 */
 635.171 +	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
 635.172 +				    &platform_base_freq, &platform_base_drift);
 635.173 +	if (status != 0) {
 635.174 +		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
 635.175 +	} else {
 635.176 +		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
 635.177 +		if (status != 0)
 635.178 +			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
 635.179 +	}
 635.180 +	if (status != 0) {
 635.181 +		/* invent "random" values */
 635.182 +		printk(KERN_ERR
 635.183 +		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
 635.184 +		platform_base_freq = 100000000;
 635.185 +		platform_base_drift = -1;	/* no drift info */
 635.186 +		itc_ratio.num = 3;
 635.187 +		itc_ratio.den = 1;
 635.188 +	}
 635.189 +	if (platform_base_freq < 40000000) {
 635.190 +		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
 635.191 +		       platform_base_freq);
 635.192 +		platform_base_freq = 75000000;
 635.193 +		platform_base_drift = -1;
 635.194 +	}
 635.195 +	if (!proc_ratio.den)
 635.196 +		proc_ratio.den = 1;	/* avoid division by zero */
 635.197 +	if (!itc_ratio.den)
 635.198 +		itc_ratio.den = 1;	/* avoid division by zero */
 635.199 +
 635.200 +	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
 635.201 +
 635.202 +	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
 635.203 +	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
 635.204 +	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
 635.205 +	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
 635.206 +	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
 635.207 +
 635.208 +	if (platform_base_drift != -1) {
 635.209 +		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
 635.210 +		printk("+/-%ldppm\n", itc_drift);
 635.211 +	} else {
 635.212 +		itc_drift = -1;
 635.213 +		printk("\n");
 635.214 +	}
 635.215 +
 635.216 +	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
 635.217 +	local_cpu_data->itc_freq = itc_freq;
 635.218 +	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
 635.219 +	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
 635.220 +					+ itc_freq/2)/itc_freq;
 635.221 +
 635.222 +	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
 635.223 +#ifndef XEN
 635.224 +		itc_interpolator.frequency = local_cpu_data->itc_freq;
 635.225 +		itc_interpolator.drift = itc_drift;
 635.226 +#ifdef CONFIG_SMP
 635.227 +		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
 635.228 +		 * Jitter compensation requires a cmpxchg which may limit
 635.229 +		 * the scalability of the syscalls for retrieving time.
 635.230 +		 * The ITC synchronization is usually successful to within a few
 635.231 +		 * ITC ticks but this is not a sure thing. If you need to improve
 635.232 +		 * timer performance in SMP situations then boot the kernel with the
 635.233 +		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
 635.234 +		 * even going backward) if the ITC offsets between the individual CPUs
 635.235 +		 * are too large.
 635.236 +		 */
 635.237 +		if (!nojitter) itc_interpolator.jitter = 1;
 635.238 +#endif
 635.239 +		register_time_interpolator(&itc_interpolator);
 635.240 +#endif
 635.241 +	}
 635.242 +
 635.243 +	/* Setup the CPU local timer tick */
 635.244 +	ia64_cpu_local_tick();
 635.245 +}
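
The nsec_per_cyc value computed above is a rounded fixed-point scale factor, so later cycles-to-nanoseconds conversions need only a multiply and a shift rather than a division. A minimal sketch of the idea, assuming a 64-bit unsigned long (as on ia64) and an illustrative shift of 30 standing in for IA64_NSEC_PER_CYC_SHIFT, which is defined outside this hunk:

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000UL
	#define SHIFT		30	/* assumption: stands in for IA64_NSEC_PER_CYC_SHIFT */

	int main(void)
	{
		unsigned long itc_freq = 400000000UL;	/* hypothetical 400MHz ITC */
		unsigned long nsec_per_cyc =
			((NSEC_PER_SEC << SHIFT) + itc_freq/2) / itc_freq;	/* rounded */
		unsigned long cycles = 1234567;

		/* cycles -> nanoseconds: one multiply, one shift */
		printf("%lu cycles ~= %lu ns\n", cycles, (cycles * nsec_per_cyc) >> SHIFT);
		return 0;
	}
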
 635.246 +
 635.247 +#ifndef XEN
 635.248 +static struct irqaction timer_irqaction = {
 635.249 +	.handler =	timer_interrupt,
 635.250 +	.flags =	SA_INTERRUPT,
 635.251 +	.name =		"timer"
 635.252 +};
 635.253 +
 635.254 +void __init
 635.255 +time_init (void)
 635.256 +{
 635.257 +	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
 635.258 +	efi_gettimeofday(&xtime);
 635.259 +	ia64_init_itm();
 635.260 +
 635.261 +	/*
 635.262 +	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
 635.263 +	 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
 635.264 +	 */
 635.265 +	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 635.266 +}
 635.267 +#endif
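
The normalization that time_init() relies on is simple to model; this is an illustrative reimplementation, not the kernel's set_normalized_timespec() itself:

	/* bring tv_nsec into [0, NSEC_PER_SEC) by carrying whole seconds into tv_sec */
	struct ts_model { long tv_sec; long tv_nsec; };

	static void set_normalized(struct ts_model *ts, long sec, long nsec)
	{
		while (nsec >= 1000000000L) {	/* NSEC_PER_SEC */
			nsec -= 1000000000L;
			++sec;
		}
		while (nsec < 0) {
			nsec += 1000000000L;
			--sec;
		}
		ts->tv_sec = sec;
		ts->tv_nsec = nsec;
	}

Passing (-xtime.tv_sec, -xtime.tv_nsec) through this yields the wall_to_monotonic value whose sum with xtime is zero, with tv_nsec normalized.
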
   636.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   636.2 +++ b/xen/arch/ia64/linux-xen/tlb.c	Fri Aug 26 09:05:43 2005 +0000
   636.3 @@ -0,0 +1,199 @@
   636.4 +/*
   636.5 + * TLB support routines.
   636.6 + *
   636.7 + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
   636.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   636.9 + *
  636.10 + * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
  636.11 + *		Modified RID allocation for SMP
  636.12 + *          Goutham Rao <goutham.rao@intel.com>
  636.13 + *              IPI based ptc implementation and A-step IPI implementation.
  636.14 + */
  636.15 +#include <linux/config.h>
  636.16 +#include <linux/module.h>
  636.17 +#include <linux/init.h>
  636.18 +#include <linux/kernel.h>
  636.19 +#include <linux/sched.h>
  636.20 +#include <linux/smp.h>
  636.21 +#include <linux/mm.h>
  636.22 +
  636.23 +#include <asm/delay.h>
  636.24 +#include <asm/mmu_context.h>
  636.25 +#include <asm/pgalloc.h>
  636.26 +#include <asm/pal.h>
  636.27 +#include <asm/tlbflush.h>
  636.28 +
  636.29 +static struct {
  636.30 +	unsigned long mask;	/* mask of supported purge page-sizes */
  636.31 +	unsigned long max_bits;	/* log2() of largest supported purge page-size */
  636.32 +} purge;
  636.33 +
  636.34 +struct ia64_ctx ia64_ctx = {
  636.35 +	.lock =		SPIN_LOCK_UNLOCKED,
  636.36 +	.next =		1,
  636.37 +	.limit =	(1 << 15) - 1,		/* start out with the safe (architected) limit */
  636.38 +	.max_ctx =	~0U
  636.39 +};
  636.40 +
  636.41 +DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
  636.42 +
  636.43 +/*
  636.44 + * Acquire the ia64_ctx.lock before calling this function!
  636.45 + */
  636.46 +void
  636.47 +wrap_mmu_context (struct mm_struct *mm)
  636.48 +{
  636.49 +#ifdef XEN
  636.50 +	printf("wrap_mmu_context: called, not implemented\n");
  636.51 +#else
  636.52 +	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
  636.53 +	struct task_struct *tsk;
  636.54 +	int i;
  636.55 +
  636.56 +	if (ia64_ctx.next > max_ctx)
  636.57 +		ia64_ctx.next = 300;	/* skip daemons */
  636.58 +	ia64_ctx.limit = max_ctx + 1;
  636.59 +
  636.60 +	/*
  636.61 +	 * Scan all tasks' mm->context and set the proper safe range
  636.62 +	 */
  636.63 +
  636.64 +	read_lock(&tasklist_lock);
  636.65 +  repeat:
  636.66 +	for_each_process(tsk) {
  636.67 +		if (!tsk->mm)
  636.68 +			continue;
  636.69 +		tsk_context = tsk->mm->context;
  636.70 +		if (tsk_context == ia64_ctx.next) {
  636.71 +			if (++ia64_ctx.next >= ia64_ctx.limit) {
  636.72 +				/* empty range: reset the range limit and start over */
  636.73 +				if (ia64_ctx.next > max_ctx)
  636.74 +					ia64_ctx.next = 300;
  636.75 +				ia64_ctx.limit = max_ctx + 1;
  636.76 +				goto repeat;
  636.77 +			}
  636.78 +		}
  636.79 +		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
  636.80 +			ia64_ctx.limit = tsk_context;
  636.81 +	}
  636.82 +	read_unlock(&tasklist_lock);
  636.83 +	/* can't call flush_tlb_all() here because of a race condition with the O(1) scheduler [EF] */
  636.84 +	{
  636.85 +		int cpu = get_cpu(); /* prevent preemption/migration */
  636.86 +		for (i = 0; i < NR_CPUS; ++i)
  636.87 +			if (cpu_online(i) && (i != cpu))
  636.88 +				per_cpu(ia64_need_tlb_flush, i) = 1;
  636.89 +		put_cpu();
  636.90 +	}
  636.91 +	local_flush_tlb_all();
  636.92 +#endif
  636.93 +}
  636.94 +
  636.95 +void
  636.96 +ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
  636.97 +{
  636.98 +	static DEFINE_SPINLOCK(ptcg_lock);
  636.99 +
 636.100 +	/* HW requires global serialization of ptc.ga.  */
 636.101 +	spin_lock(&ptcg_lock);
 636.102 +	{
 636.103 +		do {
 636.104 +			/*
 636.105 +			 * Flush ALAT entries also.
 636.106 +			 */
 636.107 +			ia64_ptcga(start, (nbits<<2));
 636.108 +			ia64_srlz_i();
 636.109 +			start += (1UL << nbits);
 636.110 +		} while (start < end);
 636.111 +	}
 636.112 +	spin_unlock(&ptcg_lock);
 636.113 +}
 636.114 +
 636.115 +void
 636.116 +local_flush_tlb_all (void)
 636.117 +{
 636.118 +	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
 636.119 +
 636.120 +	addr    = local_cpu_data->ptce_base;
 636.121 +	count0  = local_cpu_data->ptce_count[0];
 636.122 +	count1  = local_cpu_data->ptce_count[1];
 636.123 +	stride0 = local_cpu_data->ptce_stride[0];
 636.124 +	stride1 = local_cpu_data->ptce_stride[1];
 636.125 +
 636.126 +	local_irq_save(flags);
 636.127 +	for (i = 0; i < count0; ++i) {
 636.128 +		for (j = 0; j < count1; ++j) {
 636.129 +			ia64_ptce(addr);
 636.130 +			addr += stride1;
 636.131 +		}
 636.132 +		addr += stride0;
 636.133 +	}
 636.134 +	local_irq_restore(flags);
 636.135 +	ia64_srlz_i();			/* srlz.i implies srlz.d */
 636.136 +}
 636.137 +EXPORT_SYMBOL(local_flush_tlb_all);
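
The nested loop above visits a count0 x count1 grid of addresses supplied by PAL; note that addr is never reset between rows, so stride0 is the extra increment applied after each inner row. A host-side model with made-up parameters shows the traversal:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical ptce parameters; the real values come from ia64_get_ptce() */
		unsigned long addr = 0x1000, count0 = 2, count1 = 3;
		unsigned long stride0 = 0x100, stride1 = 0x10;

		for (unsigned long i = 0; i < count0; ++i) {
			for (unsigned long j = 0; j < count1; ++j) {
				printf("ptc.e %#lx\n", addr);	/* models ia64_ptce(addr) */
				addr += stride1;
			}
			addr += stride0;
		}
		return 0;
	}
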
 636.138 +
 636.139 +void
 636.140 +flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
 636.141 +{
 636.142 +#ifdef XEN
 636.143 +	printf("flush_tlb_range: called, not implemented\n");
 636.144 +#else
 636.145 +	struct mm_struct *mm = vma->vm_mm;
 636.146 +	unsigned long size = end - start;
 636.147 +	unsigned long nbits;
 636.148 +
 636.149 +	if (mm != current->active_mm) {
 636.150 +		/* this does happen, but perhaps it's not worth optimizing for? */
 636.151 +#ifdef CONFIG_SMP
 636.152 +		flush_tlb_all();
 636.153 +#else
 636.154 +		mm->context = 0;
 636.155 +#endif
 636.156 +		return;
 636.157 +	}
 636.158 +
 636.159 +	nbits = ia64_fls(size + 0xfff);
 636.160 +	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
 636.161 +		++nbits;
 636.162 +	if (nbits > purge.max_bits)
 636.163 +		nbits = purge.max_bits;
 636.164 +	start &= ~((1UL << nbits) - 1);
 636.165 +
 636.166 +# ifdef CONFIG_SMP
 636.167 +	platform_global_tlb_purge(start, end, nbits);
 636.168 +# else
 636.169 +	do {
 636.170 +		ia64_ptcl(start, (nbits<<2));
 636.171 +		start += (1UL << nbits);
 636.172 +	} while (start < end);
 636.173 +# endif
 636.174 +
 636.175 +	ia64_srlz_i();			/* srlz.i implies srlz.d */
 636.176 +#endif
 636.177 +}
 636.178 +EXPORT_SYMBOL(flush_tlb_range);
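
The purge-size selection in the non-Xen branch can be exercised standalone: pad the range length (size + 0xfff), take its top bit, then bump to the next size supported by purge.mask. The mask below is the architected default that ia64_tlb_init() falls back on; fls_ul() again stands in for ia64_fls(), and the range length is hypothetical.

	#include <stdio.h>

	static unsigned long fls_ul(unsigned long x)	/* stand-in for ia64_fls() */
	{
		unsigned long r = 0;
		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long mask = 0x115557000UL;	/* architected purge page sizes */
		unsigned long max_bits = fls_ul(mask);
		unsigned long size = 0x23456;		/* hypothetical range length */
		unsigned long nbits = fls_ul(size + 0xfff);

		while ((((1UL << nbits) & mask) == 0) && nbits < max_bits)
			++nbits;
		if (nbits > max_bits)
			nbits = max_bits;
		printf("purge in %#lx-byte chunks\n", 1UL << nbits);	/* 0x40000 here */
		return 0;
	}
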
 636.179 +
 636.180 +void __devinit
 636.181 +ia64_tlb_init (void)
 636.182 +{
 636.183 +	ia64_ptce_info_t ptce_info;
 636.184 +	unsigned long tr_pgbits;
 636.185 +	long status;
 636.186 +
 636.187 +	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
 636.188 +		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
 636.189 +		       "defaulting to architected purge page-sizes.\n", status);
 636.190 +		purge.mask = 0x115557000UL;
 636.191 +	}
 636.192 +	purge.max_bits = ia64_fls(purge.mask);
 636.193 +
 636.194 +	ia64_get_ptce(&ptce_info);
 636.195 +	local_cpu_data->ptce_base = ptce_info.base;
 636.196 +	local_cpu_data->ptce_count[0] = ptce_info.count[0];
 636.197 +	local_cpu_data->ptce_count[1] = ptce_info.count[1];
 636.198 +	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
 636.199 +	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
 636.200 +
 636.201 +	local_flush_tlb_all();		/* nuke leftovers from bootstrapping... */
 636.202 +}
   637.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   637.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c	Fri Aug 26 09:05:43 2005 +0000
   637.3 @@ -0,0 +1,1653 @@
   637.4 +/*
   637.5 + * Architecture-specific unaligned trap handling.
   637.6 + *
   637.7 + * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
   637.8 + *	Stephane Eranian <eranian@hpl.hp.com>
   637.9 + *	David Mosberger-Tang <davidm@hpl.hp.com>
  637.10 + *
  637.11 + * 2002/12/09   Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
  637.12 + *		get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
  637.13 + *		stacked register returns an undefined value; it does NOT trigger a
  637.14 + *		"rsvd register fault").
  637.15 + * 2001/10/11	Fix unaligned access to rotating registers in s/w pipelined loops.
  637.16 + * 2001/08/13	Correct size of extended floats (float_fsz) from 16 to 10 bytes.
  637.17 + * 2001/01/17	Add support for emulation of unaligned kernel accesses.
  637.18 + */
  637.19 +#include <linux/kernel.h>
  637.20 +#include <linux/sched.h>
  637.21 +#include <linux/smp_lock.h>
  637.22 +#include <linux/tty.h>
  637.23 +
  637.24 +#include <asm/intrinsics.h>
  637.25 +#include <asm/processor.h>
  637.26 +#include <asm/rse.h>
  637.27 +#include <asm/uaccess.h>
  637.28 +#include <asm/unaligned.h>
  637.29 +
  637.30 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
  637.31 +
  637.32 +#undef DEBUG_UNALIGNED_TRAP
  637.33 +
  637.34 +#ifdef DEBUG_UNALIGNED_TRAP
  637.35 +# define DPRINT(a...)	do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
  637.36 +# define DDUMP(str,vp,len)	dump(str, vp, len)
  637.37 +
  637.38 +static void
  637.39 +dump (const char *str, void *vp, size_t len)
  637.40 +{
  637.41 +	unsigned char *cp = vp;
  637.42 +	int i;
  637.43 +
  637.44 +	printk("%s", str);
  637.45 +	for (i = 0; i < len; ++i)
  637.46 +		printk (" %02x", *cp++);
  637.47 +	printk("\n");
  637.48 +}
  637.49 +#else
  637.50 +# define DPRINT(a...)
  637.51 +# define DDUMP(str,vp,len)
  637.52 +#endif
  637.53 +
  637.54 +#define IA64_FIRST_STACKED_GR	32
  637.55 +#define IA64_FIRST_ROTATING_FR	32
  637.56 +#define SIGN_EXT9		0xffffffffffffff00ul
  637.57 +
  637.58 +/*
  637.59 + * For M-unit:
  637.60 + *
  637.61 + *  opcode |   m  |   x6    |
  637.62 + * --------|------|---------|
  637.63 + * [40-37] | [36] | [35:30] |
  637.64 + * --------|------|---------|
  637.65 + *     4   |   1  |    6    | = 11 bits
  637.66 + * --------------------------
  637.67 + * However bits [31:30] are not directly useful to distinguish between
  637.68 + * load/store so we can use [35:32] instead, which gives the following
  637.69 + * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
  637.70 + * checking the m-bit until later in the load/store emulation.
  637.71 + */
  637.72 +#define IA64_OPCODE_MASK	0x1ef
  637.73 +#define IA64_OPCODE_SHIFT	32
  637.74 +
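
One plausible way this pair is applied (a sketch; the actual dispatch lies further down this file, beyond this excerpt, and the slot value is fabricated): shift the 41-bit instruction slot right so bits [40:32] land in the low 9 bits, then AND with 0x1ef, whose cleared bit 4 is what drops the m-bit from the comparison.

	#include <stdio.h>

	#define IA64_OPCODE_MASK	0x1ef
	#define IA64_OPCODE_SHIFT	32

	int main(void)
	{
		/* hand-built slot whose bits [40:32] read 0x080, i.e. a plain ld */
		unsigned long long slot = 0x080ULL << IA64_OPCODE_SHIFT;
		unsigned long long opcode = (slot >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;

		printf("opcode = %#llx (LD_OP below is 0x080)\n", opcode);
		return 0;
	}
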
  637.75 +/*
  637.76 + * Table C-28 Integer Load/Store
  637.77 + *
  637.78 + * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
  637.79 + *
  637.80 + * ld8.fill, st8.fill  MUST be aligned because the RNATs are based on
  637.81 + * the address (bits [8:3]), so we must fail.
  637.82 + */
  637.83 +#define LD_OP            0x080
  637.84 +#define LDS_OP           0x081
  637.85 +#define LDA_OP           0x082
  637.86 +#define LDSA_OP          0x083
  637.87 +#define LDBIAS_OP        0x084
  637.88 +#define LDACQ_OP         0x085
  637.89 +/* 0x086, 0x087 are not relevant */
  637.90 +#define LDCCLR_OP        0x088
  637.91 +#define LDCNC_OP         0x089
  637.92 +#define LDCCLRACQ_OP     0x08a
  637.93 +#define ST_OP            0x08c
  637.94 +#define STREL_OP         0x08d
  637.95 +/* 0x08e,0x8f are not relevant */
  637.96 +
  637.97 +/*
  637.98 + * Table C-29 Integer Load +Reg
  637.99 + *
 637.100 + * we use the ld->m (bit [36:36]) field to determine whether or not we have
 637.101 + * a load/store of this form.
 637.102 + */
 637.103 +
 637.104 +/*
 637.105 + * Table C-30 Integer Load/Store +Imm
 637.106 + *
 637.107 + * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
 637.108 + *
 637.109 + * ld8.fill, st8.fill must be aligned because the NaT bits are based on
 637.110 + * the address, so we must fail and the program must be fixed.
 637.111 + */
 637.112 +#define LD_IMM_OP            0x0a0
 637.113 +#define LDS_IMM_OP           0x0a1
 637.114 +#define LDA_IMM_OP           0x0a2
 637.115 +#define LDSA_IMM_OP          0x0a3
 637.116 +#define LDBIAS_IMM_OP        0x0a4
 637.117 +#define LDACQ_IMM_OP         0x0a5
 637.118 +/* 0x0a6, 0xa7 are not relevant */
 637.119 +#define LDCCLR_IMM_OP        0x0a8
 637.120 +#define LDCNC_IMM_OP         0x0a9
 637.121 +#define LDCCLRACQ_IMM_OP     0x0aa
 637.122 +#define ST_IMM_OP            0x0ac
 637.123 +#define STREL_IMM_OP         0x0ad
 637.124 +/* 0x0ae,0xaf are not relevant */
 637.125 +
 637.126 +/*
 637.127 + * Table C-32 Floating-point Load/Store
 637.128 + */
 637.129 +#define LDF_OP           0x0c0
 637.130 +#define LDFS_OP          0x0c1
 637.131 +#define LDFA_OP          0x0c2
 637.132 +#define LDFSA_OP         0x0c3
 637.133 +/* 0x0c6 is irrelevant */
 637.134 +#define LDFCCLR_OP       0x0c8
 637.135 +#define LDFCNC_OP        0x0c9
 637.136 +/* 0x0cb is irrelevant  */
 637.137 +#define STF_OP           0x0cc
 637.138 +
 637.139 +/*
 637.140 + * Table C-33 Floating-point Load +Reg
 637.141 + *
 637.142 + * we use the ld->m (bit [36:36]) field to determine whether or not we have
 637.143 + * a load/store of this form.
 637.144 + */
 637.145 +
 637.146 +/*
 637.147 + * Table C-34 Floating-point Load/Store +Imm
 637.148 + */
 637.149 +#define LDF_IMM_OP       0x0e0
 637.150 +#define LDFS_IMM_OP      0x0e1
 637.151 +#define LDFA_IMM_OP      0x0e2
 637.152 +#define LDFSA_IMM_OP     0x0e3
 637.153 +/* 0x0e6 is irrelevant */
 637.154 +#define LDFCCLR_IMM_OP   0x0e8
 637.155 +#define LDFCNC_IMM_OP    0x0e9
 637.156 +#define STF_IMM_OP       0x0ec
 637.157 +
 637.158 +typedef struct {
 637.159 +	unsigned long	 qp:6;	/* [0:5]   */
 637.160 +	unsigned long    r1:7;	/* [6:12]  */
 637.161 +	unsigned long   imm:7;	/* [13:19] */
 637.162 +	unsigned long    r3:7;	/* [20:26] */
 637.163 +	unsigned long     x:1;  /* [27:27] */
 637.164 +	unsigned long  hint:2;	/* [28:29] */
 637.165 +	unsigned long x6_sz:2;	/* [30:31] */
 637.166 +	unsigned long x6_op:4;	/* [32:35], x6 = x6_sz|x6_op */
 637.167 +	unsigned long     m:1;	/* [36:36] */
 637.168 +	unsigned long    op:4;	/* [37:40] */
 637.169 +	unsigned long   pad:23; /* [41:63] */
 637.170 +} load_store_t;
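
To see the decode in action, the raw slot can be overlaid with the bitfield struct above. This relies on gcc's little-endian bitfield layout on ia64 and is purely illustrative (the encoding is hand-built, and insn_overlay is not a name from this patch):

	union insn_overlay {
		unsigned long bits;
		load_store_t ld;
	};

	static void decode_example(void)
	{
		union insn_overlay u;

		/* hand-encoded "ld8 r1=[r3]": op=4 at [37:40], x6_sz=3 at [30:31],
		   r3=3 at [20:26], r1=1 at [6:12] */
		u.bits = (4UL << 37) | (3UL << 30) | (3UL << 20) | (1UL << 6);
		/* now u.ld.op == 4, u.ld.x6_sz == 3 (so len = 1 << 3 = 8 bytes),
		   u.ld.r3 == 3, u.ld.r1 == 1 */
	}
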
 637.171 +
 637.172 +
 637.173 +typedef enum {
 637.174 +	UPD_IMMEDIATE,	/* ldXZ r1=[r3],imm(9) */
 637.175 +	UPD_REG		/* ldXZ r1=[r3],r2     */
 637.176 +} update_t;
 637.177 +
 637.178 +/*
 637.179 + * We use tables to keep track of the offsets of registers in the saved state.
 637.180 + * This way we save having big switch/case statements.
 637.181 + *
 637.182 + * We use bit 0 to indicate switch_stack or pt_regs.
 637.183 + * The offset is simply shifted by 1 bit.
 637.184 + * A 2-byte value should be enough to hold any kind of offset
 637.185 + *
 637.186 + * In case the calling convention changes (and thus pt_regs/switch_stack)
 637.187 + * simply use RSW instead of RPT or vice-versa.
 637.188 + */
 637.189 +
 637.190 +#define RPO(x)	((size_t) &((struct pt_regs *)0)->x)
 637.191 +#define RSO(x)	((size_t) &((struct switch_stack *)0)->x)
 637.192 +
 637.193 +#define RPT(x)		(RPO(x) << 1)
 637.194 +#define RSW(x)		(1| RSO(x)<<1)
 637.195 +
 637.196 +#define GR_OFFS(x)	(gr_info[x]>>1)
 637.197 +#define GR_IN_SW(x)	(gr_info[x] & 0x1)
 637.198 +
 637.199 +#define FR_OFFS(x)	(fr_info[x]>>1)
 637.200 +#define FR_IN_SW(x)	(fr_info[x] & 0x1)
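
A toy round-trip of the tagged-offset scheme (the names here are illustrative, not from the patch): bit 0 records which saved-state structure holds the register, and the rest is the byte offset shifted up by one.

	#define TAG(off, in_sw)	(((off) << 1) | (in_sw))	/* like RPT()/RSW()  */
	#define TAG_OFFS(v)	((v) >> 1)			/* like GR_OFFS()    */
	#define TAG_IN_SW(v)	((v) & 0x1)			/* like GR_IN_SW()   */

	/* a byte offset of 0x18 inside switch_stack round-trips cleanly:
	 *   TAG_OFFS(TAG(0x18, 1)) == 0x18 and TAG_IN_SW(TAG(0x18, 1)) == 1
	 */
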
 637.201 +
 637.202 +static u16 gr_info[32]={
 637.203 +	0,			/* r0 is read-only : WE SHOULD NEVER GET THIS */
 637.204 +
 637.205 +	RPT(r1), RPT(r2), RPT(r3),
 637.206 +
 637.207 +#ifdef  CONFIG_VTI
 637.208 +	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
 637.209 +#else   //CONFIG_VTI
 637.210 +	RSW(r4), RSW(r5), RSW(r6), RSW(r7),
 637.211 +#endif  //CONFIG_VTI
 637.212 +
 637.213 +	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
 637.214 +	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
 637.215 +
 637.216 +	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
 637.217 +	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
 637.218 +	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
 637.219 +	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
 637.220 +};
 637.221 +
 637.222 +static u16 fr_info[32]={
 637.223 +	0,			/* constant : WE SHOULD NEVER GET THIS */
 637.224 +	0,			/* constant : WE SHOULD NEVER GET THIS */
 637.225 +
 637.226 +	RSW(f2), RSW(f3), RSW(f4), RSW(f5),
 637.227 +
 637.228 +	RPT(f6), RPT(f7), RPT(f8), RPT(f9),
 637.229 +	RPT(f10), RPT(f11),
 637.230 +
 637.231 +	RSW(f12), RSW(f13), RSW(f14),
 637.232 +	RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
 637.233 +	RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
 637.234 +	RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
 637.235 +	RSW(f30), RSW(f31)
 637.236 +};
 637.237 +
 637.238 +/* Invalidate ALAT entry for integer register REGNO.  */
 637.239 +static void
 637.240 +invala_gr (int regno)
 637.241 +{
 637.242 +#	define F(reg)	case reg: ia64_invala_gr(reg); break
 637.243 +
 637.244 +	switch (regno) {
 637.245 +		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
 637.246 +		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
 637.247 +		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
 637.248 +		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
 637.249 +		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
 637.250 +		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
 637.251 +		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
 637.252 +		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
 637.253 +		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
 637.254 +		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
 637.255 +		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
 637.256 +		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
 637.257 +		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
 637.258 +		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
 637.259 +		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
 637.260 +		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
 637.261 +	}
 637.262 +#	undef F
 637.263 +}
 637.264 +
 637.265 +/* Invalidate ALAT entry for floating-point register REGNO.  */
 637.266 +static void
 637.267 +invala_fr (int regno)
 637.268 +{
 637.269 +#	define F(reg)	case reg: ia64_invala_fr(reg); break
 637.270 +
 637.271 +	switch (regno) {
 637.272 +		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
 637.273 +		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
 637.274 +		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
 637.275 +		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
 637.276 +		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
 637.277 +		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
 637.278 +		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
 637.279 +		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
 637.280 +		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
 637.281 +		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
 637.282 +		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
 637.283 +		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
 637.284 +		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
 637.285 +		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
 637.286 +		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
 637.287 +		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
 637.288 +	}
 637.289 +#	undef F
 637.290 +}
 637.291 +
 637.292 +static inline unsigned long
 637.293 +rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
 637.294 +{
 637.295 +	reg += rrb;
 637.296 +	if (reg >= sor)
 637.297 +		reg -= sor;
 637.298 +	return reg;
 637.299 +}
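
A standalone check of the mapping (rot() duplicates rotate_reg() purely for illustration): with a rotating region of sor = 16 registers and a rotation base rrb = 3, logical register 14 wraps around to slot 1, while logical register 2 simply maps to slot 5.

	#include <assert.h>

	static unsigned long rot(unsigned long sor, unsigned long rrb, unsigned long reg)
	{
		reg += rrb;
		return reg >= sor ? reg - sor : reg;
	}

	int main(void)
	{
		assert(rot(16, 3, 14) == 1);	/* 14 + 3 = 17 wraps past sor */
		assert(rot(16, 3, 2) == 5);	/* no wrap */
		return 0;
	}
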
 637.300 +
 637.301 +#ifdef CONFIG_VTI
 637.302 +static void
 637.303 +set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
 637.304 +{
 637.305 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.306 +	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
 637.307 +	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
 637.308 +	unsigned long rnats, nat_mask;
 637.309 +	unsigned long old_rsc, new_rsc;
 637.310 +	unsigned long on_kbs, rnat;
 637.311 +	long sof = (regs->cr_ifs) & 0x7f;
 637.312 +	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
 637.313 +	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
 637.314 +	long ridx = r1 - 32;
 637.315 +
 637.316 +	if (ridx >= sof) {
 637.317 +		/* this should never happen, as the "rsvd register fault" has higher priority */
 637.318 +		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
 637.319 +		return;
 637.320 +	}
 637.321 +
 637.322 +	if (ridx < sor)
 637.323 +		ridx = rotate_reg(sor, rrb_gr, ridx);
 637.324 +
 637.325 +	old_rsc = ia64_get_rsc();
 637.326 +	new_rsc = old_rsc & ~0x3;
 637.327 +	ia64_set_rsc(new_rsc);
 637.328 +
 637.329 +	bspstore = ia64_get_bspstore();
 637.330 +	bsp = kbs + (regs->loadrs >> 19);	/* >> (16 + 3): loadrs is stored << 16, and >> 3 turns bytes into slots */
 637.331 +
 637.332 +	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
 637.333 +	nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.334 +	rnat_addr = ia64_rse_rnat_addr(addr);
 637.335 +
 637.336 +	if (addr >= bspstore) {
 637.337 +		/* the slot is still in the register stack: flush it out, patch it, reload */
 637.338 +		ia64_flushrs();
 637.339 +		ia64_mf();
 637.340 +		*addr = val;
 637.341 +		bspstore = ia64_get_bspstore();
 637.342 +		rnat = ia64_get_rnat();
 637.343 +		if (bspstore < rnat_addr) {
 637.344 +			rnat = rnat & ~nat_mask;
 637.345 +		} else {
 637.346 +			*rnat_addr = (*rnat_addr) & ~nat_mask;
 637.347 +		}
 637.348 +		ia64_mf();
 637.349 +		ia64_loadrs();
 637.350 +		ia64_set_rnat(rnat);
 637.351 +	} else {
 637.352 +		/* the slot has already been spilled to the backing store: patch it in place */
 637.353 +		rnat = ia64_get_rnat();
 637.354 +		*addr = val;
 637.355 +		if (bspstore < rnat_addr) {
 637.356 +			rnat = rnat & ~nat_mask;
 637.357 +		} else {
 637.358 +			*rnat_addr = (*rnat_addr) & ~nat_mask;
 637.359 +		}
 637.360 +		ia64_set_bspstore(bspstore);
 637.361 +		ia64_set_rnat(rnat);
 637.362 +	}
 637.363 +	ia64_set_rsc(old_rsc);
 637.364 +}
 637.365 +
 637.366 +
 637.367 +static void
 637.368 +get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
 637.369 +{
 637.370 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.371 +	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
 637.372 +	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
 637.373 +	unsigned long rnats, nat_mask;
 637.374 +	unsigned long on_kbs;
 637.375 +	unsigned long old_rsc, new_rsc;
 637.376 +	long sof = (regs->cr_ifs) & 0x7f;
 637.377 +	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
 637.378 +	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
 637.379 +	long ridx = r1 - 32;
 637.380 +
 637.381 +	if (ridx >= sof) {
 637.382 +		/* a read of an out-of-frame register is undefined; this port treats it as fatal */
 637.383 +		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
 637.384 +		panic("wrong stack register number");
 637.385 +	}
 637.386 +
 637.387 +	if (ridx < sor)
 637.388 +		ridx = rotate_reg(sor, rrb_gr, ridx);
 637.389 +
 637.390 +	old_rsc = ia64_get_rsc();
 637.391 +	new_rsc = old_rsc & ~0x3;
 637.392 +	ia64_set_rsc(new_rsc);
 637.393 +
 637.394 +	bspstore = ia64_get_bspstore();
 637.395 +	bsp = kbs + (regs->loadrs >> 19);	/* >> (16 + 3): loadrs is stored << 16, and >> 3 turns bytes into slots */
 637.396 +
 637.397 +	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
 637.398 +	nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.399 +	rnat_addr = ia64_rse_rnat_addr(addr);
 637.400 +
 637.401 +	if (addr >= bspstore) {
 637.402 +		/* the slot is still in the register stack: flush it out so it can be read */
 637.403 +		ia64_flushrs();
 637.404 +		ia64_mf();
 637.405 +		bspstore = ia64_get_bspstore();
 637.406 +	}
 637.407 +	*val = *addr;
 637.408 +	if (bspstore < rnat_addr) {
 637.409 +		*nat = !!(ia64_get_rnat() & nat_mask);
 637.410 +	} else {
 637.411 +		*nat = !!((*rnat_addr) & nat_mask);
 637.412 +	}
 637.413 +	ia64_set_rsc(old_rsc);
 637.414 +}
 637.415 +#else // CONFIG_VTI
 637.416 +static void
 637.417 +set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
 637.418 +{
 637.419 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.420 +	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
 637.421 +	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
 637.422 +	unsigned long rnats, nat_mask;
 637.423 +	unsigned long on_kbs;
 637.424 +	long sof = (regs->cr_ifs) & 0x7f;
 637.425 +	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
 637.426 +	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
 637.427 +	long ridx = r1 - 32;
 637.428 +
 637.429 +	if (ridx >= sof) {
 637.430 +		/* this should never happen, as the "rsvd register fault" has higher priority */
 637.431 +		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
 637.432 +		return;
 637.433 +	}
 637.434 +
 637.435 +	if (ridx < sor)
 637.436 +		ridx = rotate_reg(sor, rrb_gr, ridx);
 637.437 +
 637.438 +	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
 637.439 +	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
 637.440 +
 637.441 +	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
 637.442 +	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
 637.443 +	if (addr >= kbs) {
 637.444 +		/* the register is on the kernel backing store: easy... */
 637.445 +		rnat_addr = ia64_rse_rnat_addr(addr);
 637.446 +		if ((unsigned long) rnat_addr >= sw->ar_bspstore)
 637.447 +			rnat_addr = &sw->ar_rnat;
 637.448 +		nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.449 +
 637.450 +		*addr = val;
 637.451 +		if (nat)
 637.452 +			*rnat_addr |=  nat_mask;
 637.453 +		else
 637.454 +			*rnat_addr &= ~nat_mask;
 637.455 +		return;
 637.456 +	}
 637.457 +
 637.458 +	if (!user_stack(current, regs)) {
 637.459 +		DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
 637.460 +		return;
 637.461 +	}
 637.462 +
 637.463 +	bspstore = (unsigned long *)regs->ar_bspstore;
 637.464 +	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
 637.465 +	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
 637.466 +	addr    = ia64_rse_skip_regs(bsp, ridx);
 637.467 +
 637.468 +	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
 637.469 +
 637.470 +	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
 637.471 +
 637.472 +	rnat_addr = ia64_rse_rnat_addr(addr);
 637.473 +
 637.474 +	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
 637.475 +	DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
 637.476 +	       (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
 637.477 +
 637.478 +	nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.479 +	if (nat)
 637.480 +		rnats |=  nat_mask;
 637.481 +	else
 637.482 +		rnats &= ~nat_mask;
 637.483 +	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
 637.484 +
 637.485 +	DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
 637.486 +}
 637.487 +
 637.488 +
 637.489 +static void
 637.490 +get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
 637.491 +{
 637.492 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.493 +	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
 637.494 +	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
 637.495 +	unsigned long rnats, nat_mask;
 637.496 +	unsigned long on_kbs;
 637.497 +	long sof = (regs->cr_ifs) & 0x7f;
 637.498 +	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
 637.499 +	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
 637.500 +	long ridx = r1 - 32;
 637.501 +
 637.502 +	if (ridx >= sof) {
 637.503 +		/* read of out-of-frame register returns an undefined value; 0 in our case.  */
 637.504 +		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
 637.505 +		goto fail;
 637.506 +	}
 637.507 +
 637.508 +	if (ridx < sor)
 637.509 +		ridx = rotate_reg(sor, rrb_gr, ridx);
 637.510 +
 637.511 +	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
 637.512 +	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
 637.513 +
 637.514 +	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
 637.515 +	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
 637.516 +	if (addr >= kbs) {
 637.517 +		/* the register is on the kernel backing store: easy... */
 637.518 +		*val = *addr;
 637.519 +		if (nat) {
 637.520 +			rnat_addr = ia64_rse_rnat_addr(addr);
 637.521 +			if ((unsigned long) rnat_addr >= sw->ar_bspstore)
 637.522 +				rnat_addr = &sw->ar_rnat;
 637.523 +			nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.524 +			*nat = (*rnat_addr & nat_mask) != 0;
 637.525 +		}
 637.526 +		return;
 637.527 +	}
 637.528 +
 637.529 +	if (!user_stack(current, regs)) {
 637.530 +		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
 637.531 +		goto fail;
 637.532 +	}
 637.533 +
 637.534 +	bspstore = (unsigned long *)regs->ar_bspstore;
 637.535 +	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
 637.536 +	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
 637.537 +	addr    = ia64_rse_skip_regs(bsp, ridx);
 637.538 +
 637.539 +	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
 637.540 +
 637.541 +	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
 637.542 +
 637.543 +	if (nat) {
 637.544 +		rnat_addr = ia64_rse_rnat_addr(addr);
 637.545 +		nat_mask = 1UL << ia64_rse_slot_num(addr);
 637.546 +
 637.547 +		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
 637.548 +
 637.549 +		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
 637.550 +		*nat = (rnats & nat_mask) != 0;
 637.551 +	}
 637.552 +	return;
 637.553 +
 637.554 +  fail:
 637.555 +	*val = 0;
 637.556 +	if (nat)
 637.557 +		*nat = 0;
 637.558 +	return;
 637.559 +}
 637.560 +#endif // CONFIG_VTI
 637.561 +
 637.562 +
 637.563 +#ifdef XEN
 637.564 +void
 637.565 +#else
 637.566 +static void
 637.567 +#endif
 637.568 +setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
 637.569 +{
 637.570 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.571 +	unsigned long addr;
 637.572 +	unsigned long bitmask;
 637.573 +	unsigned long *unat;
 637.574 +
 637.575 +	/*
 637.576 +	 * First takes care of stacked registers
 637.577 +	 */
 637.578 +	if (regnum >= IA64_FIRST_STACKED_GR) {
 637.579 +		set_rse_reg(regs, regnum, val, nat);
 637.580 +		return;
 637.581 +	}
 637.582 +
 637.583 +	/*
 637.584 +	 * Using r0 as a target raises a General Exception fault which has higher priority
 637.585 +	 * than the Unaligned Reference fault.
 637.586 +	 */
 637.587 +
 637.588 +	/*
 637.589 +	 * Now look at registers in [0-31] range and init correct UNAT
 637.590 +	 */
 637.591 +	if (GR_IN_SW(regnum)) {
 637.592 +		addr = (unsigned long)sw;
 637.593 +		unat = &sw->ar_unat;
 637.594 +	} else {
 637.595 +		addr = (unsigned long)regs;
 637.596 +#ifdef CONFIG_VTI
 637.597 +		unat = &regs->eml_unat;
 637.598 +#else //CONFIG_VTI
 637.599 +		unat = &sw->caller_unat;
 637.600 +#endif  //CONFIG_VTI
 637.601 +	}
 637.602 +	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
 637.603 +	       addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
 637.604 +	/*
 637.605 +	 * add offset from base of struct
 637.606 +	 * and do it !
 637.607 +	 */
 637.608 +	addr += GR_OFFS(regnum);
 637.609 +
 637.610 +	*(unsigned long *)addr = val;
 637.611 +
 637.612 +	/*
 637.613 +	 * We need to clear the corresponding UNAT bit to fully emulate the load
 637.614 +	 * UNAT bit_pos = GR[r3]{8:3} (from EAS-2.4)
 637.615 +	 */
 637.616 +	bitmask   = 1UL << (addr >> 3 & 0x3f);
 637.617 +	DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
 637.618 +	if (nat) {
 637.619 +		*unat |= bitmask;
 637.620 +	} else {
 637.621 +		*unat &= ~bitmask;
 637.622 +	}
 637.623 +	DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat, *unat);
 637.624 +}
 637.625 +
 637.626 +/*
 637.627 + * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
 637.628 + * range from 32-127); the result is in the range from 0-95.
 637.629 + */
 637.630 +static inline unsigned long
 637.631 +fph_index (struct pt_regs *regs, long regnum)
 637.632 +{
 637.633 +	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
 637.634 +	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 637.635 +}
 637.636 +
 637.637 +static void
 637.638 +setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 637.639 +{
 637.640 +	struct switch_stack *sw = (struct switch_stack *)regs - 1;
 637.641 +	unsigned long addr;
 637.642 +
 637.643 +	/*
 637.644 +	 * From EAS-2.5: FPDisableFault has higher priority than Unaligned
 637.645 +	 * Fault. Thus, when we get here, we know the partition is enabled.
 637.646 +	 * To update f32-f127, there are three choices:
 637.647 +	 *
 637.648 +	 *	(1) save f32-f127 to thread.fph and update the values there
 637.649 +	 *	(2) use a gigantic switch statement to directly access the registers
 637.650 +	 *	(3) generate code on the fly to update the desired register
 637.651 +	 *
 637.652 +	 * For now, we are using approach (1).
 637.653 +	 */
 637.654 +	if (regnum >= IA64_FIRST_ROTATING_FR) {
 637.655 +		ia64_sync_fph(current);
 637.656 +#ifdef XEN
 637.657 +		current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
 637.658 +#else
 637.659 +		current->thread.fph[fph_index(regs, regnum)] = *fpval;
 637.660 +#endif
 637.661 +	} else {
 637.662 +		/*
 637.663 +		 * pt_regs or switch_stack ?
 637.664 +		 */
 637.665 +		if (FR_IN_SW(regnum)) {
 637.666 +			addr = (unsigned long)sw;
 637.667 +		} else {
 637.668 +			addr = (unsigned long)regs;
 637.669 +		}
 637.670 +
 637.671 +		DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
 637.672 +
 637.673 +		addr += FR_OFFS(regnum);
 637.674 +		*(struct ia64_fpreg *)addr = *fpval;
 637.675 +
 637.676 +		/*
 637.677 +		 * mark the low partition as being used now
 637.678 +		 *
 637.679 +		 * It is highly unlikely that this bit is not already set, but
 637.680 +		 * let's do it for safety.
 637.681 +		 */
 637.682 +		regs->cr_ipsr |= IA64_PSR_MFL;
 637.683 +	}
 637.684 +}
 637.685 +
 637.686 +/*
 637.687 + * These two inline functions generate the spilled versions of the constant
 637.688 + * floating-point registers, which can then be used with stfX.
 637.689 + */
 637.690 +static inline void
 637.691 +float_spill_f0 (struct ia64_fpreg *final)
 637.692 +{
 637.693 +	ia64_stf_spill(final, 0);
 637.694 +}
 637.695 +
 637.696 +static inline void
 637.697 +float_spill_f1 (struct ia64_fpreg *final)
 637.698 +{
 637.699 +	ia64_stf_spill(final, 1);
 637.700 +}
 637.701 +
 637.702 +static void
 637.703 +getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 637.704 +{
 637.705 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.706 +	unsigned long addr;
 637.707 +
 637.708 +	/*
 637.709 +	 * From EAS-2.5: FPDisableFault has higher priority than
 637.710 +	 * Unaligned Fault. Thus, when we get here, we know the partition is
 637.711 +	 * enabled.
 637.712 +	 *
 637.713 +	 * When regnum > 31, the register is still live and we need to force a save
 637.714 +	 * to current->thread.fph to get access to it.  See discussion in setfpreg()
 637.715 +	 * for reasons and other ways of doing this.
 637.716 +	 */
 637.717 +	if (regnum >= IA64_FIRST_ROTATING_FR) {
 637.718 +		ia64_flush_fph(current);
 637.719 +#ifdef XEN
 637.720 +		*fpval = current->arch._thread.fph[fph_index(regs, regnum)];
 637.721 +#else
 637.722 +		*fpval = current->thread.fph[fph_index(regs, regnum)];
 637.723 +#endif
 637.724 +	} else {
 637.725 +		/*
 637.726 +		 * f0 = 0.0, f1 = 1.0. Those registers are constant and thus
 637.727 +		 * not saved; we must generate their spilled form on the fly.
 637.728 +		 */
 637.729 +		switch(regnum) {
 637.730 +		case 0:
 637.731 +			float_spill_f0(fpval);
 637.732 +			break;
 637.733 +		case 1:
 637.734 +			float_spill_f1(fpval);
 637.735 +			break;
 637.736 +		default:
 637.737 +			/*
 637.738 +			 * pt_regs or switch_stack ?
 637.739 +			 */
 637.740 +			addr =  FR_IN_SW(regnum) ? (unsigned long)sw
 637.741 +						 : (unsigned long)regs;
 637.742 +
 637.743 +			DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
 637.744 +			       FR_IN_SW(regnum), addr, FR_OFFS(regnum));
 637.745 +
 637.746 +			addr  += FR_OFFS(regnum);
 637.747 +			*fpval = *(struct ia64_fpreg *)addr;
 637.748 +		}
 637.749 +	}
 637.750 +}
 637.751 +
 637.752 +
 637.753 +#ifdef XEN
 637.754 +void
 637.755 +#else
 637.756 +static void
 637.757 +#endif
 637.758 +getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
 637.759 +{
 637.760 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
 637.761 +	unsigned long addr, *unat;
 637.762 +
 637.763 +	if (regnum >= IA64_FIRST_STACKED_GR) {
 637.764 +		get_rse_reg(regs, regnum, val, nat);
 637.765 +		return;
 637.766 +	}
 637.767 +
 637.768 +	/*
 637.769 +	 * take care of r0 (read-only always evaluate to 0)
 637.770 +	 */
 637.771 +	if (regnum == 0) {
 637.772 +		*val = 0;
 637.773 +		if (nat)
 637.774 +			*nat = 0;
 637.775 +		return;
 637.776 +	}
 637.777 +
 637.778 +	/*
 637.779 +	 * Now look at registers in [0-31] range and init correct UNAT
 637.780 +	 */
 637.781 +	if (GR_IN_SW(regnum)) {
 637.782 +		addr = (unsigned long)sw;
 637.783 +		unat = &sw->ar_unat;
 637.784 +	} else {
 637.785 +		addr = (unsigned long)regs;
 637.786 +#ifdef  CONFIG_VTI
 637.787 +		unat = &regs->eml_unat;
 637.788 +#else   //CONFIG_VTI
 637.789 +		unat = &sw->caller_unat;
 637.790 +#endif  //CONFIG_VTI
 637.791 +	}
 637.792 +
 637.793 +	DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
 637.794 +
 637.795 +	addr += GR_OFFS(regnum);
 637.796 +
 637.797 +	*val  = *(unsigned long *)addr;
 637.798 +
 637.799 +	/*
 637.800 +	 * do it only when requested
 637.801 +	 */
 637.802 +	if (nat)
 637.803 +		*nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
 637.804 +}
 637.805 +
 637.806 +static void
 637.807 +emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
 637.808 +{
 637.809 +	/*
 637.810 +	 * IMPORTANT:
 637.811 +	 * Given the way we handle unaligned speculative loads, we should
 637.812 +	 * not get to this point in the code but we keep this sanity check,
 637.813 +	 * just in case.
 637.814 +	 */
 637.815 +	if (ld.x6_op == 1 || ld.x6_op == 3) {
 637.816 +		printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
 637.817 +		die_if_kernel("unaligned reference on speculative load with register update\n",
 637.818 +			      regs, 30);
 637.819 +	}
 637.820 +
 637.821 +
 637.822 +	/*
 637.823 +	 * at this point, we know that the base register to update is valid i.e.,
 637.824 +	 * it's not r0
 637.825 +	 */
 637.826 +	if (type == UPD_IMMEDIATE) {
 637.827 +		unsigned long imm;
 637.828 +
 637.829 +		/*
 637.830 +		 * Load +Imm: ldXZ r1=[r3],imm(9)
 637.831 +		 *
 637.832 +		 *
 637.833 +		 * form imm9: [13:19] contain the first 7 bits
 637.834 +		 */
 637.835 +		imm = ld.x << 7 | ld.imm;
 637.836 +
 637.837 +		/*
 637.838 +		 * sign extend (1+8bits) if m set
 637.839 +		 */
 637.840 +		if (ld.m) imm |= SIGN_EXT9;
 637.841 +
 637.842 +		/*
 637.843 +		 * ifa == r3 and we know that the NaT bit on r3 was clear so
 637.844 +		 * we can directly use ifa.
 637.845 +		 */
 637.846 +		ifa += imm;
 637.847 +
 637.848 +		setreg(ld.r3, ifa, 0, regs);
 637.849 +
 637.850 +		DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
 637.851 +
 637.852 +	} else if (ld.m) {
 637.853 +		unsigned long r2;
 637.854 +		int nat_r2;
 637.855 +
 637.856 +		/*
 637.857 +		 * Load +Reg Opcode: ldXZ r1=[r3],r2
 637.858 +		 *
 637.859 +		 * Note: that we update r3 even in the case of ldfX.a
 637.860 +		 * (where the load does not happen)
 637.861 +		 *
 637.862 +		 * The way the load algorithm works, we know that r3 does not
 637.863 +		 * have its NaT bit set (would have gotten NaT consumption
 637.864 +		 * before getting the unaligned fault). So we can use ifa
 637.865 +		 * which equals r3 at this point.
 637.866 +		 *
 637.867 +		 * IMPORTANT:
 637.868 +		 * The above statement holds ONLY because we know that we
 637.869 +		 * never reach this code when trying to do a ldX.s.
 637.870 +		 * If we ever make it to here on an ldfX.s then
 637.871 +		 */
 637.872 +		getreg(ld.imm, &r2, &nat_r2, regs);
 637.873 +
 637.874 +		ifa += r2;
 637.875 +
 637.876 +		/*
 637.877 +		 * propagate Nat r2 -> r3
 637.878 +		 */
 637.879 +		setreg(ld.r3, ifa, nat_r2, regs);
 637.880 +
 637.881 +		DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
 637.882 +	}
 637.883 +}
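
The immediate assembly in the UPD_IMMEDIATE branch can be checked in isolation: ld.imm supplies the low 7 bits, ld.x bit 7, and ld.m acts as the sign bit, which SIGN_EXT9 smears across the upper bits. A standalone sketch with hypothetical field values:

	#include <stdio.h>

	#define SIGN_EXT9	0xffffffffffffff00ul

	int main(void)
	{
		/* hypothetical decode: m=1, x=0, imm=0x70, i.e. the 9-bit value 0x170 */
		unsigned long m = 1, x = 0, imm7 = 0x70;
		unsigned long imm = x << 7 | imm7;

		if (m)
			imm |= SIGN_EXT9;
		printf("imm = %ld\n", (long) imm);	/* prints -144 */
		return 0;
	}
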
 637.884 +
 637.885 +
 637.886 +static int
 637.887 +emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 637.888 +{
 637.889 +	unsigned int len = 1 << ld.x6_sz;
 637.890 +	unsigned long val = 0;
 637.891 +
 637.892 +	/*
 637.893 +	 * r0, as target, doesn't need to be checked because Illegal Instruction
 637.894 +	 * faults have higher priority than unaligned faults.
 637.895 +	 *
 637.896 +	 * r0 cannot be found as the base as it would never generate an
 637.897 +	 * unaligned reference.
 637.898 +	 */
 637.899 +
 637.900 +	/*
 637.901 +	 * For ldX.a we emulate the load and also invalidate the ALAT entry.
 637.902 +	 * See comment below for explanation on how we handle ldX.a
 637.903 +	 */
 637.904 +
 637.905 +	if (len != 2 && len != 4 && len != 8) {
 637.906 +		DPRINT("unknown size: x6=%d\n", ld.x6_sz);
 637.907 +		return -1;
 637.908 +	}
 637.909 +	/* this assumes little-endian byte-order: */
 637.910 +	if (copy_from_user(&val, (void __user *) ifa, len))
 637.911 +		return -1;
 637.912 +	setreg(ld.r1, val, 0, regs);
 637.913 +
 637.914 +	/*
 637.915 +	 * check for updates on any kind of loads
 637.916 +	 */
 637.917 +	if (ld.op == 0x5 || ld.m)
 637.918 +		emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
 637.919 +
 637.920 +	/*
 637.921 +	 * handling of various loads (based on EAS2.4):
 637.922 +	 *
 637.923 +	 * ldX.acq (ordered load):
 637.924 +	 *	- acquire semantics would have been used, so force fence instead.
 637.925 +	 *
 637.926 +	 * ldX.c.clr (check load and clear):
 637.927 +	 *	- if we get to this handler, it's because the entry was not in the ALAT.
 637.928 +	 *	  Therefore the operation reverts to a normal load
 637.929 +	 *
 637.930 +	 * ldX.c.nc (check load no clear):
 637.931 +	 *	- same as previous one
 637.932 +	 *
 637.933 +	 * ldX.c.clr.acq (ordered check load and clear):
 637.934 +	 *	- same as above for c.clr part. The load needs to have acquire semantics. So
 637.935 +	 *	  we use the fence semantics which is stronger and thus ensures correctness.
 637.936 +	 *
 637.937 +	 * ldX.a (advanced load):
 637.938 +	 *	- suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
 637.939 +	 *	  address doesn't match requested size alignment. This means that we would
 637.940 +	 *	  possibly need more than one load to get the result.
 637.941 +	 *
 637.942 +	 *	  The load part can be handled just like a normal load, however the difficult
 637.943 +	 *	  part is to get the right thing into the ALAT. The critical piece of information
 637.944 +	 *	  is the base address of the load & its size. To do that, a ld.a must be executed,
 637.945 +	 *	  clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
 637.946 +	 *	  if we use the same target register, we will be okay for the check.a instruction.
 637.947 +	 *	  If we look at the store, basically a stX [r3]=r1 checks the ALAT  for any entry
 637.948 +	 *	  which would overlap within [r3,r3+X] (the size of the load was stored in the
 637.949 +	 *	  ALAT). If such an entry is found the entry is invalidated. But this is not good
 637.950 +	 *	  enough, take the following example:
 637.951 +	 *		r3=3
 637.952 +	 *		ld4.a r1=[r3]
 637.953 +	 *
 637.954 +	 *	  Could be emulated by doing:
 637.955 +	 *		ld1.a r1=[r3],1
 637.956 +	 *		store to temporary;
 637.957 +	 *		ld1.a r1=[r3],1
 637.958 +	 *		store & shift to temporary;
 637.959 +	 *		ld1.a r1=[r3],1
 637.960 +	 *		store & shift to temporary;
 637.961 +	 *		ld1.a r1=[r3]
 637.962 +	 *		store & shift to temporary;
 637.963 +	 *		r1=temporary
 637.964 +	 *
 637.965 +	 *	  So in this case, you would get the right value in r1 but the wrong info in
 637.966 +	 *	  the ALAT.  Notice that you could do it in reverse to finish with address 3
 637.967 +	 *	  but you would still get the size wrong.  To get the size right, one needs to
 637.968 +	 *	  execute exactly the same kind of load. You could do it from an aligned
 637.969 +	 *	  temporary location, but you would get the address wrong.
 637.970 +	 *
 637.971 +	 *	  So no matter what, it is not possible to emulate an advanced load
 637.972 +	 *	  correctly. But is that really critical?
 637.973 +	 *
 637.974 +	 *	  We will always convert ld.a into a normal load with ALAT invalidated.  This
 637.975 +	 *	  will enable the compiler to do optimizations where certain code paths after ld.a
 637.976 +	 *	  are not required to have ld.c/chk.a, e.g., code paths with no intervening stores.
 637.977 +	 *
 637.978 +	 *	  If there is a store after the advanced load, one must either do a ld.c.* or
 637.979 +	 *	  chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
 637.980 +	 *	  entry found in ALAT), and that's perfectly ok because:
 637.981 +	 *
 637.982 +	 *		- ld.c.*: if the entry is not present, a normal load is executed
 637.983 +	 *		- chk.a.*: if the entry is not present, execution jumps to recovery code
 637.984 +	 *
 637.985 +	 *	  In either case, the load can be potentially retried in another form.
 637.986 +	 *
 637.987 +	 *	  ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
 637.988 +	 *	  up a stale entry later). The register base update MUST also be performed.
 637.989 +	 */
 637.990 +
 637.991 +	/*
 637.992 +	 * when the load has the .acq completer then
 637.993 +	 * use ordering fence.
 637.994 +	 */
 637.995 +	if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
 637.996 +		mb();
 637.997 +
 637.998 +	/*
 637.999 +	 * invalidate ALAT entry in case of advanced load
637.1000 +	 */
637.1001 +	if (ld.x6_op == 0x2)
637.1002 +		invala_gr(ld.r1);
637.1003 +
637.1004 +	return 0;
637.1005 +}
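
The zero-extending load above (len bytes copied into the low end of a zeroed 8-byte value) only yields the right register image because ia64 runs little-endian here, as the inline comment notes. A host-side model:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char mem[8] = { 0x34, 0x12 };	/* an unaligned 2-byte value 0x1234 */
		unsigned long val = 0;

		memcpy(&val, mem, 2);	/* models copy_from_user(&val, ifa, len) */
		printf("val = %#lx\n", val);	/* 0x1234 on a little-endian host */
		return 0;
	}
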
637.1006 +
637.1007 +static int
637.1008 +emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
637.1009 +{
637.1010 +	unsigned long r2;
637.1011 +	unsigned int len = 1 << ld.x6_sz;
637.1012 +
637.1013 +	/*
637.1014 +	 * if we get to this handler, the NaT bits on both r3 and r2 have already
637.1015 +	 * been checked, so we don't need to do it again
637.1016 +	 *
637.1017 +	 * extract the value to be stored
637.1018 +	 */
637.1019 +	getreg(ld.imm, &r2, NULL, regs);
637.1020 +
637.1021 +	/*
637.1022 +	 * we rely on the macros in unaligned.h for now i.e.,
637.1023 +	 * we let the compiler figure out how to read memory gracefully.
637.1024 +	 *
637.1025 +	 * We need this switch/case because of the way the inline function
637.1026 +	 * works. The code is optimized by the compiler and looks like
637.1027 +	 * a single switch/case.
637.1028 +	 */
637.1029 +	DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
637.1030 +
637.1031 +	if (len != 2 && len != 4 && len != 8) {
637.1032 +		DPRINT("unknown size: x6=%d\n", ld.x6_sz);
637.1033 +		return -1;
637.1034 +	}
637.1035 +
637.1036 +	/* this assumes little-endian byte-order: */
637.1037 +	if (copy_to_user((void __user *) ifa, &r2, len))
637.1038 +		return -1;
637.1039 +
637.1040 +	/*
637.1041 +	 * stX [r3]=r2,imm(9)
637.1042 +	 *
637.1043 +	 * NOTE:
637.1044 +	 * ld.r3 can never be r0, because r0 would not generate an
637.1045 +	 * unaligned access.
637.1046 +	 */
637.1047 +	if (ld.op == 0x5) {
637.1048 +		unsigned long imm;
637.1049 +
637.1050 +		/*
637.1051 +		 * form imm9: bits [12:6] contain the first 7 bits
637.1052 +		 */
637.1053 +		imm = ld.x << 7 | ld.r1;
637.1054 +		/*
637.1055 +		 * sign extend (8bits) if m set
637.1056 +		 */
637.1057 +		if (ld.m) imm |= SIGN_EXT9;
637.1058 +		/*
637.1059 +		 * ifa == r3 (NaT is necessarily cleared)
637.1060 +		 */
637.1061 +		ifa += imm;
637.1062 +
637.1063 +		DPRINT("imm=%lx r3=%lx\n", imm, ifa);
637.1064 +
637.1065 +		setreg(ld.r3, ifa, 0, regs);
637.1066 +	}
637.1067 +	/*
637.1068 +	 * we don't have alat_invalidate_multiple() so we need
637.1069 +	 * to do the complete flush :-<<
637.1070 +	 */
637.1071 +	ia64_invala();
637.1072 +
637.1073 +	/*
637.1074 +	 * stX.rel: use fence instead of release
637.1075 +	 */
637.1076 +	if (ld.x6_op == 0xd)
637.1077 +		mb();
637.1078 +
637.1079 +	return 0;
637.1080 +}
637.1081 +
637.1082 +/*
637.1083 + * floating point operations sizes in bytes
637.1084 + */
637.1085 +static const unsigned char float_fsz[4]={
637.1086 +	10, /* extended precision (e) */
637.1087 +	8,  /* integer (8)            */
637.1088 +	4,  /* single precision (s)   */
637.1089 +	8   /* double precision (d)   */
637.1090 +};
637.1091 +
637.1092 +static inline void
637.1093 +mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1094 +{
637.1095 +	ia64_ldfe(6, init);
637.1096 +	ia64_stop();
637.1097 +	ia64_stf_spill(final, 6);
637.1098 +}
637.1099 +
637.1100 +static inline void
637.1101 +mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1102 +{
637.1103 +	ia64_ldf8(6, init);
637.1104 +	ia64_stop();
637.1105 +	ia64_stf_spill(final, 6);
637.1106 +}
637.1107 +
637.1108 +static inline void
637.1109 +mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1110 +{
637.1111 +	ia64_ldfs(6, init);
637.1112 +	ia64_stop();
637.1113 +	ia64_stf_spill(final, 6);
637.1114 +}
637.1115 +
637.1116 +static inline void
637.1117 +mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1118 +{
637.1119 +	ia64_ldfd(6, init);
637.1120 +	ia64_stop();
637.1121 +	ia64_stf_spill(final, 6);
637.1122 +}
637.1123 +
637.1124 +static inline void
637.1125 +float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1126 +{
637.1127 +	ia64_ldf_fill(6, init);
637.1128 +	ia64_stop();
637.1129 +	ia64_stfe(final, 6);
637.1130 +}
637.1131 +
637.1132 +static inline void
637.1133 +float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1134 +{
637.1135 +	ia64_ldf_fill(6, init);
637.1136 +	ia64_stop();
637.1137 +	ia64_stf8(final, 6);
637.1138 +}
637.1139 +
637.1140 +static inline void
637.1141 +float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1142 +{
637.1143 +	ia64_ldf_fill(6, init);
637.1144 +	ia64_stop();
637.1145 +	ia64_stfs(final, 6);
637.1146 +}
637.1147 +
637.1148 +static inline void
637.1149 +float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
637.1150 +{
637.1151 +	ia64_ldf_fill(6, init);
637.1152 +	ia64_stop();
637.1153 +	ia64_stfd(final, 6);
637.1154 +}
637.1155 +
637.1156 +static int
637.1157 +emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
637.1158 +{
637.1159 +	struct ia64_fpreg fpr_init[2];
637.1160 +	struct ia64_fpreg fpr_final[2];
637.1161 +	unsigned long len = float_fsz[ld.x6_sz];
637.1162 +
637.1163 +	/*
637.1164 +	 * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
637.1165 +	 * higher priority than unaligned faults.
637.1166 +	 *
637.1167 +	 * r0 cannot be found as the base as it would never generate an unaligned
637.1168 +	 * reference.
637.1169 +	 */
637.1170 +
637.1171 +	/*
637.1172 +	 * make sure we get clean buffers
637.1173 +	 */
637.1174 +	memset(&fpr_init, 0, sizeof(fpr_init));
637.1175 +	memset(&fpr_final, 0, sizeof(fpr_final));
637.1176 +
637.1177 +	/*
637.1178 +	 * ldfpX.a: we don't try to emulate anything but we must
637.1179 +	 * invalidate the ALAT entry and execute updates, if any.
637.1180 +	 */
637.1181 +	if (ld.x6_op != 0x2) {
637.1182 +		/*
637.1183 +		 * This assumes little-endian byte-order.  Note that there is no "ldfpe"
637.1184 +		 * instruction:
637.1185 +		 */
637.1186 +		if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
637.1187 +		    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
637.1188 +			return -1;
637.1189 +
637.1190 +		DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
637.1191 +		DDUMP("fpr_init =", &fpr_init, 2*len);
637.1192 +		/*
637.1193 +		 * XXX fixme
637.1194 +		 * Could optimize inlines by using ldfpX & 2 spills
637.1195 +		 */
637.1196 +		switch( ld.x6_sz ) {
637.1197 +			case 0:
637.1198 +				mem2float_extended(&fpr_init[0], &fpr_final[0]);
637.1199 +				mem2float_extended(&fpr_init[1], &fpr_final[1]);
637.1200 +				break;
637.1201 +			case 1:
637.1202 +				mem2float_integer(&fpr_init[0], &fpr_final[0]);
637.1203 +				mem2float_integer(&fpr_init[1], &fpr_final[1]);
637.1204 +				break;
637.1205 +			case 2:
637.1206 +				mem2float_single(&fpr_init[0], &fpr_final[0]);
637.1207 +				mem2float_single(&fpr_init[1], &fpr_final[1]);
637.1208 +				break;
637.1209 +			case 3:
637.1210 +				mem2float_double(&fpr_init[0], &fpr_final[0]);
637.1211 +				mem2float_double(&fpr_init[1], &fpr_final[1]);
637.1212 +				break;
637.1213 +		}
637.1214 +		DDUMP("fpr_final =", &fpr_final, 2*len);
637.1215 +		/*
637.1216 +		 * XXX fixme
637.1217 +		 *
637.1218 +		 * A possible optimization would be to drop fpr_final and directly
637.1219 +		 * use the storage from the saved context i.e., the actual final
637.1220 +		 * destination (pt_regs, switch_stack or thread structure).
637.1221 +		 */
637.1222 +		setfpreg(ld.r1, &fpr_final[0], regs);
637.1223 +		setfpreg(ld.imm, &fpr_final[1], regs);
637.1224 +	}
637.1225 +
637.1226 +	/*
637.1227 +	 * Check for updates: only immediate updates are available for this
637.1228 +	 * instruction.
637.1229 +	 */
637.1230 +	if (ld.m) {
637.1231 +		/*
637.1232 +		 * the immediate is implicit given the ldsz of the operation:
637.1233 +		 * single: 8 (2x4) and for all others it's 16 (2x8)
637.1234 +		 */
637.1235 +		ifa += len<<1;
637.1236 +
637.1237 +		/*
637.1238 +		 * IMPORTANT:
637.1239 +		 * the fact that we force the NaT of r3 to zero is ONLY valid
637.1240 +		 * as long as we don't come here with an ldfpX.s.
637.1241 +		 * For this reason we keep this sanity check.
637.1242 +		 */
637.1243 +		if (ld.x6_op == 1 || ld.x6_op == 3)
637.1244 +			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
637.1245 +			       __FUNCTION__);
637.1246 +
637.1247 +		setreg(ld.r3, ifa, 0, regs);
637.1248 +	}
637.1249 +
637.1250 +	/*
637.1251 +	 * Invalidate ALAT entries, if any, for both registers.
637.1252 +	 */
637.1253 +	if (ld.x6_op == 0x2) {
637.1254 +		invala_fr(ld.r1);
637.1255 +		invala_fr(ld.imm);
637.1256 +	}
637.1257 +	return 0;
637.1258 +}
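Since the pair forms always transfer two registers, the post-increment applied above is simply twice the operand size. A user-space sketch of the resulting r3 updates (float_fsz copied from the table above):

#include <stdio.h>

static const unsigned char float_fsz[4] = { 10, 8, 4, 8 };	/* as above */

int main(void)
{
	/* ldfps bumps r3 by 8 (2x4); ldfp8 and ldfpd bump it by 16 (2x8). */
	printf("s=%d 8=%d d=%d\n",
	       float_fsz[2] << 1, float_fsz[1] << 1, float_fsz[3] << 1);
	return 0;
}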
637.1259 +
637.1260 +
637.1261 +static int
637.1262 +emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
637.1263 +{
637.1264 +	struct ia64_fpreg fpr_init;
637.1265 +	struct ia64_fpreg fpr_final;
637.1266 +	unsigned long len = float_fsz[ld.x6_sz];
637.1267 +
637.1268 +	/*
637.1269 +	 * fr0 & fr1 don't need to be checked because Illegal Instruction
637.1270 +	 * faults have higher priority than unaligned faults.
637.1271 +	 *
637.1272 +	 * r0 cannot be found as the base as it would never generate an
637.1273 +	 * unaligned reference.
637.1274 +	 */
637.1275 +
637.1276 +	/*
637.1277 +	 * make sure we get clean buffers
637.1278 +	 */
637.1279 +	memset(&fpr_init, 0, sizeof(fpr_init));
637.1280 +	memset(&fpr_final, 0, sizeof(fpr_final));
637.1281 +
637.1282 +	/*
637.1283 +	 * ldfX.a: we don't try to emulate anything, but we must
637.1284 +	 * invalidate the ALAT entry.
637.1285 +	 * See comments in ldX for descriptions on how the various loads are handled.
637.1286 +	 */
637.1287 +	if (ld.x6_op != 0x2) {
637.1288 +		if (copy_from_user(&fpr_init, (void __user *) ifa, len))
637.1289 +			return -1;
637.1290 +
637.1291 +		DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
637.1292 +		DDUMP("fpr_init =", &fpr_init, len);
637.1293 +		/*
637.1294 +		 * we only do something for x6_op={0,8,9}
637.1295 +		 */
637.1296 +		switch( ld.x6_sz ) {
637.1297 +			case 0:
637.1298 +				mem2float_extended(&fpr_init, &fpr_final);
637.1299 +				break;
637.1300 +			case 1:
637.1301 +				mem2float_integer(&fpr_init, &fpr_final);
637.1302 +				break;
637.1303 +			case 2:
637.1304 +				mem2float_single(&fpr_init, &fpr_final);
637.1305 +				break;
637.1306 +			case 3:
637.1307 +				mem2float_double(&fpr_init, &fpr_final);
637.1308 +				break;
637.1309 +		}
637.1310 +		DDUMP("fpr_final =", &fpr_final, len);
637.1311 +		/*
637.1312 +		 * XXX fixme
637.1313 +		 *
637.1314 +		 * A possible optimization would be to drop fpr_final and directly
637.1315 +		 * use the storage from the saved context i.e., the actual final
637.1316 +		 * destination (pt_regs, switch_stack or thread structure).
637.1317 +		 */
637.1318 +		setfpreg(ld.r1, &fpr_final, regs);
637.1319 +	}
637.1320 +
637.1321 +	/*
637.1322 +	 * check for updates on any loads
637.1323 +	 */
637.1324 +	if (ld.op == 0x7 || ld.m)
637.1325 +		emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
637.1326 +
637.1327 +	/*
637.1328 +	 * invalidate ALAT entry in case of advanced floating point loads
637.1329 +	 */
637.1330 +	if (ld.x6_op == 0x2)
637.1331 +		invala_fr(ld.r1);
637.1332 +
637.1333 +	return 0;
637.1334 +}
637.1335 +
637.1336 +
637.1337 +static int
637.1338 +emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
637.1339 +{
637.1340 +	struct ia64_fpreg fpr_init;
637.1341 +	struct ia64_fpreg fpr_final;
637.1342 +	unsigned long len = float_fsz[ld.x6_sz];
637.1343 +
637.1344 +	/*
637.1345 +	 * make sure we get clean buffers
637.1346 +	 */
637.1347 +	memset(&fpr_init, 0, sizeof(fpr_init));
637.1348 +	memset(&fpr_final, 0, sizeof(fpr_final));
637.1349 +
637.1350 +	/*
637.1351 +	 * if we get to this handler, the NaT bits on both r3 and r2 have
637.1352 +	 * already been checked, so we don't need to do it again
637.1353 +	 *
637.1354 +	 * extract the value to be stored
637.1355 +	 */
637.1356 +	getfpreg(ld.imm, &fpr_init, regs);
637.1357 +	/*
637.1358 +	 * during this step, we extract the spilled registers from the saved
637.1359 +	 * context, i.e., we refill. Then we store (no spill) to a temporary
637.1360 +	 * aligned location.
637.1361 +	 */
637.1362 +	switch( ld.x6_sz ) {
637.1363 +		case 0:
637.1364 +			float2mem_extended(&fpr_init, &fpr_final);
637.1365 +			break;
637.1366 +		case 1:
637.1367 +			float2mem_integer(&fpr_init, &fpr_final);
637.1368 +			break;
637.1369 +		case 2:
637.1370 +			float2mem_single(&fpr_init, &fpr_final);
637.1371 +			break;
637.1372 +		case 3:
637.1373 +			float2mem_double(&fpr_init, &fpr_final);
637.1374 +			break;
637.1375 +	}
637.1376 +	DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
637.1377 +	DDUMP("fpr_init =", &fpr_init, len);
637.1378 +	DDUMP("fpr_final =", &fpr_final, len);
637.1379 +
637.1380 +	if (copy_to_user((void __user *) ifa, &fpr_final, len))
637.1381 +		return -1;
637.1382 +
637.1383 +	/*
637.1384 +	 * stfX [r3]=r2,imm(9)
637.1385 +	 *
637.1386 +	 * NOTE:
637.1387 +	 * ld.r3 can never be r0, because r0 would not generate an
637.1388 +	 * unaligned access.
637.1389 +	 */
637.1390 +	if (ld.op == 0x7) {
637.1391 +		unsigned long imm;
637.1392 +
637.1393 +		/*
637.1394 +		 * form imm9: [12:6] contain first 7bits
637.1395 +		 */
637.1396 +		imm = ld.x << 7 | ld.r1;
637.1397 +		/*
637.1398 +		 * sign extend (8bits) if m set
637.1399 +		 */
637.1400 +		if (ld.m)
637.1401 +			imm |= SIGN_EXT9;
637.1402 +		/*
637.1403 +		 * ifa == r3 (NaT is necessarily cleared)
637.1404 +		 */
637.1405 +		ifa += imm;
637.1406 +
637.1407 +		DPRINT("imm=%lx r3=%lx\n", imm, ifa);
637.1408 +
637.1409 +		setreg(ld.r3, ifa, 0, regs);
637.1410 +	}
637.1411 +	/*
637.1412 +	 * we don't have alat_invalidate_multiple() so we need
637.1413 +	 * to do the complete flush :-<<
637.1414 +	 */
637.1415 +	ia64_invala();
637.1416 +
637.1417 +	return 0;
637.1418 +}
637.1419 +
637.1420 +/*
637.1421 + * Make sure we log the unaligned access, so that user/sysadmin can notice it and
637.1422 + * eventually fix the program.  However, we don't want to do that for every access so we
637.1423 + * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
637.1424 + * either...
637.1425 + */
637.1426 +static int
637.1427 +within_logging_rate_limit (void)
637.1428 +{
637.1429 +	static unsigned long count, last_time;
637.1430 +
637.1431 +	if (jiffies - last_time > 5*HZ)
637.1432 +		count = 0;
637.1433 +	if (++count < 5) {
637.1434 +		last_time = jiffies;
637.1435 +		return 1;
637.1436 +	}
637.1437 +	return 0;
637.1439 +}
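The pacing can be exercised outside the kernel by standing in for jiffies and HZ; a sketch with stand-in values (the function body is copied from above):

#include <stdio.h>

#define HZ 1024			/* stand-in tick rate */
static unsigned long jiffies;	/* stand-in for the kernel's counter */

static int within_logging_rate_limit (void)
{
	static unsigned long count, last_time;

	if (jiffies - last_time > 5*HZ)
		count = 0;
	if (++count < 5) {
		last_time = jiffies;
		return 1;
	}
	return 0;
}

int main(void)
{
	int i, logged = 0;

	/* 100 back-to-back faults in the same tick: only the first few log. */
	for (i = 0; i < 100; i++)
		logged += within_logging_rate_limit();
	printf("logged %d of 100\n", logged);	/* logged 4 of 100 */
	return 0;
}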
637.1440 +
637.1441 +void
637.1442 +ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
637.1443 +{
637.1444 +#ifdef XEN
637.1445 +	printk("ia64_handle_unaligned: called, not working yet\n");
637.1446 +#else
637.1447 +	struct ia64_psr *ipsr = ia64_psr(regs);
637.1448 +	mm_segment_t old_fs = get_fs();
637.1449 +	unsigned long bundle[2];
637.1450 +	unsigned long opcode;
637.1451 +	struct siginfo si;
637.1452 +	const struct exception_table_entry *eh = NULL;
637.1453 +	union {
637.1454 +		unsigned long l;
637.1455 +		load_store_t insn;
637.1456 +	} u;
637.1457 +	int ret = -1;
637.1458 +
637.1459 +	if (ia64_psr(regs)->be) {
637.1460 +		/* we don't support big-endian accesses */
637.1461 +		die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
637.1462 +		goto force_sigbus;
637.1463 +	}
637.1464 +
637.1465 +	/*
637.1466 +	 * Treat kernel accesses for which there is an exception handler entry the same as
637.1467 +	 * user-level unaligned accesses.  Otherwise, a clever program could trick this
637.1468 +	 * handler into reading arbitrary kernel addresses...
637.1469 +	 */
637.1470 +	if (!user_mode(regs))
637.1471 +		eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
637.1472 +	if (user_mode(regs) || eh) {
637.1473 +		if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
637.1474 +			goto force_sigbus;
637.1475 +
637.1476 +		if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
637.1477 +		    && within_logging_rate_limit())
637.1478 +		{
637.1479 +			char buf[200];	/* comm[] is at most 16 bytes... */
637.1480 +			size_t len;
637.1481 +
637.1482 +			len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
637.1483 +				      "ip=0x%016lx\n\r", current->comm, current->pid,
637.1484 +				      ifa, regs->cr_iip + ipsr->ri);
637.1485 +			/*
637.1486 +			 * Don't call tty_write_message() if we're in the kernel; we might
637.1487 +			 * be holding locks...
637.1488 +			 */
637.1489 +			if (user_mode(regs))
637.1490 +				tty_write_message(current->signal->tty, buf);
637.1491 +			buf[len-1] = '\0';	/* drop '\r' */
637.1492 +			printk(KERN_WARNING "%s", buf);	/* watch for command names containing %s */
637.1493 +		}
637.1494 +	} else {
637.1495 +		if (within_logging_rate_limit())
637.1496 +			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
637.1497 +			       ifa, regs->cr_iip + ipsr->ri);
637.1498 +		set_fs(KERNEL_DS);
637.1499 +	}
637.1500 +
637.1501 +	DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
637.1502 +	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
637.1503 +
637.1504 +	if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
637.1505 +		goto failure;
637.1506 +
637.1507 +	/*
637.1508 +	 * extract the instruction from the bundle given the slot number
637.1509 +	 */
637.1510 +	switch (ipsr->ri) {
637.1511 +	      case 0: u.l = (bundle[0] >>  5); break;
637.1512 +	      case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
637.1513 +	      case 2: u.l = (bundle[1] >> 23); break;
637.1514 +	}
637.1515 +	opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
637.1516 +
637.1517 +	DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
637.1518 +	       "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
637.1519 +	       u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
637.1520 +
637.1521 +	/*
637.1522 +	 * IMPORTANT:
637.1523 +	 * Notice that the switch statement DOES NOT cover all possible instructions
637.1524 +	 * that DO generate unaligned references. This is deliberate because for some
637.1525 +	 * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
637.1526 +	 * is WRONG to try and emulate. Here is a list of instructions we don't emulate, i.e.,
637.1527 +	 * the program will get a signal and die:
637.1528 +	 *
637.1529 +	 *	load/store:
637.1530 +	 *		- ldX.spill
637.1531 +	 *		- stX.spill
637.1532 +	 *	Reason: RNATs are based on addresses
637.1533 +	 *
637.1534 +	 *	synchronization:
637.1535 +	 *		- cmpxchg
637.1536 +	 *		- fetchadd
637.1537 +	 *		- xchg
637.1538 +	 *	Reason: ATOMIC operations cannot be emulated properly using multiple
637.1539 +	 *	        instructions.
637.1540 +	 *
637.1541 +	 *	speculative loads:
637.1542 +	 *		- ldX.sZ
637.1543 +	 *	Reason: side effects; the code must be ready to deal with failure, so it
637.1544 +	 *		is simpler to let the load fail.
637.1545 +	 * ---------------------------------------------------------------------------------
637.1546 +	 * XXX fixme
637.1547 +	 *
637.1548 +	 * I would like to get rid of this switch case and do something
637.1549 +	 * more elegant.
637.1550 +	 */
637.1551 +	switch (opcode) {
637.1552 +	      case LDS_OP:
637.1553 +	      case LDSA_OP:
637.1554 +	      case LDS_IMM_OP:
637.1555 +	      case LDSA_IMM_OP:
637.1556 +	      case LDFS_OP:
637.1557 +	      case LDFSA_OP:
637.1558 +	      case LDFS_IMM_OP:
637.1559 +		/*
637.1560 +		 * The instruction will be retried with deferred exceptions turned on, and
637.1561 +		 * we should get the NaT bit installed.
637.1562 +		 *
637.1563 +		 * IMPORTANT: When PSR_ED is set, the register & immediate update forms
637.1564 +		 * are actually executed even though the operation failed. So we don't
637.1565 +		 * need to take care of this.
637.1566 +		 */
637.1567 +		DPRINT("forcing PSR_ED\n");
637.1568 +		regs->cr_ipsr |= IA64_PSR_ED;
637.1569 +		goto done;
637.1570 +
637.1571 +	      case LD_OP:
637.1572 +	      case LDA_OP:
637.1573 +	      case LDBIAS_OP:
637.1574 +	      case LDACQ_OP:
637.1575 +	      case LDCCLR_OP:
637.1576 +	      case LDCNC_OP:
637.1577 +	      case LDCCLRACQ_OP:
637.1578 +	      case LD_IMM_OP:
637.1579 +	      case LDA_IMM_OP:
637.1580 +	      case LDBIAS_IMM_OP:
637.1581 +	      case LDACQ_IMM_OP:
637.1582 +	      case LDCCLR_IMM_OP:
637.1583 +	      case LDCNC_IMM_OP:
637.1584 +	      case LDCCLRACQ_IMM_OP:
637.1585 +		ret = emulate_load_int(ifa, u.insn, regs);
637.1586 +		break;
637.1587 +
637.1588 +	      case ST_OP:
637.1589 +	      case STREL_OP:
637.1590 +	      case ST_IMM_OP:
637.1591 +	      case STREL_IMM_OP:
637.1592 +		ret = emulate_store_int(ifa, u.insn, regs);
637.1593 +		break;
637.1594 +
637.1595 +	      case LDF_OP:
637.1596 +	      case LDFA_OP:
637.1597 +	      case LDFCCLR_OP:
637.1598 +	      case LDFCNC_OP:
637.1599 +	      case LDF_IMM_OP:
637.1600 +	      case LDFA_IMM_OP:
637.1601 +	      case LDFCCLR_IMM_OP:
637.1602 +	      case LDFCNC_IMM_OP:
637.1603 +		if (u.insn.x)
637.1604 +			ret = emulate_load_floatpair(ifa, u.insn, regs);
637.1605 +		else
637.1606 +			ret = emulate_load_float(ifa, u.insn, regs);
637.1607 +		break;
637.1608 +
637.1609 +	      case STF_OP:
637.1610 +	      case STF_IMM_OP:
637.1611 +		ret = emulate_store_float(ifa, u.insn, regs);
637.1612 +		break;
637.1613 +
637.1614 +	      default:
637.1615 +		goto failure;
637.1616 +	}
637.1617 +	DPRINT("ret=%d\n", ret);
637.1618 +	if (ret)
637.1619 +		goto failure;
637.1620 +
637.1621 +	if (ipsr->ri == 2)
637.1622 +		/*
637.1623 +		 * given today's architecture this case is not likely to happen because a
637.1624 +		 * memory access instruction (M) can never be in the last slot of a
637.1625 +		 * bundle. But let's keep it for now.
637.1626 +		 */
637.1627 +		regs->cr_iip += 16;
637.1628 +	ipsr->ri = (ipsr->ri + 1) % 3;	/* slot 2 wraps to slot 0 of the next bundle */
637.1629 +
637.1630 +	DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
637.1631 +  done:
637.1632 +	set_fs(old_fs);		/* restore original address limit */
637.1633 +	return;
637.1634 +
637.1635 +  failure:
637.1636 +	/* something went wrong... */
637.1637 +	if (!user_mode(regs)) {
637.1638 +		if (eh) {
637.1639 +			ia64_handle_exception(regs, eh);
637.1640 +			goto done;
637.1641 +		}
637.1642 +		die_if_kernel("error during unaligned kernel access\n", regs, ret);
637.1643 +		/* NOT_REACHED */
637.1644 +	}
637.1645 +  force_sigbus:
637.1646 +	si.si_signo = SIGBUS;
637.1647 +	si.si_errno = 0;
637.1648 +	si.si_code = BUS_ADRALN;
637.1649 +	si.si_addr = (void __user *) ifa;
637.1650 +	si.si_flags = 0;
637.1651 +	si.si_isr = 0;
637.1652 +	si.si_imm = 0;
637.1653 +	force_sig_info(SIGBUS, &si, current);
637.1654 +	goto done;
637.1655 +#endif
637.1656 +}
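For reference, the slot extraction in the switch above can be mirrored stand-alone. In this sketch bundle_slot is a hypothetical helper; unlike the handler, which lets the load_store_t bitfields discard the upper bits, it masks the result to 41 bits explicitly (assumes a 64-bit unsigned long, as on ia64):

#include <stdio.h>

/* Extract the 41-bit instruction in slot ri (0-2) of a 16-byte little-endian
 * bundle: slot 0 starts at bit 5, slot 1 at bit 46, slot 2 at bit 87. */
static unsigned long bundle_slot(const unsigned long bundle[2], unsigned int ri)
{
	unsigned long insn = 0;

	switch (ri) {
	case 0: insn = bundle[0] >> 5; break;
	case 1: insn = (bundle[0] >> 46) | (bundle[1] << 18); break;
	case 2: insn = bundle[1] >> 23; break;
	}
	return insn & ((1ul << 41) - 1);
}

int main(void)
{
	unsigned long bundle[2] = { 0x0123456789abcdeful, 0xfedcba9876543210ul };

	printf("slot 1 = %#lx\n", bundle_slot(bundle, 1));
	return 0;
}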
   638.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   638.2 +++ b/xen/arch/ia64/linux/cmdline.c	Fri Aug 26 09:05:43 2005 +0000
   638.3 @@ -0,0 +1,120 @@
   638.4 +/*
   638.5 + * linux/lib/cmdline.c
   638.6 + * Helper functions generally used for parsing kernel command line
   638.7 + * and module options.
   638.8 + *
   638.9 + * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
  638.10 + *
  638.11 + * This source code is licensed under the GNU General Public License,
  638.12 + * Version 2.  See the file COPYING for more details.
  638.13 + *
  638.14 + * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
  638.15 + *
  638.16 + */
  638.17 +
  638.18 +#include <linux/module.h>
  638.19 +#include <linux/kernel.h>
  638.20 +#include <linux/string.h>
  638.21 +
  638.22 +
  638.23 +/**
  638.24 + *	get_option - Parse integer from an option string
  638.25 + *	@str: option string
  638.26 + *	@pint: (output) integer value parsed from @str
  638.27 + *
  638.28 + *	Read an int from an option string; if available accept a subsequent
  638.29 + *	comma as well.
  638.30 + *
  638.31 + *	Return values:
  638.32 + *	0 : no int in string
  638.33 + *	1 : int found, no subsequent comma
  638.34 + *	2 : int found including a subsequent comma
  638.35 + */
  638.36 +
  638.37 +int get_option (char **str, int *pint)
  638.38 +{
  638.39 +	char *cur = *str;
  638.40 +
  638.41 +	if (!cur || !(*cur))
  638.42 +		return 0;
  638.43 +	*pint = simple_strtol (cur, str, 0);
  638.44 +	if (cur == *str)
  638.45 +		return 0;
  638.46 +	if (**str == ',') {
  638.47 +		(*str)++;
  638.48 +		return 2;
  638.49 +	}
  638.50 +
  638.51 +	return 1;
  638.52 +}
  638.53 +
  638.54 +/**
  638.55 + *	get_options - Parse a string into a list of integers
  638.56 + *	@str: String to be parsed
  638.57 + *	@nints: size of integer array
  638.58 + *	@ints: integer array
  638.59 + *
  638.60 + *	This function parses a string containing a comma-separated
  638.61 + *	list of integers.  The parse halts when the array is
  638.62 + *	full, or when no more numbers can be retrieved from the
  638.63 + *	string.
  638.64 + *
638.65 + *	Return value is a pointer to the character in the string which
638.66 + *	caused the parse to end (typically a null terminator, if @str is
638.67 + *	completely parseable).
  638.68 + */
  638.69 + 
  638.70 +char *get_options(const char *str, int nints, int *ints)
  638.71 +{
  638.72 +	int res, i = 1;
  638.73 +
  638.74 +	while (i < nints) {
  638.75 +		res = get_option ((char **)&str, ints + i);
  638.76 +		if (res == 0)
  638.77 +			break;
  638.78 +		i++;
  638.79 +		if (res == 1)
  638.80 +			break;
  638.81 +	}
  638.82 +	ints[0] = i - 1;
  638.83 +	return (char *)str;
  638.84 +}
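A user-space reduction of the two parsers shows the array contract: ints[0] receives the count and the parsed values follow. Everything here is illustrative; strtol stands in for the kernel's simple_strtol:

#include <stdio.h>
#include <stdlib.h>

static int get_option(char **str, int *pint)
{
	char *cur = *str;

	if (!cur || !*cur)
		return 0;
	*pint = strtol(cur, str, 0);
	if (cur == *str)
		return 0;
	if (**str == ',') {
		(*str)++;
		return 2;
	}
	return 1;
}

static char *get_options(const char *str, int nints, int *ints)
{
	int res, i = 1;

	while (i < nints) {
		res = get_option((char **)&str, ints + i);
		if (res == 0)
			break;
		i++;
		if (res == 1)
			break;
	}
	ints[0] = i - 1;
	return (char *)str;
}

int main(void)
{
	int ints[5];

	get_options("1,2,3", 5, ints);
	printf("count=%d values=%d,%d,%d\n", ints[0], ints[1], ints[2], ints[3]);
	/* count=3 values=1,2,3 */
	return 0;
}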
  638.85 +
  638.86 +/**
  638.87 + *	memparse - parse a string with mem suffixes into a number
  638.88 + *	@ptr: Where parse begins
  638.89 + *	@retptr: (output) Pointer to next char after parse completes
  638.90 + *
  638.91 + *	Parses a string into a number.  The number stored at @ptr is
  638.92 + *	potentially suffixed with %K (for kilobytes, or 1024 bytes),
  638.93 + *	%M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
638.94 + *	1073741824 bytes).  If the number is suffixed with K, M, or G, then
  638.95 + *	the return value is the number multiplied by one kilobyte, one
  638.96 + *	megabyte, or one gigabyte, respectively.
  638.97 + */
  638.98 +
  638.99 +unsigned long long memparse (char *ptr, char **retptr)
 638.100 +{
 638.101 +	unsigned long long ret = simple_strtoull (ptr, retptr, 0);
 638.102 +
 638.103 +	switch (**retptr) {
 638.104 +	case 'G':
 638.105 +	case 'g':
638.106 +		ret <<= 10;	/* fall through */
 638.107 +	case 'M':
 638.108 +	case 'm':
638.109 +		ret <<= 10;	/* fall through */
 638.110 +	case 'K':
 638.111 +	case 'k':
 638.112 +		ret <<= 10;
 638.113 +		(*retptr)++;
 638.114 +	default:
 638.115 +		break;
 638.116 +	}
 638.117 +	return ret;
 638.118 +}
 638.119 +
 638.120 +
 638.121 +EXPORT_SYMBOL(memparse);
 638.122 +EXPORT_SYMBOL(get_option);
 638.123 +EXPORT_SYMBOL(get_options);
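The suffix handling relies on the fall-throughs annotated in memparse above: each matched case shifts by a further 10 bits. A user-space sketch (strtoull stands in for the kernel's simple_strtoull):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse(char *ptr, char **retptr)
{
	unsigned long long ret = strtoull(ptr, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		ret <<= 10;	/* fall through */
	case 'M': case 'm':
		ret <<= 10;	/* fall through */
	case 'K': case 'k':
		ret <<= 10;
		(*retptr)++;
	default:
		break;
	}
	return ret;
}

int main(void)
{
	char *end;

	printf("%llu\n", memparse("64K", &end));	/* 65536 */
	printf("%llu\n", memparse("16M", &end));	/* 16777216 */
	printf("%llu\n", memparse("2G", &end));		/* 2147483648 */
	return 0;
}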
   639.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   639.2 +++ b/xen/arch/ia64/linux/efi_stub.S	Fri Aug 26 09:05:43 2005 +0000
   639.3 @@ -0,0 +1,86 @@
   639.4 +/*
   639.5 + * EFI call stub.
   639.6 + *
   639.7 + * Copyright (C) 1999-2001 Hewlett-Packard Co
   639.8 + *	David Mosberger <davidm@hpl.hp.com>
   639.9 + *
  639.10 + * This stub allows us to make EFI calls in physical mode with interrupts
639.11 + * turned off.  We need this because we can't call SetVirtualAddressMap() until
639.12 + * the kernel has booted far enough to allow allocation of struct vm_area_struct
  639.13 + * entries (which we would need to map stuff with memory attributes other
  639.14 + * than uncached or writeback...).  Since the GetTime() service gets called
  639.15 + * earlier than that, we need to be able to make physical mode EFI calls from
  639.16 + * the kernel.
  639.17 + */
  639.18 +
  639.19 +/*
  639.20 + * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
  639.21 + * Abstraction Layer Specification", revision 2.6e).  Note that
  639.22 + * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
639.23 + * Otherwise, SAL dies whenever it tries to do an IA-32 BIOS call
  639.24 + * (the br.ia instruction fails unless psr.dfl and psr.dfh are
  639.25 + * cleared).  Fortunately, SAL promises not to touch the floating
  639.26 + * point regs, so at least we don't have to save f2-f127.
  639.27 + */
  639.28 +#define PSR_BITS_TO_CLEAR						\
  639.29 +	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
  639.30 +	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
  639.31 +	 IA64_PSR_DFL | IA64_PSR_DFH)
  639.32 +
  639.33 +#define PSR_BITS_TO_SET							\
  639.34 +	(IA64_PSR_BN)
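In C terms, the or/andcm pair below computes the PSR handed to ia64_switch_mode_phys as (psr | PSR_BITS_TO_SET) & ~PSR_BITS_TO_CLEAR. A sketch with hypothetical placeholder values (only the expression mirrors the stub; the real IA64_PSR_* constants live in asm/processor.h):

#include <stdio.h>

#define PSR_BITS_TO_CLEAR	0x00000000000062b8ul	/* hypothetical mask */
#define PSR_BITS_TO_SET		0x0000000000040000ul	/* hypothetical: just psr.bn */

int main(void)
{
	unsigned long psr = 0x00000000000072baul;	/* hypothetical saved psr */

	/* "or loc3=loc3,r17" sets PSR_BITS_TO_SET; "andcm r16=loc3,r16"
	 * (AND-complement) then clears PSR_BITS_TO_CLEAR. */
	unsigned long new_psr = (psr | PSR_BITS_TO_SET) & ~PSR_BITS_TO_CLEAR;

	printf("new psr = %#lx\n", new_psr);
	return 0;
}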
  639.35 +
  639.36 +#include <asm/processor.h>
  639.37 +#include <asm/asmmacro.h>
  639.38 +
  639.39 +/*
  639.40 + * Inputs:
  639.41 + *	in0 = address of function descriptor of EFI routine to call
  639.42 + *	in1..in7 = arguments to routine
  639.43 + *
  639.44 + * Outputs:
  639.45 + *	r8 = EFI_STATUS returned by called function
  639.46 + */
  639.47 +
  639.48 +GLOBAL_ENTRY(efi_call_phys)
  639.49 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
  639.50 +	alloc loc1=ar.pfs,8,7,7,0
  639.51 +	ld8 r2=[in0],8			// load EFI function's entry point
  639.52 +	mov loc0=rp
  639.53 +	.body
  639.54 +	;;
  639.55 +	mov loc2=gp			// save global pointer
  639.56 +	mov loc4=ar.rsc			// save RSE configuration
  639.57 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
  639.58 +	;;
  639.59 +	ld8 gp=[in0]			// load EFI function's global pointer
  639.60 +	movl r16=PSR_BITS_TO_CLEAR
  639.61 +	mov loc3=psr			// save processor status word
  639.62 +	movl r17=PSR_BITS_TO_SET
  639.63 +	;;
  639.64 +	or loc3=loc3,r17
  639.65 +	mov b6=r2
  639.66 +	;;
  639.67 +	andcm r16=loc3,r16		// get psr with IT, DT, and RT bits cleared
  639.68 +	br.call.sptk.many rp=ia64_switch_mode_phys
  639.69 +.ret0:	mov out4=in5
  639.70 +	mov out0=in1
  639.71 +	mov out1=in2
  639.72 +	mov out2=in3
  639.73 +	mov out3=in4
  639.74