xenbits.xensource.com Git - xen.git/commitdiff
bitkeeper revision 1.1159.1.272 (417d33eaZEb_tnkLdhSsCNFpEXrEiw)
author cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Mon, 25 Oct 2004 17:12:10 +0000 (17:12 +0000)
committer cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Mon, 25 Oct 2004 17:12:10 +0000 (17:12 +0000)
Update to Linux 2.6.9.

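Note: the bulk of this commit renames the linux-2.6.8.1-xen-sparse overlay to linux-2.6.9-xen-sparse and refreshes the carried patches. The re-added agpgart.patch (whose 2.6.8.1 copy is deleted in the diff below) switches the in-tree AGP drivers from virt_to_phys()/phys_to_virt() to virt_to_bus()/bus_to_virt(), so the chipset is handed machine (bus) addresses rather than guest pseudo-physical addresses, which differ under Xen. A minimal illustrative C sketch of that substitution follows; it is not taken from the commit, and the helper name is made up:

    /* Illustrative only.  Under a Xen-patched kernel, virt_to_bus()
     * yields the machine (bus) address of a kernel buffer, while
     * virt_to_phys() yields the guest's pseudo-physical address,
     * which AGP/DMA hardware must not be handed directly. */
    #include <asm/io.h>     /* virt_to_bus(), bus_to_virt() */

    static unsigned long gatt_table_bus_addr(void *gatt_table_real)
    {
            /* the upstream driver used virt_to_phys(gatt_table_real) here */
            return virt_to_bus(gatt_table_real);
    }
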
274 files changed:
.rootkeys
linux-2.6.8.1-patches/agpgart.patch [deleted file]
linux-2.6.8.1-patches/drm.patch [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/Kconfig [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/Kconfig.drivers [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/boot/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/configs/xen0_defconfig [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/configs/xenU_defconfig [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/Kconfig [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/common.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/entry.S [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/head.S [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ioport.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/irq.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ldt.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/pci-dma.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/process.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/setup.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/signal.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/sysenter.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/time.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/traps.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.S [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.lds [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/fault.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/highmem.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/ioremap.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/direct.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/irq.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/ctrl_if.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/empty.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/evtchn.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/fixup.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/gnttab.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/process.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/reboot.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c [deleted file]
linux-2.6.8.1-xen-sparse/arch/xen/kernel/xen_proc.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/char/mem.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/blkback.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/common.h [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/control.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/interface.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkback/vbd.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Kconfig [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/blkfront.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/block.h [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/vbd.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/console/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/console/console.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/evtchn.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netback/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netback/common.h [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netback/control.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netback/interface.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Kconfig [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/Makefile [deleted file]
linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/privcmd.c [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-generic/pgtable.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/desc.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/fixmap.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/highmem.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/io.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mmu_context.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/msr.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/page.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/param.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pci.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgalloc.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/processor.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/ptrace.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/segment.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/setup.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/system.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/timer.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/tlbflush.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/vga.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/xor.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/ctrl_if.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/evtchn.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/gnttab.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/multicall.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/proc_cmd.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/queues.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/suspend.h [deleted file]
linux-2.6.8.1-xen-sparse/include/asm-xen/xen_proc.h [deleted file]
linux-2.6.8.1-xen-sparse/include/linux/bio.h [deleted file]
linux-2.6.8.1-xen-sparse/include/linux/page-flags.h [deleted file]
linux-2.6.8.1-xen-sparse/include/linux/skbuff.h [deleted file]
linux-2.6.8.1-xen-sparse/mkbuildtree [deleted file]
linux-2.6.8.1-xen-sparse/mm/memory.c [deleted file]
linux-2.6.8.1-xen-sparse/mm/page_alloc.c [deleted file]
linux-2.6.8.1-xen-sparse/net/core/skbuff.c [deleted file]
linux-2.6.8.1-xen-sparse/net/ipv4/raw.c [deleted file]
linux-2.6.9-patches/agpgart.patch [new file with mode: 0644]
linux-2.6.9-patches/drm.patch [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/Kconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/boot/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ioport.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/irq.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/pci-dma.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/process.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/signal.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/sysenter.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.S [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.lds [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/fault.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/highmem.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/pageattr.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/pci/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/pci/direct.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/i386/pci/irq.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/ctrl_if.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/empty.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/evtchn.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/process.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/char/mem.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/char/tty_io.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/balloon/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/blkback.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/common.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/control.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/interface.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkback/vbd.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkfront/Kconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkfront/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkfront/block.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/blkfront/vbd.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/console/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/console/console.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/evtchn/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netback/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netback/common.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netback/control.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netback/interface.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netback/netback.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netfront/Kconfig [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netfront/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/privcmd/Makefile [new file with mode: 0644]
linux-2.6.9-xen-sparse/drivers/xen/privcmd/privcmd.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-generic/pgtable.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/desc.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/fixmap.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/highmem.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgalloc.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/processor.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/timer.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/vga.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/xor.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/ctrl_if.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/evtchn.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/gnttab.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/multicall.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/proc_cmd.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/queues.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/suspend.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/asm-xen/xen_proc.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/linux/bio.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/linux/page-flags.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/include/linux/skbuff.h [new file with mode: 0644]
linux-2.6.9-xen-sparse/mkbuildtree [new file with mode: 0755]
linux-2.6.9-xen-sparse/mm/memory.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/mm/page_alloc.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/net/core/skbuff.c [new file with mode: 0644]
linux-2.6.9-xen-sparse/net/ipv4/raw.c [new file with mode: 0644]

index 3e0e1982a34bebea4168797b8eff93a01238ca07..15d72abcfaf97f0bf7541dde663f588d2e46a665 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.27-xen-sparse/mm/swapfile.c
 41180721bNns9Na7w1nJ0ZVt8bhUNA linux-2.4.27-xen-sparse/mm/vmalloc.c
 41505c57WAd5l1rlfCLNSCpx9J13vA linux-2.4.27-xen-sparse/net/core/skbuff.c
-413aa1d0oNP8HXLvfPuMe6cSroUfSA linux-2.6.8.1-patches/agpgart.patch
-413aa1d0ewvSv-ohnNnQQNGsbPTTNA linux-2.6.8.1-patches/drm.patch
-40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.8.1-xen-sparse/arch/xen/Kconfig
-40f56237utH41NPukqHksuNf29IC9A linux-2.6.8.1-xen-sparse/arch/xen/Kconfig.drivers
-40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.8.1-xen-sparse/arch/xen/Makefile
-40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.8.1-xen-sparse/arch/xen/boot/Makefile
-40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.8.1-xen-sparse/arch/xen/configs/xen0_defconfig
-40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.8.1-xen-sparse/arch/xen/configs/xenU_defconfig
-40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.8.1-xen-sparse/arch/xen/i386/Kconfig
-40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.8.1-xen-sparse/arch/xen/i386/Makefile
-40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/Makefile
-40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
-40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/common.c
-40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/entry.S
-40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/head.S
-40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
-40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ioport.c
-40f562382aC3_Gt4RG-4ZsfvDRUg3Q linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/irq.c
-40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ldt.c
-4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/pci-dma.c
-40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/process.c
-40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/setup.c
-40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/signal.c
-40f56238UL9uv78ODDzMwLL9yryeFw linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/sysenter.c
-40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/time.c
-40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/Makefile
-40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
-40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/traps.c
-40f56238qASEI_IOhCKWNuwFKNZrKQ linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
-40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.S
-40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
-40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile
-40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/fault.c
-4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/highmem.c
-40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c
-40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c
-41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/ioremap.c
-413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c
-40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c
-4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/Makefile
-4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/direct.c
-4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/irq.c
-40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.8.1-xen-sparse/arch/xen/kernel/Makefile
-40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.8.1-xen-sparse/arch/xen/kernel/ctrl_if.c
-40f56239pYRq5yshPTkv3ujXKc8K6g linux-2.6.8.1-xen-sparse/arch/xen/kernel/empty.c
-40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.8.1-xen-sparse/arch/xen/kernel/evtchn.c
-4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.8.1-xen-sparse/arch/xen/kernel/fixup.c
-412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/gnttab.c
-40f56239sFcjHiIRmnObRIDF-zaeKQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/process.c
-40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/reboot.c
-414c113396tK1HTVeUalm3u-1DF16g linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c
-3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.8.1-xen-sparse/arch/xen/kernel/xen_proc.c
-41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.8.1-xen-sparse/drivers/Makefile
-4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.8.1-xen-sparse/drivers/char/mem.c
-4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c
-40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.8.1-xen-sparse/drivers/xen/Makefile
-41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile
-3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c
-410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.8.1-xen-sparse/drivers/xen/blkback/Makefile
-4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.8.1-xen-sparse/drivers/xen/blkback/blkback.c
-4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.8.1-xen-sparse/drivers/xen/blkback/common.h
-4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.8.1-xen-sparse/drivers/xen/blkback/control.c
-4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.8.1-xen-sparse/drivers/xen/blkback/interface.c
-4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.8.1-xen-sparse/drivers/xen/blkback/vbd.c
-40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Kconfig
-40f562395atl9x4suKGhPkjqLOXESg linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Makefile
-40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/blkfront.c
-40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/block.h
-40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/vbd.c
-40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.8.1-xen-sparse/drivers/xen/console/Makefile
-3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.8.1-xen-sparse/drivers/xen/console/console.c
-40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/Makefile
-40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/evtchn.c
-410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.8.1-xen-sparse/drivers/xen/netback/Makefile
-4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.8.1-xen-sparse/drivers/xen/netback/common.h
-4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.8.1-xen-sparse/drivers/xen/netback/control.c
-4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.8.1-xen-sparse/drivers/xen/netback/interface.c
-4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
-40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Kconfig
-40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Makefile
-405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c
-4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/Makefile
-3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/privcmd.c
-412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.8.1-xen-sparse/include/asm-generic/pgtable.h
-40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/desc.h
-4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
-40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/fixmap.h
-4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/highmem.h
-40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/io.h
-40f5623am9BzluYFuV6EQfTd-so3dA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
-40f5623adZQ1IZGPxbDXONjyZGYuTA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
-40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
-40f5623aMQZoYuf4ml9v69N3gu8ing linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
-40f5623a8LroVMnZ5YRzJJmIc-zHlw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
-40f5623an3wOvFKmpIvqSxQfWzklVQ linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h
-40f5623ayR1vnzfF__htza35a8Ft-g linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
-40f5623a4YdRdVzYWJzOOoqe8mnrXA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
-40f5623aDLxmbOtUHvkWztKjAO4EjA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
-41062ab7HMSSuaUv3_Z4agLpjSO88A linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h
-40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
-40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
-4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
-40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/msr.h
-40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/page.h
-40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/param.h
-41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pci.h
-40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
-412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
-40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
-40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable.h
-40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/processor.h
-412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/ptrace.h
-40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/segment.h
-40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/setup.h
-40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
-40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/system.h
-40f5623bSgGrvrGRpD71K-lIYqaGgg linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/timer.h
-40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
-41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/vga.h
-40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/xor.h
-40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.8.1-xen-sparse/include/asm-xen/ctrl_if.h
-40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.8.1-xen-sparse/include/asm-xen/evtchn.h
-412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.8.1-xen-sparse/include/asm-xen/gnttab.h
-40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h
-40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.8.1-xen-sparse/include/asm-xen/multicall.h
-3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.8.1-xen-sparse/include/asm-xen/proc_cmd.h
-4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.8.1-xen-sparse/include/asm-xen/queues.h
-3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.8.1-xen-sparse/include/asm-xen/suspend.h
-3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.8.1-xen-sparse/include/asm-xen/xen_proc.h
-4124d8c4aocX7A-jIbuGraWN84pxGQ linux-2.6.8.1-xen-sparse/include/linux/bio.h
-4124f66fp5QwbDHEfoUIa7pqO5Xhag linux-2.6.8.1-xen-sparse/include/linux/page-flags.h
-4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.8.1-xen-sparse/include/linux/skbuff.h
-40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.8.1-xen-sparse/mkbuildtree
-412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.8.1-xen-sparse/mm/memory.c
-410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.8.1-xen-sparse/mm/page_alloc.c
-41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.8.1-xen-sparse/net/core/skbuff.c
-4149ec79wMpIHdvbntxqVGLRZZjPxw linux-2.6.8.1-xen-sparse/net/ipv4/raw.c
+413aa1d0oNP8HXLvfPuMe6cSroUfSA linux-2.6.9-patches/agpgart.patch
+413aa1d0ewvSv-ohnNnQQNGsbPTTNA linux-2.6.9-patches/drm.patch
+40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.9-xen-sparse/arch/xen/Kconfig
+40f56237utH41NPukqHksuNf29IC9A linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers
+40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.9-xen-sparse/arch/xen/Makefile
+40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.9-xen-sparse/arch/xen/boot/Makefile
+40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig
+40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig
+40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig
+40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.9-xen-sparse/arch/xen/i386/Makefile
+40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile
+40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
+40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c
+40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
+40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S
+40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
+40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ioport.c
+40f562382aC3_Gt4RG-4ZsfvDRUg3Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/irq.c
+40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c
+4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/pci-dma.c
+40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.9-xen-sparse/arch/xen/i386/kernel/process.c
+40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c
+40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/signal.c
+40f56238UL9uv78ODDzMwLL9yryeFw linux-2.6.9-xen-sparse/arch/xen/i386/kernel/sysenter.c
+40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c
+40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/Makefile
+40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
+40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c
+40f56238qASEI_IOhCKWNuwFKNZrKQ linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
+40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.S
+40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
+40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.9-xen-sparse/arch/xen/i386/mm/Makefile
+40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.9-xen-sparse/arch/xen/i386/mm/fault.c
+4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.9-xen-sparse/arch/xen/i386/mm/highmem.c
+40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c
+40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c
+41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c
+413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.9-xen-sparse/arch/xen/i386/mm/pageattr.c
+40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c
+4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.9-xen-sparse/arch/xen/i386/pci/Makefile
+4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.9-xen-sparse/arch/xen/i386/pci/direct.c
+4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.9-xen-sparse/arch/xen/i386/pci/irq.c
+40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.9-xen-sparse/arch/xen/kernel/Makefile
+40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.9-xen-sparse/arch/xen/kernel/ctrl_if.c
+40f56239pYRq5yshPTkv3ujXKc8K6g linux-2.6.9-xen-sparse/arch/xen/kernel/empty.c
+40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.9-xen-sparse/arch/xen/kernel/evtchn.c
+4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c
+412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c
+40f56239sFcjHiIRmnObRIDF-zaeKQ linux-2.6.9-xen-sparse/arch/xen/kernel/process.c
+40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c
+414c113396tK1HTVeUalm3u-1DF16g linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c
+3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c
+41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.9-xen-sparse/drivers/Makefile
+4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.9-xen-sparse/drivers/char/mem.c
+4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.9-xen-sparse/drivers/char/tty_io.c
+40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.9-xen-sparse/drivers/xen/Makefile
+41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.9-xen-sparse/drivers/xen/balloon/Makefile
+3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c
+410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.9-xen-sparse/drivers/xen/blkback/Makefile
+4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.9-xen-sparse/drivers/xen/blkback/blkback.c
+4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.9-xen-sparse/drivers/xen/blkback/common.h
+4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.9-xen-sparse/drivers/xen/blkback/control.c
+4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.9-xen-sparse/drivers/xen/blkback/interface.c
+4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.9-xen-sparse/drivers/xen/blkback/vbd.c
+40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.9-xen-sparse/drivers/xen/blkfront/Kconfig
+40f562395atl9x4suKGhPkjqLOXESg linux-2.6.9-xen-sparse/drivers/xen/blkfront/Makefile
+40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c
+40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.9-xen-sparse/drivers/xen/blkfront/block.h
+40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.9-xen-sparse/drivers/xen/blkfront/vbd.c
+40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.9-xen-sparse/drivers/xen/console/Makefile
+3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.9-xen-sparse/drivers/xen/console/console.c
+40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.9-xen-sparse/drivers/xen/evtchn/Makefile
+40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c
+410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.9-xen-sparse/drivers/xen/netback/Makefile
+4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.9-xen-sparse/drivers/xen/netback/common.h
+4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.9-xen-sparse/drivers/xen/netback/control.c
+4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.9-xen-sparse/drivers/xen/netback/interface.c
+4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.9-xen-sparse/drivers/xen/netback/netback.c
+40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.9-xen-sparse/drivers/xen/netfront/Kconfig
+40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.9-xen-sparse/drivers/xen/netfront/Makefile
+405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c
+4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.9-xen-sparse/drivers/xen/privcmd/Makefile
+3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.9-xen-sparse/drivers/xen/privcmd/privcmd.c
+412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.9-xen-sparse/include/asm-generic/pgtable.h
+40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/desc.h
+4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
+40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/highmem.h
+40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h
+40f5623am9BzluYFuV6EQfTd-so3dA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
+40f5623adZQ1IZGPxbDXONjyZGYuTA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
+40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+40f5623aMQZoYuf4ml9v69N3gu8ing linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
+40f5623a8LroVMnZ5YRzJJmIc-zHlw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
+40f5623ayR1vnzfF__htza35a8Ft-g linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
+40f5623a4YdRdVzYWJzOOoqe8mnrXA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
+40f5623aDLxmbOtUHvkWztKjAO4EjA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
+41062ab7HMSSuaUv3_Z4agLpjSO88A linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h
+40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
+40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h
+40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h
+40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h
+41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h
+40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
+412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
+40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/processor.h
+412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h
+40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h
+40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h
+40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
+40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h
+40f5623bSgGrvrGRpD71K-lIYqaGgg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/timer.h
+40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
+41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/vga.h
+40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/xor.h
+40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.9-xen-sparse/include/asm-xen/ctrl_if.h
+40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.9-xen-sparse/include/asm-xen/evtchn.h
+412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.9-xen-sparse/include/asm-xen/gnttab.h
+40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h
+40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.9-xen-sparse/include/asm-xen/multicall.h
+3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.9-xen-sparse/include/asm-xen/proc_cmd.h
+4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.9-xen-sparse/include/asm-xen/queues.h
+3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.9-xen-sparse/include/asm-xen/suspend.h
+3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.9-xen-sparse/include/asm-xen/xen_proc.h
+4124d8c4aocX7A-jIbuGraWN84pxGQ linux-2.6.9-xen-sparse/include/linux/bio.h
+4124f66fp5QwbDHEfoUIa7pqO5Xhag linux-2.6.9-xen-sparse/include/linux/page-flags.h
+4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.9-xen-sparse/include/linux/skbuff.h
+40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.9-xen-sparse/mkbuildtree
+412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.9-xen-sparse/mm/memory.c
+410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.9-xen-sparse/mm/page_alloc.c
+41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.9-xen-sparse/net/core/skbuff.c
+4149ec79wMpIHdvbntxqVGLRZZjPxw linux-2.6.9-xen-sparse/net/ipv4/raw.c
 413cb1e4zst25MDYjg63Y-NGC5_pLg netbsd-2.0-xen-sparse/Makefile
 413cb1e5c_Mkxf_X0zimEhTKI_l4DA netbsd-2.0-xen-sparse/mkbuildtree
 413cb1e5kY_Zil7-b0kI6hvCIxBEYg netbsd-2.0-xen-sparse/nbconfig-xen
diff --git a/linux-2.6.8.1-patches/agpgart.patch b/linux-2.6.8.1-patches/agpgart.patch
deleted file mode 100644 (file)
index aba8b20..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
---- linux-2.6.8.1/drivers/char/agp/ali-agp.c   2004-08-14 11:55:35.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/ali-agp.c      2004-09-05 05:55:58.876495340 +0100
-@@ -150,7 +150,7 @@
-       pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
-       pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
-                       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
--                        virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
-+                        virt_to_bus(addr)) | ALI_CACHE_FLUSH_EN ));
-       return addr;
- }
-@@ -174,7 +174,7 @@
-       pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
-       pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
-                       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
--                        virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
-+                        virt_to_bus(addr)) | ALI_CACHE_FLUSH_EN));
-       agp_generic_destroy_page(addr);
- }
---- linux-2.6.8.1/drivers/char/agp/amd-k7-agp.c        2004-08-14 11:56:24.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/amd-k7-agp.c   2004-09-05 05:55:58.877495108 +0100
-@@ -43,7 +43,7 @@
-       SetPageReserved(virt_to_page(page_map->real));
-       global_cache_flush();
--      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
-+      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real),
-                                           PAGE_SIZE);
-       if (page_map->remapped == NULL) {
-               ClearPageReserved(virt_to_page(page_map->real));
-@@ -152,7 +152,7 @@
-       agp_bridge->gatt_table_real = (u32 *)page_dir.real;
-       agp_bridge->gatt_table = (u32 *)page_dir.remapped;
--      agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
-+      agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
-       /* Get the address for the gart region.
-        * This is a bus address even on the alpha, b/c its
-@@ -166,7 +166,7 @@
-       /* Calculate the agp offset */
-       for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-               page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
--                      virt_to_phys(amd_irongate_private.gatt_pages[i]->real);
-+                      virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
-               page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
-       }
---- linux-2.6.8.1/drivers/char/agp/amd64-agp.c 2004-08-14 11:55:47.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/amd64-agp.c    2004-09-05 05:55:58.877495108 +0100
-@@ -212,7 +212,7 @@
- static int amd_8151_configure(void)
- {
--      unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
-+      unsigned long gatt_bus = virt_to_bus(agp_bridge->gatt_table_real);
-       /* Configure AGP regs in each x86-64 host bridge. */
-       for_each_nb() {
-@@ -521,7 +521,7 @@
- {
-       struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
--      release_mem_region(virt_to_phys(bridge->gatt_table_real),
-+      release_mem_region(virt_to_bus(bridge->gatt_table_real),
-                          amd64_aperture_sizes[bridge->aperture_size_idx].size);
-       agp_remove_bridge(bridge);
-       agp_put_bridge(bridge);
---- linux-2.6.8.1/drivers/char/agp/ati-agp.c   2004-08-14 11:55:48.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/ati-agp.c      2004-09-05 05:55:58.877495108 +0100
-@@ -64,7 +64,7 @@
-       /* CACHE_FLUSH(); */
-       global_cache_flush();
--      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
-+      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real),
-                                           PAGE_SIZE);
-       if (page_map->remapped == NULL || err) {
-               ClearPageReserved(virt_to_page(page_map->real));
---- linux-2.6.8.1/drivers/char/agp/backend.c   2004-08-14 11:55:47.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/backend.c      2004-09-05 05:55:58.878494876 +0100
-@@ -142,7 +142,7 @@
-                       return -ENOMEM;
-               }
--              bridge->scratch_page_real = virt_to_phys(addr);
-+              bridge->scratch_page_real = virt_to_bus(addr);
-               bridge->scratch_page =
-                   bridge->driver->mask_memory(bridge->scratch_page_real, 0);
-       }
-@@ -186,7 +186,7 @@
- err_out:
-       if (bridge->driver->needs_scratch_page)
-               bridge->driver->agp_destroy_page(
--                              phys_to_virt(bridge->scratch_page_real));
-+                              bus_to_virt(bridge->scratch_page_real));
-       if (got_gatt)
-               bridge->driver->free_gatt_table();
-       if (got_keylist) {
-@@ -211,7 +211,7 @@
-       if (bridge->driver->agp_destroy_page &&
-           bridge->driver->needs_scratch_page)
-               bridge->driver->agp_destroy_page(
--                              phys_to_virt(bridge->scratch_page_real));
-+                              bus_to_virt(bridge->scratch_page_real));
- }
- static const drm_agp_t drm_agp = {
---- linux-2.6.8.1/drivers/char/agp/generic.c   2004-08-14 11:55:10.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/generic.c      2004-09-05 05:55:58.879494644 +0100
-@@ -127,7 +127,7 @@
-       }
-       if (curr->page_count != 0) {
-               for (i = 0; i < curr->page_count; i++) {
--                      agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
-+                      agp_bridge->driver->agp_destroy_page(bus_to_virt(curr->memory[i]));
-               }
-       }
-       agp_free_key(curr->key);
-@@ -181,7 +181,7 @@
-                       return NULL;
-               }
-               new->memory[i] =
--                      agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
-+                      agp_bridge->driver->mask_memory(virt_to_bus(addr), type);
-               new->page_count++;
-       }
-@@ -636,6 +636,7 @@
-       int i;
-       void *temp;
-       struct page *page;
-+      dma_addr_t dma;
-       /* The generic routines can't handle 2 level gatt's */
-       if (agp_bridge->driver->size_type == LVL2_APER_SIZE)
-@@ -674,8 +675,10 @@
-                               break;
-                       }
--                      table = (char *) __get_free_pages(GFP_KERNEL,
--                                                        page_order);
-+                      table = dma_alloc_coherent(
-+                                      &agp_bridge->dev->dev,
-+                                      PAGE_SIZE << page_order, &dma,
-+                                      GFP_KERNEL);
-                       if (table == NULL) {
-                               i++;
-@@ -706,7 +709,9 @@
-               size = ((struct aper_size_info_fixed *) temp)->size;
-               page_order = ((struct aper_size_info_fixed *) temp)->page_order;
-               num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
--              table = (char *) __get_free_pages(GFP_KERNEL, page_order);
-+              table = dma_alloc_coherent(
-+                              &agp_bridge->dev->dev,
-+                              PAGE_SIZE << page_order, &dma, GFP_KERNEL);
-       }
-       if (table == NULL)
-@@ -721,7 +726,7 @@
-       agp_gatt_table = (void *)table;
-       agp_bridge->driver->cache_flush();
--      agp_bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
-+      agp_bridge->gatt_table = ioremap_nocache(virt_to_bus(table),
-                                       (PAGE_SIZE * (1 << page_order)));
-       agp_bridge->driver->cache_flush();
-@@ -729,11 +734,12 @@
-               for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
-                       ClearPageReserved(page);
--              free_pages((unsigned long) table, page_order);
-+              dma_free_coherent(&agp_bridge->dev->dev, PAGE_SIZE<<page_order,
-+                                      table, dma);
-               return -ENOMEM;
-       }
--      agp_bridge->gatt_bus_addr = virt_to_phys(agp_bridge->gatt_table_real);
-+      agp_bridge->gatt_bus_addr = virt_to_bus(table);
-       /* AK: bogus, should encode addresses > 4GB */
-       for (i = 0; i < num_entries; i++)
-@@ -785,7 +791,8 @@
-       for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
-               ClearPageReserved(page);
--      free_pages((unsigned long) agp_bridge->gatt_table_real, page_order);
-+      dma_free_coherent(&agp_bridge->dev->dev, PAGE_SIZE<<page_order,
-+              agp_bridge->gatt_table_real, agp_bridge->gatt_bus_addr);
-       agp_gatt_table = NULL;
-       agp_bridge->gatt_table = NULL;
---- linux-2.6.8.1/drivers/char/agp/hp-agp.c    2004-08-14 11:55:59.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/hp-agp.c       2004-09-05 05:55:58.879494644 +0100
-@@ -110,7 +110,7 @@
-       hp->gart_size = HP_ZX1_GART_SIZE;
-       hp->gatt_entries = hp->gart_size / hp->io_page_size;
--      hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
-+      hp->io_pdir = bus_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
-       hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
-       if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
-@@ -248,7 +248,7 @@
-       agp_bridge->mode = INREG32(hp->lba_regs, hp->lba_cap_offset + PCI_AGP_STATUS);
-       if (hp->io_pdir_owner) {
--              OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir));
-+              OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_bus(hp->io_pdir));
-               OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
-               OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
-               OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
---- linux-2.6.8.1/drivers/char/agp/i460-agp.c  2004-08-14 11:55:34.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/i460-agp.c     2004-09-05 05:55:58.879494644 +0100
-@@ -371,7 +371,7 @@
-       }
-       memset(lp->alloced_map, 0, map_size);
--      lp->paddr = virt_to_phys(lpage);
-+      lp->paddr = virt_to_bus(lpage);
-       lp->refcount = 0;
-       atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
-       return 0;
-@@ -382,7 +382,7 @@
-       kfree(lp->alloced_map);
-       lp->alloced_map = NULL;
--      free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
-+      free_pages((unsigned long) bus_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
-       atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
- }
---- linux-2.6.8.1/drivers/char/agp/intel-agp.c 2004-08-14 11:55:32.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/intel-agp.c    2004-09-05 05:55:58.880494412 +0100
-@@ -285,7 +285,7 @@
-       if (new == NULL)
-               return NULL;
--      new->memory[0] = virt_to_phys(addr);
-+      new->memory[0] = virt_to_bus(addr);
-       if (pg_count == 4) {
-               /* kludge to get 4 physical pages for ARGB cursor */
-               new->memory[1] = new->memory[0] + PAGE_SIZE;
-@@ -328,10 +328,10 @@
-       agp_free_key(curr->key);
-       if(curr->type == AGP_PHYS_MEMORY) {
-               if (curr->page_count == 4)
--                      i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
-+                      i8xx_destroy_pages(bus_to_virt(curr->memory[0]));
-               else
-                       agp_bridge->driver->agp_destroy_page(
--                               phys_to_virt(curr->memory[0]));
-+                               bus_to_virt(curr->memory[0]));
-               vfree(curr->memory);
-       }
-       kfree(curr);
---- linux-2.6.8.1/drivers/char/agp/intel-mch-agp.c     2004-08-14 11:54:49.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/intel-mch-agp.c        2004-09-05 05:55:58.880494412 +0100
-@@ -51,7 +51,7 @@
-       if (new == NULL)
-               return NULL;
--      new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
-+      new->memory[0] = agp_bridge->driver->mask_memory(virt_to_bus(addr), type);
-       new->page_count = 1;
-       new->num_scratch_pages = 1;
-       new->type = AGP_PHYS_MEMORY;
-@@ -63,7 +63,7 @@
- {
-       agp_free_key(curr->key);
-       if(curr->type == AGP_PHYS_MEMORY) {
--              agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[0]));
-+              agp_bridge->driver->agp_destroy_page(bus_to_virt(curr->memory[0]));
-               vfree(curr->memory);
-       }
-       kfree(curr);
---- linux-2.6.8.1/drivers/char/agp/sworks-agp.c        2004-08-14 11:55:10.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/sworks-agp.c   2004-09-05 05:55:58.881494180 +0100
-@@ -51,7 +51,7 @@
-       }
-       SetPageReserved(virt_to_page(page_map->real));
-       global_cache_flush();
--      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
-+      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real), 
-                                           PAGE_SIZE);
-       if (page_map->remapped == NULL) {
-               ClearPageReserved(virt_to_page(page_map->real));
-@@ -164,7 +164,7 @@
-       for(i = 0; i < 1024; i++) {
-               serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge->scratch_page;
-               page_dir.remapped[i] =
--                      virt_to_phys(serverworks_private.scratch_dir.real);
-+                      virt_to_bus(serverworks_private.scratch_dir.real);
-               page_dir.remapped[i] |= 0x00000001;
-       }
-@@ -177,7 +177,7 @@
-       agp_bridge->gatt_table_real = (u32 *)page_dir.real;
-       agp_bridge->gatt_table = (u32 *)page_dir.remapped;
--      agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
-+      agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
-       /* Get the address for the gart region.
-        * This is a bus address even on the alpha, b/c its
-@@ -191,7 +191,7 @@
-       for(i = 0; i < value->num_entries / 1024; i++) {
-               page_dir.remapped[i] =
--                      virt_to_phys(serverworks_private.gatt_pages[i]->real);
-+                      virt_to_bus(serverworks_private.gatt_pages[i]->real);
-               page_dir.remapped[i] |= 0x00000001;
-       }
---- linux-2.6.8.1/drivers/char/agp/uninorth-agp.c      2004-08-14 11:55:32.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/agp/uninorth-agp.c 2004-09-05 05:55:58.881494180 +0100
-@@ -200,7 +200,7 @@
-       agp_bridge->gatt_table_real = (u32 *) table;
-       agp_bridge->gatt_table = (u32 *)table;
--      agp_bridge->gatt_bus_addr = virt_to_phys(table);
-+      agp_bridge->gatt_bus_addr = virt_to_bus(table);
-       for (i = 0; i < num_entries; i++) {
-               agp_bridge->gatt_table[i] =
---- linux-2.6.8.1/include/asm-i386/agp.h       2004-08-14 11:54:47.000000000 +0100
-+++ linux-2.6.8.1-xen0/include/asm-i386/agp.h  2004-09-05 05:57:26.040268956 +0100
-@@ -3,6 +3,7 @@
- #include <asm/pgtable.h>
- #include <asm/cacheflush.h>
-+#include <asm/system.h>
- /* 
-  * Functions to keep the agpgart mappings coherent with the MMU.
-@@ -19,6 +20,6 @@
- /* Could use CLFLUSH here if the cpu supports it. But then it would
-    need to be called for each cacheline of the whole page so it may not be 
-    worth it. Would need a page for it. */
--#define flush_agp_cache() asm volatile("wbinvd":::"memory")
-+#define flush_agp_cache() wbinvd()
- #endif
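
The agpgart patch above applies one pattern throughout: addresses handed to the AGP hardware go through virt_to_bus()/bus_to_virt() instead of virt_to_phys()/phys_to_virt(), and the GATT is released with dma_free_coherent() rather than free_pages(). Under Xen a guest's pseudo-physical addresses generally differ from the machine (bus) addresses a device sees, so only the bus-address helpers and the DMA-coherent API yield something the bridge can actually DMA to. A minimal sketch of the pattern, assuming the standard 2.6-era helpers; the function names and the device pointer are hypothetical, not part of the patch:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <asm/io.h>
    #include <asm/page.h>

    /* Give the device a bus (machine) address, never the guest's
     * pseudo-physical one. */
    static dma_addr_t example_table_bus_addr(void *table)
    {
            return virt_to_bus(table);      /* not virt_to_phys() */
    }

    /* Free a table that was obtained from dma_alloc_coherent(), keeping
     * the allocation and release paths symmetric. */
    static void example_free_table(struct device *dev, void *table,
                                   dma_addr_t bus_addr, int page_order)
    {
            dma_free_coherent(dev, PAGE_SIZE << page_order, table, bus_addr);
    }
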
diff --git a/linux-2.6.8.1-patches/drm.patch b/linux-2.6.8.1-patches/drm.patch
deleted file mode 100644 (file)
index 3412ce6..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
---- linux-2.6.8.1/drivers/char/drm/ati_pcigart.h       2004-08-14 11:56:14.000000000 +0100
-+++ linux-2.6.8.1-xen0/drivers/char/drm/ati_pcigart.h  2004-09-05 06:14:51.751782846 +0100
-@@ -158,7 +158,7 @@
-       ret = 1;
- #if defined(__i386__) || defined(__x86_64__)
--      asm volatile ( "wbinvd" ::: "memory" );
-+      wbinvd();
- #else
-       mb();
- #endif
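
Both deleted patches also rewrite the open-coded cache flush: agp.h gains #include <asm/system.h> and the raw asm volatile("wbinvd" ::: "memory") becomes the wbinvd() macro, and ati_pcigart.h gets the same treatment. Routing the flush through the macro presumably lets the Xen sparse headers substitute their own definition, since an unprivileged paravirtualized guest cannot execute the privileged WBINVD instruction directly. A sketch of the idea, not a verbatim copy of any header:

    /* Roughly the stock i386 form from <asm/system.h>: */
    #define wbinvd() __asm__ __volatile__ ("wbinvd" : : : "memory")

    /* A Xen port can then redefine the macro in its own asm headers
     * (for example as a no-op or a request to the hypervisor) without
     * touching every caller.  Illustrative only. */
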
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/Kconfig b/linux-2.6.8.1-xen-sparse/arch/xen/Kconfig
deleted file mode 100644 (file)
index 57e3eba..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
-config XEN
-       bool
-       default y
-       help
-         This is the Linux Xen port.
-
-config ARCH_XEN
-       bool
-       default y
-
-
-config NO_IDLE_HZ
-       bool
-       default y
-
-
-menu "XEN"
-
-config XEN_PRIVILEGED_GUEST
-       bool "Privileged Guest (domain 0)"
-       default n
-        select XEN_PHYSDEV_ACCESS
-       help
-         Support for privileged operation (domain 0)
-
-config XEN_PHYSDEV_ACCESS
-       bool "Physical device access"
-       default y if XEN_PRIVILEGED_GUEST
-       default n if !XEN_PRIVILEGED_GUEST
-       help
-         Assume access is available to physical hardware devices
-          (e.g., hard drives, network cards). This allows you to configure
-          such devices and also includes some low-level support that is
-          otherwise not compiled into the kernel.
-
-config XEN_BLKDEV_BACKEND
-        bool "Block-device backend driver"
-        default y if XEN_PHYSDEV_ACCESS
-        default n if !XEN_PHYSDEV_ACCESS
-        help
-          The block-device backend driver allows the kernel to export its
-          block devices to other guests via a high-performance shared-memory
-          interface.
-
-config XEN_NETDEV_BACKEND
-        bool "Network-device backend driver"
-        default y if XEN_PHYSDEV_ACCESS
-        default n if !XEN_PHYSDEV_ACCESS
-        help
-          The network-device backend driver allows the kernel to export its
-          network devices to other guests via a high-performance shared-memory
-          interface.
-
-config XEN_BLKDEV_FRONTEND
-        bool "Block-device frontend driver"
-        default y
-        help
-          The block-device frontend driver allows the kernel to access block
-          devices mounted within another guest OS. Unless you are building a
-          dedicated device-driver domain or your master control domain
-          (domain 0), you almost certainly want to say Y here.
-
-config XEN_NETDEV_FRONTEND
-        bool "Network-device frontend driver"
-        default y
-        help
-          The network-device frontend driver allows the kernel to access
-          network interfaces within another guest OS. Unless you are building a
-          dedicated device-driver domain or your master control domain
-          (domain 0), you almost certainly want to say Y here.
-
-if XEN_NETDEV_FRONTEND
-config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
-        bool "Pipelined transmitter (DANGEROUS)"
-        default n
-        help
-          The driver will assume that the backend is pipelining packets for
-          transmission: whenever packets are pending in the remote backend,
-          the driver will not send asynchronous notifications when it queues
-          additional packets for transmission.
-          If the backend is a dumb domain, such as a transparent Ethernet
-          bridge with no local IP interface, it is safe to say Y here to get
-          slightly lower network overhead.
-          If the backend has a local IP interface; or may be doing smart things
-          like reassembling packets to perform firewall filtering; or if you
-          are unsure; or if you experience network hangs when this option is
-          enabled; then you must say N here.
-endif
-
-config XEN_WRITABLE_PAGETABLES
-       bool "Use writable pagetables"
-       default n
-       help
-         Use writable L1 pagetables
-
-config XEN_SCRUB_PAGES
-        bool "Scrub memory before freeing it to Xen"
-        default y
-        help
-          Erase memory contents before freeing it back to Xen's global
-          pool. This ensures that any secrets contained within that
-          memory (e.g., private keys) cannot be found by other guests that
-          may be running on the machine. Most people will want to say Y here.
-          If security is not a concern then you may increase performance by
-          saying N.
-
-endmenu
-
-config FOREIGN_PAGES
-       bool
-       default y
-
-config HAVE_ARCH_DEV_ALLOC_SKB
-       bool
-       default y
-
-#config VT
-#      bool
-#      default y
-
-#config VT_CONSOLE
-#      bool
-#      default y
-
-#config HW_CONSOLE
-#      bool
-#      default y
-
-choice
-       prompt "Processor Type"
-       default X86
-
-config X86
-       bool "X86"
-       help
-         Choose this option if your computer is an X86 architecture.
-
-config X86_64
-       bool "X86_64"
-       help
-         Choose this option if your computer is an X86_64 architecture.
-
-endchoice
-
-source "init/Kconfig"
-
-if X86
-source "arch/xen/i386/Kconfig"
-endif
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "arch/xen/Kconfig.drivers"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/Kconfig.drivers b/linux-2.6.8.1-xen-sparse/arch/xen/Kconfig.drivers
deleted file mode 100644 (file)
index 93db5f9..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# arch/xen/Kconfig.drivers
-
-menu "Device Drivers"
-
-source "drivers/base/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/mtd/Kconfig"
-source "drivers/parport/Kconfig"
-source "drivers/pnp/Kconfig"
-endif
-
-source "drivers/block/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/ide/Kconfig"
-endif
-
-source "drivers/scsi/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/cdrom/Kconfig"
-endif
-
-source "drivers/md/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/message/fusion/Kconfig"
-source "drivers/ieee1394/Kconfig"
-source "drivers/message/i2o/Kconfig"
-endif
-
-source "net/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/isdn/Kconfig"
-source "drivers/telephony/Kconfig"
-source "drivers/input/Kconfig"
-source "drivers/char/Kconfig"
-source "drivers/i2c/Kconfig"
-source "drivers/w1/Kconfig"
-source "drivers/misc/Kconfig"
-source "drivers/media/Kconfig"
-source "drivers/video/Kconfig"
-source "sound/Kconfig"
-source "drivers/usb/Kconfig"
-endif
-
-if !XEN_PHYSDEV_ACCESS
-config UNIX98_PTYS
-       bool
-       default y
-endif
-
-endmenu
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/Makefile
deleted file mode 100644 (file)
index 224e0aa..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# xen/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to add actions
-# for "archclean" to clean up for this architecture.
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 2004 by Christian Limpach
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-# pick up headers from include/asm-xen/asm in preference over include/asm
-NOSTDINC_FLAGS  = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
-
-# make uname return the processor arch
-UTS_MACHINE := $(XENARCH)
-
-core-y += arch/xen/kernel/
-
-include/.asm-ignore:
-       @rm -f include/.asm-ignore
-       @mv include/asm include/.asm-ignore
-       @echo '  SYMLINK include/asm -> include/asm-$(XENARCH)'
-       $(Q)if [ ! -d include ]; then mkdir -p include; fi;
-       @ln -fsn asm-$(XENARCH) include/asm
-
-include/asm-xen/asm:
-       @echo '  SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
-       @ln -fsn asm-$(XENARCH) $@
-
-include/asm-xen/asm-$(XENARCH)/hypervisor-ifs:
-       @echo '  SYMLINK $@ -> include/asm-xen/hypervisor-ifs'
-       @ln -fsn ../hypervisor-ifs $@
-
-arch/xen/arch:
-       @rm -f $@
-       @ln -fsn $(XENARCH) $@
-
-prepare: include/.asm-ignore include/asm-xen/asm \
-       include/asm-xen/asm-$(XENARCH)/hypervisor-ifs \
-       arch/xen/arch ;
-
-all: vmlinuz
-
-vmlinuz: vmlinux
-       $(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
-
-XINSTALL_NAME ?= $(KERNELRELEASE)
-install: vmlinuz
-       mkdir -p $(INSTALL_PATH)/boot
-       install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-       install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-       install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-       install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-
-dist:
-       $(MAKE) INSTALL_PATH=../install install
-
-archclean:
-       @if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
-       @rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
-
-define archhelp
-  echo  '* vmlinuz     - Compressed kernel image'
-  echo  '  install     - Install kernel image and config file'
-endef
-
-ifneq ($(XENARCH),)
-include        $(srctree)/arch/xen/$(XENARCH)/Makefile
-endif
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/boot/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/boot/Makefile
deleted file mode 100644 (file)
index ff37924..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-
-OBJCOPYFLAGS := -g --strip-unneeded
-
-vmlinuz: vmlinux-stripped FORCE
-       $(call if_changed,gzip)
-
-vmlinux-stripped: vmlinux FORCE
-       $(call if_changed,objcopy)
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/configs/xen0_defconfig b/linux-2.6.8.1-xen-sparse/arch/xen/configs/xen0_defconfig
deleted file mode 100644 (file)
index 42aeb48..0000000
+++ /dev/null
@@ -1,1032 +0,0 @@
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_XEN=y
-CONFIG_ARCH_XEN=y
-CONFIG_NO_IDLE_HZ=y
-
-#
-# XEN
-#
-CONFIG_XEN_PRIVILEGED_GUEST=y
-CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_XEN_BLKDEV_BACKEND=y
-CONFIG_XEN_NETDEV_BACKEND=y
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_FOREIGN_PAGES=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_HOTPLUG=y
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-CONFIG_KMOD=y
-
-#
-# X86 Processor Configuration
-#
-CONFIG_XENARCH="i386"
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-CONFIG_MPENTIUM4=y
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_X86_GENERIC is not set
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-# CONFIG_HPET_TIMER is not set
-# CONFIG_HPET_EMULATE_RTC is not set
-# CONFIG_SMP is not set
-CONFIG_PREEMPT=y
-CONFIG_X86_CPUID=y
-
-#
-# Firmware Drivers
-#
-# CONFIG_EDD is not set
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_HAVE_DEC_LOCK=y
-# CONFIG_REGPARM is not set
-
-#
-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-#
-CONFIG_PCI=y
-# CONFIG_PCI_GOBIOS is not set
-# CONFIG_PCI_GOMMCONFIG is not set
-CONFIG_PCI_GODIRECT=y
-# CONFIG_PCI_GOANY is not set
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_LEGACY_PROC=y
-# CONFIG_PCI_NAMES is not set
-CONFIG_ISA=y
-# CONFIG_EISA is not set
-# CONFIG_MCA is not set
-# CONFIG_SCx200 is not set
-
-#
-# PCMCIA/CardBus support
-#
-CONFIG_PCMCIA=m
-# CONFIG_PCMCIA_DEBUG is not set
-CONFIG_YENTA=m
-CONFIG_CARDBUS=y
-# CONFIG_PD6729 is not set
-# CONFIG_I82092 is not set
-# CONFIG_I82365 is not set
-# CONFIG_TCIC is not set
-CONFIG_PCMCIA_PROBE=y
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_4KSTACKS is not set
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_X86_STD_RESOURCES=y
-CONFIG_PC=y
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_STANDALONE is not set
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNP is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_XD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=y
-# CONFIG_BLK_DEV_SX8 is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-# CONFIG_BLK_DEV_HD_IDE is not set
-CONFIG_BLK_DEV_IDEDISK=y
-# CONFIG_IDEDISK_MULTI_MODE is not set
-# CONFIG_BLK_DEV_IDECS is not set
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
-# CONFIG_BLK_DEV_IDEFLOPPY is not set
-# CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_IDE_TASK_IOCTL is not set
-# CONFIG_IDE_TASKFILE_IO is not set
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=y
-# CONFIG_BLK_DEV_CMD640 is not set
-CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=y
-# CONFIG_BLK_DEV_OPTI621 is not set
-# CONFIG_BLK_DEV_RZ1000 is not set
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_ADMA=y
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_BLK_DEV_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
-# CONFIG_BLK_DEV_ATIIXP is not set
-# CONFIG_BLK_DEV_CMD64X is not set
-# CONFIG_BLK_DEV_TRIFLEX is not set
-# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5520 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
-# CONFIG_BLK_DEV_HPT34X is not set
-# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_SC1200 is not set
-CONFIG_BLK_DEV_PIIX=y
-# CONFIG_BLK_DEV_NS87415 is not set
-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-CONFIG_BLK_DEV_SVWKS=y
-# CONFIG_BLK_DEV_SIIMAGE is not set
-# CONFIG_BLK_DEV_SIS5513 is not set
-# CONFIG_BLK_DEV_SLC90E66 is not set
-# CONFIG_BLK_DEV_TRM290 is not set
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
-# CONFIG_IDE_CHIPSETS is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-
-#
-# SCSI low-level drivers
-#
-CONFIG_BLK_DEV_3W_XXXX_RAID=y
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_7000FASST is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AHA152X is not set
-# CONFIG_SCSI_AHA1542 is not set
-CONFIG_SCSI_AACRAID=y
-CONFIG_SCSI_AIC7XXX=y
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=y
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-CONFIG_AIC79XX_DEBUG_ENABLE=y
-CONFIG_AIC79XX_DEBUG_MASK=0
-CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_IN2000 is not set
-CONFIG_SCSI_MEGARAID=y
-CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_SVW is not set
-CONFIG_SCSI_ATA_PIIX=y
-# CONFIG_SCSI_SATA_NV is not set
-CONFIG_SCSI_SATA_PROMISE=y
-CONFIG_SCSI_SATA_SX4=y
-CONFIG_SCSI_SATA_SIL=y
-# CONFIG_SCSI_SATA_SIS is not set
-# CONFIG_SCSI_SATA_VIA is not set
-# CONFIG_SCSI_SATA_VITESSE is not set
-CONFIG_SCSI_BUSLOGIC=y
-# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-# CONFIG_SCSI_CPQFCTS is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_NCR53C406A is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_PSI240I is not set
-# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA6322 is not set
-# CONFIG_SCSI_SEAGATE is not set
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# PCMCIA SCSI adapter support
-#
-# CONFIG_PCMCIA_AHA152X is not set
-# CONFIG_PCMCIA_FDOMAIN is not set
-# CONFIG_PCMCIA_NINJA_SCSI is not set
-# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-# CONFIG_MD_LINEAR is not set
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID5=y
-# CONFIG_MD_RAID6 is not set
-# CONFIG_MD_MULTIPATH is not set
-CONFIG_BLK_DEV_DM=y
-# CONFIG_DM_CRYPT is not set
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_MIRROR=y
-# CONFIG_DM_ZERO is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-# CONFIG_IP_NF_IRC is not set
-# CONFIG_IP_NF_TFTP is not set
-# CONFIG_IP_NF_AMANDA is not set
-# CONFIG_IP_NF_QUEUE is not set
-CONFIG_IP_NF_IPTABLES=m
-# CONFIG_IP_NF_MATCH_LIMIT is not set
-# CONFIG_IP_NF_MATCH_IPRANGE is not set
-# CONFIG_IP_NF_MATCH_MAC is not set
-# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-# CONFIG_IP_NF_MATCH_MARK is not set
-# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-# CONFIG_IP_NF_MATCH_TOS is not set
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
-# CONFIG_IP_NF_MATCH_DSCP is not set
-# CONFIG_IP_NF_MATCH_AH_ESP is not set
-# CONFIG_IP_NF_MATCH_LENGTH is not set
-# CONFIG_IP_NF_MATCH_TTL is not set
-# CONFIG_IP_NF_MATCH_TCPMSS is not set
-# CONFIG_IP_NF_MATCH_HELPER is not set
-# CONFIG_IP_NF_MATCH_STATE is not set
-# CONFIG_IP_NF_MATCH_CONNTRACK is not set
-# CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
-# CONFIG_IP_NF_FILTER is not set
-# CONFIG_IP_NF_NAT is not set
-# CONFIG_IP_NF_MANGLE is not set
-# CONFIG_IP_NF_TARGET_LOG is not set
-# CONFIG_IP_NF_TARGET_ULOG is not set
-# CONFIG_IP_NF_TARGET_TCPMSS is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-# CONFIG_IP_NF_RAW is not set
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-
-#
-# Bridge: Netfilter Configuration
-#
-# CONFIG_BRIDGE_NF_EBTABLES is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-CONFIG_BRIDGE=y
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-CONFIG_NET_VENDOR_3COM=y
-# CONFIG_EL1 is not set
-# CONFIG_EL2 is not set
-# CONFIG_ELPLUS is not set
-# CONFIG_EL16 is not set
-# CONFIG_EL3 is not set
-# CONFIG_3C515 is not set
-CONFIG_VORTEX=y
-# CONFIG_TYPHOON is not set
-# CONFIG_LANCE is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NET_VENDOR_RACAL is not set
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-# CONFIG_DE2104X is not set
-CONFIG_TULIP=y
-# CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_DM9102 is not set
-# CONFIG_PCMCIA_XIRCOM is not set
-# CONFIG_PCMCIA_XIRTULIP is not set
-# CONFIG_AT1700 is not set
-# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_ISA is not set
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
-# CONFIG_APRICOT is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_CS89x0 is not set
-# CONFIG_DGRS is not set
-# CONFIG_EEPRO100 is not set
-CONFIG_E100=y
-# CONFIG_E100_NAPI is not set
-# CONFIG_FEALNX is not set
-# CONFIG_NATSEMI is not set
-# CONFIG_NE2K_PCI is not set
-# CONFIG_8139CP is not set
-CONFIG_8139TOO=y
-CONFIG_8139TOO_PIO=y
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-# CONFIG_EPIC100 is not set
-# CONFIG_SUNDANCE is not set
-# CONFIG_TLAN is not set
-CONFIG_VIA_RHINE=y
-# CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_VIA_VELOCITY is not set
-# CONFIG_NET_POCKET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=y
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-# CONFIG_DL2K is not set
-CONFIG_E1000=y
-# CONFIG_E1000_NAPI is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-CONFIG_TIGON3=y
-
-#
-# Ethernet (10000 Mbit)
-#
-# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# PCMCIA network device support
-#
-# CONFIG_NET_PCMCIA is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-
-#
-# PCMCIA character devices
-#
-# CONFIG_SYNCLINK_CS is not set
-# CONFIG_MWAVE is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_HANGCHECK_TIMER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-# CONFIG_IBM_ASM is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-# CONFIG_VIDEO_SELECT is not set
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=y
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_SHA1=m
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_586 is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=y
-CONFIG_ZLIB_INFLATE=y
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/configs/xenU_defconfig b/linux-2.6.8.1-xen-sparse/arch/xen/configs/xenU_defconfig
deleted file mode 100644 (file)
index d6940f6..0000000
+++ /dev/null
@@ -1,475 +0,0 @@
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_XEN=y
-CONFIG_ARCH_XEN=y
-CONFIG_NO_IDLE_HZ=y
-
-#
-# XEN
-#
-# CONFIG_XEN_PRIVILEGED_GUEST is not set
-# CONFIG_XEN_PHYSDEV_ACCESS is not set
-# CONFIG_XEN_BLKDEV_BACKEND is not set
-# CONFIG_XEN_NETDEV_BACKEND is not set
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_FOREIGN_PAGES=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_HOTPLUG=y
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-CONFIG_KMOD=y
-
-#
-# X86 Processor Configuration
-#
-CONFIG_XENARCH="i386"
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-CONFIG_MPENTIUM4=y
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_X86_GENERIC is not set
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-# CONFIG_HPET_TIMER is not set
-# CONFIG_HPET_EMULATE_RTC is not set
-# CONFIG_SMP is not set
-CONFIG_PREEMPT=y
-CONFIG_X86_CPUID=y
-
-#
-# Firmware Drivers
-#
-# CONFIG_EDD is not set
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_HAVE_DEC_LOCK=y
-# CONFIG_REGPARM is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_DEBUG_KERNEL is not set
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_4KSTACKS is not set
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_X86_STD_RESOURCES=y
-CONFIG_PC=y
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=m
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-# CONFIG_NET_ETHERNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-CONFIG_UNIX98_PTYS=y
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=y
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS_XATTR=y
-# CONFIG_DEVPTS_FS_SECURITY is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_EXPORTFS is not set
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_586 is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC32 is not set
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/Kconfig b/linux-2.6.8.1-xen-sparse/arch/xen/i386/Kconfig
deleted file mode 100644 (file)
index 6e22f7c..0000000
+++ /dev/null
@@ -1,937 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-menu "X86 Processor Configuration"
-
-config XENARCH
-       string
-       default i386
-
-config MMU
-       bool
-       default y
-
-config SBUS
-       bool
-
-config UID16
-       bool
-       default y
-
-config GENERIC_ISA_DMA
-       bool
-       default y
-
-
-choice
-       prompt "Processor family"
-       default M686
-
-#config M386
-#      bool "386"
-#      ---help---
-#        This is the processor type of your CPU. This information is used for
-#        optimizing purposes. In order to compile a kernel that can run on
-#        all x86 CPU types (albeit not optimally fast), you can specify
-#        "386" here.
-#
-#        The kernel will not necessarily run on earlier architectures than
-#        the one you have chosen, e.g. a Pentium optimized kernel will run on
-#        a PPro, but not necessarily on a i486.
-#
-#        Here are the settings recommended for greatest speed:
-#        - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
-#        486DLC/DLC2, UMC 486SX-S and NexGen Nx586.  Only "386" kernels
-#        will run on a 386 class machine.
-#        - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
-#        SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
-#        - "586" for generic Pentium CPUs lacking the TSC
-#        (time stamp counter) register.
-#        - "Pentium-Classic" for the Intel Pentium.
-#        - "Pentium-MMX" for the Intel Pentium MMX.
-#        - "Pentium-Pro" for the Intel Pentium Pro.
-#        - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
-#        - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
-#        - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
-#        - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
-#        - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
-#        - "Crusoe" for the Transmeta Crusoe series.
-#        - "Winchip-C6" for original IDT Winchip.
-#        - "Winchip-2" for IDT Winchip 2.
-#        - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
-#        - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
-#        - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
-#
-#        If you don't know what to do, choose "386".
-
-#config M486
-#      bool "486"
-#      help
-#        Select this for a 486 series processor, either Intel or one of the
-#        compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
-#        DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
-#        U5S.
-
-#config M586
-#      bool "586/K5/5x86/6x86/6x86MX"
-#      help
-#        Select this for an 586 or 686 series processor such as the AMD K5,
-#        the Intel 5x86 or 6x86, or the Intel 6x86MX.  This choice does not
-#        assume the RDTSC (Read Time Stamp Counter) instruction.
-
-#config M586TSC
-#      bool "Pentium-Classic"
-#      help
-#        Select this for a Pentium Classic processor with the RDTSC (Read
-#        Time Stamp Counter) instruction for benchmarking.
-
-#config M586MMX
-#      bool "Pentium-MMX"
-#      help
-#        Select this for a Pentium with the MMX graphics/multimedia
-#        extended instructions.
-
-config M686
-       bool "Pentium-Pro"
-       help
-         Select this for Intel Pentium Pro chips.  This enables the use of
-         Pentium Pro extended instructions, and disables the init-time guard
-         against the f00f bug found in earlier Pentiums.
-
-config MPENTIUMII
-       bool "Pentium-II/Celeron(pre-Coppermine)"
-       help
-         Select this for Intel chips based on the Pentium-II and
-         pre-Coppermine Celeron core.  This option enables an unaligned
-         copy optimization, compiles the kernel with optimization flags
-         tailored for the chip, and applies any applicable Pentium Pro
-         optimizations.
-
-config MPENTIUMIII
-       bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
-       help
-         Select this for Intel chips based on the Pentium-III and
-         Celeron-Coppermine core.  This option enables use of some
-         extended prefetch instructions in addition to the Pentium II
-         extensions.
-
-config MPENTIUMM
-       bool "Pentium M"
-       help
-         Select this for Intel Pentium M (not Pentium-4 M)
-         notebook chips.
-
-config MPENTIUM4
-       bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
-       help
-         Select this for Intel Pentium 4 chips.  This includes the
-         Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
-         (not Pentium M) chips.  This option enables compile flags
-         optimized for the chip, uses the correct cache shift, and
-         applies any applicable Pentium III optimizations.
-
-config MK6
-       bool "K6/K6-II/K6-III"
-       help
-         Select this for an AMD K6-family processor.  Enables use of
-         some extended instructions, and passes appropriate optimization
-         flags to GCC.
-
-config MK7
-       bool "Athlon/Duron/K7"
-       help
-         Select this for an AMD Athlon K7-family processor.  Enables use of
-         some extended instructions, and passes appropriate optimization
-         flags to GCC.
-
-config MK8
-       bool "Opteron/Athlon64/Hammer/K8"
-       help
-         Select this for an AMD Opteron or Athlon64 Hammer-family processor.  Enables
-         use of some extended instructions, and passes appropriate optimization
-         flags to GCC.
-
-config MCRUSOE
-       bool "Crusoe"
-       help
-         Select this for a Transmeta Crusoe processor.  Treats the processor
-         like a 586 with TSC, and sets some GCC optimization flags (like a
-         Pentium Pro with no alignment requirements).
-
-#config MWINCHIPC6
-#      bool "Winchip-C6"
-#      help
-#        Select this for an IDT Winchip C6 chip.  Linux and GCC
-#        treat this chip as a 586TSC with some extended instructions
-#        and alignment requirements.
-
-#config MWINCHIP2
-#      bool "Winchip-2"
-#      help
-#        Select this for an IDT Winchip-2.  Linux and GCC
-#        treat this chip as a 586TSC with some extended instructions
-#        and alignment requirements.
-
-#config MWINCHIP3D
-#      bool "Winchip-2A/Winchip-3"
-#      help
-#        Select this for an IDT Winchip-2A or 3.  Linux and GCC
-#        treat this chip as a 586TSC with some extended instructions
-#        and alignment requirements.  Also enable out of order memory
-#        stores for this CPU, which can increase performance of some
-#        operations.
-
-config MCYRIXIII
-       bool "CyrixIII/VIA-C3"
-       help
-         Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
-         treat this chip as a generic 586. Whilst the CPU is 686 class,
-         it lacks the cmov extension which gcc assumes is present when
-         generating 686 code.
-         Note that Nehemiah (Model 9) and above will not boot with this
-         kernel due to them lacking the 3DNow! instructions used in earlier
-         incarnations of the CPU.
-
-config MVIAC3_2
-       bool "VIA C3-2 (Nehemiah)"
-       help
-         Select this for a VIA C3 "Nehemiah". Selecting this enables usage
-         of SSE and tells gcc to treat the CPU as a 686.
-         Note, this kernel will not boot on older (pre model 9) C3s.
-
-endchoice
-
-config X86_GENERIC
-       bool "Generic x86 support" 
-       help
-         Instead of just including optimizations for the selected
-         x86 variant (e.g. PII, Crusoe or Athlon), include some more
-         generic optimizations as well. This will make the kernel
-         perform better on x86 CPUs other than that selected.
-
-         This is really intended for distributors who need more
-         generic optimizations.
-
-#
-# Define implied options from the CPU selection here
-#
-config X86_CMPXCHG
-       bool
-       depends on !M386
-       default y
-
-config X86_XADD
-       bool
-       depends on !M386
-       default y
-
-config X86_L1_CACHE_SHIFT
-       int
-       default "7" if MPENTIUM4 || X86_GENERIC
-       default "4" if X86_ELAN || M486 || M386
-       default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
-       default "6" if MK7 || MK8 || MPENTIUMM
-
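The shift selected here is the power-of-two exponent of the cache-line size the kernel aligns hot data to. A minimal sketch of how such a value is typically consumed (illustrative only, not taken from this patch; the CONFIG value and struct name are made up for the example):

    /* Illustrative only: a shift of 7 (the Pentium 4 / generic default above)
     * gives 128-byte lines, while 5 gives the 32-byte lines of P6-class CPUs. */
    #define CONFIG_X86_L1_CACHE_SHIFT 7
    #define L1_CACHE_BYTES (1 << CONFIG_X86_L1_CACHE_SHIFT)   /* 1 << 7 == 128 */

    struct example {
            int counter;
    } __attribute__((__aligned__(L1_CACHE_BYTES)));   /* pad to a full cache line */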
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       depends on M386
-       default y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       depends on !M386
-       default y
-
-config X86_PPRO_FENCE
-       bool
-       depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
-       default y
-
-config X86_F00F_BUG
-       bool
-       depends on M586MMX || M586TSC || M586 || M486 || M386
-       default y
-
-config X86_WP_WORKS_OK
-       bool
-       depends on !M386
-       default y
-
-config X86_INVLPG
-       bool
-       depends on !M386
-       default y
-
-config X86_BSWAP
-       bool
-       depends on !M386
-       default y
-
-config X86_POPAD_OK
-       bool
-       depends on !M386
-       default y
-
-config X86_ALIGNMENT_16
-       bool
-       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
-       default y
-
-config X86_GOOD_APIC
-       bool
-       depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8
-       default y
-
-config X86_INTEL_USERCOPY
-       bool
-       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7
-       default y
-
-config X86_USE_PPRO_CHECKSUM
-       bool
-       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2
-       default y
-
-config X86_USE_3DNOW
-       bool
-       depends on MCYRIXIII || MK7
-       default y
-
-config X86_OOSTORE
-       bool
-       depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
-       default y
-
-config HPET_TIMER
-       bool
-       default n
-#config HPET_TIMER
-#      bool "HPET Timer Support"
-#      help
-#        This enables the use of the HPET for the kernel's internal timer.
-#        HPET is the next generation timer replacing legacy 8254s.
-#        You can safely choose Y here.  However, HPET will only be
-#        activated if the platform and the BIOS support this feature.
-#        Otherwise the 8254 will be used for timing services.
-#
-#        Choose N to continue using the legacy 8254 timer.
-
-config HPET_EMULATE_RTC
-       def_bool HPET_TIMER && RTC=y
-
-config SMP
-       bool
-       default n
-#config SMP
-#      bool "Symmetric multi-processing support"
-#      ---help---
-#        This enables support for systems with more than one CPU. If you have
-#        a system with only one CPU, like most personal computers, say N. If
-#        you have a system with more than one CPU, say Y.
-#
-#        If you say N here, the kernel will run on single and multiprocessor
-#        machines, but will use only one CPU of a multiprocessor machine. If
-#        you say Y here, the kernel will run on many, but not all,
-#        singleprocessor machines. On a singleprocessor machine, the kernel
-#        will run faster if you say N here.
-#
-#        Note that if you say Y here and choose architecture "586" or
-#        "Pentium" under "Processor family", the kernel will not work on 486
-#        architectures. Similarly, multiprocessor kernels for the "PPro"
-#        architecture may not work on all Pentium based boards.
-#
-#        People using multiprocessor machines who say Y here should also say
-#        Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-#        Management" code will be disabled if you say Y here.
-#
-#        See also the <file:Documentation/smp.txt>,
-#        <file:Documentation/i386/IO-APIC.txt>,
-#        <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
-#        <http://www.tldp.org/docs.html#howto>.
-#
-#        If you don't know what to do here, say N.
-
-config NR_CPUS
-       int "Maximum number of CPUs (2-255)"
-       range 2 255
-       depends on SMP
-       default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-       default "8"
-       help
-         This allows you to specify the maximum number of CPUs which this
-         kernel will support.  The maximum supported value is 255 and the
-         minimum value which makes sense is 2.
-
-         This is purely to save memory - each supported CPU adds
-         approximately eight kilobytes to the kernel image.
-
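As a rough worked example of that estimate: the default of 8 CPUs costs on the order of 8 * 8 KB = 64 KB of image size, while building with NR_CPUS=32 costs roughly 32 * 8 KB = 256 KB.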
-config SCHED_SMT
-       bool "SMT (Hyperthreading) scheduler support"
-       depends on SMP
-       default n
-       help
-         SMT scheduler support improves the CPU scheduler's decision making
-         when dealing with Intel Pentium 4 chips with HyperThreading at a
-         cost of slightly increased overhead in some places. If unsure say
-         N here.
-
-config PREEMPT
-       bool "Preemptible Kernel"
-       help
-         This option reduces the latency of the kernel when reacting to
-         real-time or interactive events by allowing a low priority process to
-         be preempted even if it is in kernel mode executing a system call.
-         This allows applications to run more reliably even when the system is
-         under load.
-
-         Say Y here if you are building a kernel for a desktop, embedded
-         or real-time system.  Say N if you are unsure.
-
-#config X86_TSC
-#       bool
-#      depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
-#       default y
-
-#config X86_MCE
-#       bool "Machine Check Exception"
-#      depends on !X86_VOYAGER
-#       ---help---
-#         Machine Check Exception support allows the processor to notify the
-#         kernel if it detects a problem (e.g. overheating, component failure).
-#         The action the kernel takes depends on the severity of the problem,
-#         ranging from a warning message on the console, to halting the machine.
-#         Your processor must be a Pentium or newer to support this - check the
-#         flags in /proc/cpuinfo for mce.  Note that some older Pentium systems
-#         have a design flaw which leads to false MCE events - hence MCE is
-#         disabled on all P5 processors, unless explicitly enabled with "mce"
-#         as a boot argument.  Similarly, if MCE is built in and creates a
-#         problem on some new non-standard machine, you can boot with "nomce"
-#         to disable it.  MCE support simply ignores non-MCE processors like
-#         the 386 and 486, so nearly everyone can say Y here.
-
-#config X86_MCE_NONFATAL
-#      tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
-#       depends on X86_MCE
-#       help
-#         Enabling this feature starts a timer that triggers every 5 seconds which
-#         will look at the machine check registers to see if anything happened.
-#         Non-fatal problems automatically get corrected (but still logged).
-#         Disable this if you don't want to see these messages.
-#         Seeing the messages this option prints out may be indicative of dying hardware,
-#         or out-of-spec (ie, overclocked) hardware.
-#         This option only does something on certain CPUs.
-#         (AMD Athlon/Duron and Intel Pentium 4)
-
-#config X86_MCE_P4THERMAL
-#       bool "check for P4 thermal throttling interrupt."
-#       depends on X86_MCE && (X86_UP_APIC || SMP)
-#       help
-#         Enabling this feature will cause a message to be printed when the P4
-#         enters thermal throttling.
-
-#config MICROCODE
-#       tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
-#       ---help---
-#         If you say Y here and also to "/dev file system support" in the
-#         'File systems' section, you will be able to update the microcode on
-#         Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
-#         Pentium III, Pentium 4, Xeon etc.  You will obviously need the
-#         actual microcode binary data itself which is not shipped with the
-#         Linux kernel.
-#
-#         For latest news and information on obtaining all the required
-#         ingredients for this driver, check:
-#         <http://www.urbanmyth.org/microcode/>.
-#
-#         To compile this driver as a module, choose M here: the
-#         module will be called microcode.
-
-#config X86_MSR
-#       tristate "/dev/cpu/*/msr - Model-specific register support"
-#       help
-#         This device gives privileged processes access to the x86
-#         Model-Specific Registers (MSRs).  It is a character device with
-#         major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
-#         MSR accesses are directed to a specific CPU on multi-processor
-#         systems.
-
-config X86_CPUID
-       tristate "/dev/cpu/*/cpuid - CPU information support"
-       help
-         This device gives processes access to the x86 CPUID instruction to
-         be executed on a specific processor.  It is a character device
-         with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
-         /dev/cpu/31/cpuid.
-
-source "drivers/firmware/Kconfig"
-
-choice
-       prompt "High Memory Support"
-       default NOHIGHMEM
-
-config NOHIGHMEM
-       bool "off"
-       ---help---
-         Linux can use up to 64 Gigabytes of physical memory on x86 systems.
-         However, the address space of 32-bit x86 processors is only 4
-         Gigabytes large. That means that, if you have a large amount of
-         physical memory, not all of it can be "permanently mapped" by the
-         kernel. The physical memory that's not permanently mapped is called
-         "high memory".
-
-         If you are compiling a kernel which will never run on a machine with
-         more than 1 Gigabyte total physical RAM, answer "off" here (default
-         choice and suitable for most users). This will result in a "3GB/1GB"
-         split: 3GB are mapped so that each process sees a 3GB virtual memory
-         space and the remaining part of the 4GB virtual memory space is used
-         by the kernel to permanently map as much physical memory as
-         possible.
-
-         If the machine has between 1 and 4 Gigabytes physical RAM, then
-         answer "4GB" here.
-
-         If more than 4 Gigabytes is used then answer "64GB" here. This
-         selection turns Intel PAE (Physical Address Extension) mode on.
-         PAE implements 3-level paging on IA32 processors. PAE is fully
-         supported by Linux, PAE mode is implemented on all recent Intel
-         processors (Pentium Pro and better). NOTE: If you say "64GB" here,
-         then the kernel will not boot on CPUs that don't support PAE!
-
-         The actual amount of total physical memory will either be
-         auto detected or can be forced by using a kernel command line option
-         such as "mem=256M". (Try "man bootparam" or see the documentation of
-         your boot loader (lilo or loadlin) about how to pass options to the
-         kernel at boot time.)
-
-         If unsure, say "off".
-
-config HIGHMEM4G
-       bool "4GB"
-       help
-         Select this if you have a 32-bit processor and between 1 and 4
-         gigabytes of physical RAM.
-
-#config HIGHMEM64G
-#      bool "64GB"
-#      help
-#        Select this if you have a 32-bit processor and more than 4
-#        gigabytes of physical RAM.
-
-endchoice
-
-config HIGHMEM
-       bool
-       depends on HIGHMEM64G || HIGHMEM4G
-       default y
-
-config X86_PAE
-       bool
-       depends on HIGHMEM64G
-       default y
-
-# Common NUMA Features
-config NUMA
-       bool "Numa Memory Allocation and Scheduler Support"
-       depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
-       default n if X86_PC
-       default y if (X86_NUMAQ || X86_SUMMIT)
-
-# Need comments to help the hapless user trying to turn on NUMA support
-comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
-       depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
-
-comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
-       depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
-
-config DISCONTIGMEM
-       bool
-       depends on NUMA
-       default y
-
-config HAVE_ARCH_BOOTMEM_NODE
-       bool
-       depends on NUMA
-       default y
-
-#config HIGHPTE
-#      bool "Allocate 3rd-level pagetables from highmem"
-#      depends on HIGHMEM4G || HIGHMEM64G
-#      help
-#        The VM uses one page table entry for each page of physical memory.
-#        For systems with a lot of RAM, this can be wasteful of precious
-#        low memory.  Setting this option will put user-space page table
-#        entries in high memory.
-
-#config MTRR
-#       bool "MTRR (Memory Type Range Register) support"
-#       ---help---
-#         On Intel P6 family processors (Pentium Pro, Pentium II and later)
-#         the Memory Type Range Registers (MTRRs) may be used to control
-#         processor access to memory ranges. This is most useful if you have
-#         a video (VGA) card on a PCI or AGP bus. Enabling write-combining
-#         allows bus write transfers to be combined into a larger transfer
-#         before bursting over the PCI/AGP bus. This can increase performance
-#         of image write operations 2.5 times or more. Saying Y here creates a
-#         /proc/mtrr file which may be used to manipulate your processor's
-#         MTRRs. Typically the X server should use this.
-#
-#         This code has a reasonably generic interface so that similar
-#         control registers on other processors can be easily supported
-#         as well:
-#
-#         The Cyrix 6x86, 6x86MX and M II processors have Address Range
-#         Registers (ARRs) which provide a similar functionality to MTRRs. For
-#         these, the ARRs are used to emulate the MTRRs.
-#         The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
-#         MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
-#         write-combining. All of these processors are supported by this code
-#         and it makes sense to say Y here if you have one of them.
-#
-#         Saying Y here also fixes a problem with buggy SMP BIOSes which only
-#         set the MTRRs for the boot CPU and not for the secondary CPUs. This
-#         can lead to all sorts of problems, so it's good to say Y here.
-#
-#         You can safely say Y even if your machine doesn't have MTRRs, you'll
-#         just add about 9 KB to your kernel.
-#
-#         See <file:Documentation/mtrr.txt> for more information.
-
-config IRQBALANCE
-       bool "Enable kernel irq balancing"
-       depends on SMP && X86_IO_APIC
-       default y
-       help
-         The default Y allows the kernel to balance interrupt load across
-         the available CPUs.  Saying N disables irq load balancing.
-
-config HAVE_DEC_LOCK
-       bool
-       depends on (SMP || PREEMPT) && X86_CMPXCHG
-       default y
-
-# turning this on wastes a bunch of space.
-# Summit needs it only when NUMA is on
-config BOOT_IOREMAP
-       bool
-       depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
-       default y
-
-config REGPARM
-       bool "Use register arguments (EXPERIMENTAL)"
-       depends on EXPERIMENTAL
-       default n
-       help
-         Compile the kernel with -mregparm=3. This uses a different ABI
-         and passes the first three arguments of a function call in registers.
-         This will probably break binary-only modules.
-
-         This feature is only enabled for gcc-3.0 and later - earlier compilers
-         generate incorrect output with certain kernel constructs when
-         -mregparm=3 is used.
-
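A minimal illustration of the ABI change, assuming gcc's i386 regparm convention; the function below is made up for the example and is not part of this tree:

    /* With -mregparm=3 (or the per-function attribute below), gcc passes the
     * first three integer arguments in EAX, EDX and ECX instead of on the
     * stack.  Callers and callees must agree on this, which is why modules
     * compiled against the standard stack-based ABI break. */
    static int __attribute__((regparm(3))) add3(int a, int b, int c)
    {
            return a + b + c;   /* a arrives in %eax, b in %edx, c in %ecx */
    }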
-
-if XEN_PHYSDEV_ACCESS
-
-menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
-
-config X86_VISWS_APIC
-       bool
-       depends on X86_VISWS
-       default y
-
-config X86_LOCAL_APIC
-       bool
-       depends on (X86_VISWS || SMP) && !X86_VOYAGER
-       default y
-
-config X86_IO_APIC
-       bool
-       depends on SMP && !(X86_VISWS || X86_VOYAGER)
-       default y
-
-config PCI
-       bool "PCI support" if !X86_VISWS
-       depends on !X86_VOYAGER
-       default y if X86_VISWS
-       help
-         Find out whether you have a PCI motherboard. PCI is the name of a
-         bus system, i.e. the way the CPU talks to the other stuff inside
-         your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
-         VESA. If you have PCI, say Y, otherwise N.
-
-         The PCI-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>, contains valuable
-         information about which PCI hardware does work under Linux and which
-         doesn't.
-
-#choice
-#      prompt "PCI access mode"
-#      depends on PCI && !X86_VISWS
-#      default PCI_GOANY
-#      ---help---
-#        On PCI systems, the BIOS can be used to detect the PCI devices and
-#        determine their configuration. However, some old PCI motherboards
-#        have BIOS bugs and may crash if this is done. Also, some embedded
-#        PCI-based systems don't have any BIOS at all. Linux can also try to
-#        detect the PCI hardware directly without using the BIOS.
-#
-#        With this option, you can specify how Linux should detect the
-#        PCI devices. If you choose "BIOS", the BIOS will be used,
-#        if you choose "Direct", the BIOS won't be used, and if you
-#        choose "MMConfig", then PCI Express MMCONFIG will be used.
-#        If you choose "Any", the kernel will try MMCONFIG, then the
-#        direct access method and falls back to the BIOS if that doesn't
-#        work. If unsure, go with the default, which is "Any".
-#
-#config PCI_GOBIOS
-#      bool "BIOS"
-#
-#config PCI_GOMMCONFIG
-#      bool "MMConfig"
-#
-#config PCI_GODIRECT
-#      bool "Direct"
-#
-#config PCI_GOANY
-#      bool "Any"
-#
-#endchoice
-#
-#config PCI_BIOS
-#      bool
-#      depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-#      default y
-#
-#config PCI_DIRECT
-#      bool
-#      depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
-#      default y
-
-config PCI_DIRECT
-       bool
-       depends on PCI
-       default y
-
-source "drivers/pci/Kconfig"
-
-config ISA
-       bool "ISA support"
-       depends on !(X86_VOYAGER || X86_VISWS)
-       help
-         Find out whether you have ISA slots on your motherboard.  ISA is the
-         name of a bus system, i.e. the way the CPU talks to the other stuff
-         inside your box.  Other bus systems are PCI, EISA, MicroChannel
-         (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
-         newer boards don't support it.  If you have ISA, say Y, otherwise N.
-
-config EISA
-       bool "EISA support"
-       depends on ISA
-       ---help---
-         The Extended Industry Standard Architecture (EISA) bus was
-         developed as an open alternative to the IBM MicroChannel bus.
-
-         The EISA bus provided some of the features of the IBM MicroChannel
-         bus while maintaining backward compatibility with cards made for
-         the older ISA bus.  The EISA bus saw limited use between 1988 and
-         1995 when it was made obsolete by the PCI bus.
-
-         Say Y here if you are building a kernel for an EISA-based machine.
-
-         Otherwise, say N.
-
-source "drivers/eisa/Kconfig"
-
-config MCA
-       bool "MCA support"
-       depends on !(X86_VISWS || X86_VOYAGER)
-       help
-         MicroChannel Architecture is found in some IBM PS/2 machines and
-         laptops.  It is a bus system similar to PCI or ISA. See
-         <file:Documentation/mca.txt> (and especially the web page given
-         there) before attempting to build an MCA bus kernel.
-
-config MCA
-       depends on X86_VOYAGER
-       default y if X86_VOYAGER
-
-source "drivers/mca/Kconfig"
-
-config SCx200
-       tristate "NatSemi SCx200 support"
-       depends on !X86_VOYAGER
-       help
-         This provides basic support for the National Semiconductor SCx200 
-         processor.  Right now this is just a driver for the GPIO pins.
-
-         If you don't know what to do here, say N.
-
-         This support is also available as a module.  If compiled as a
-         module, it will be called scx200.
-
-source "drivers/pcmcia/Kconfig"
-
-source "drivers/pci/hotplug/Kconfig"
-
-endmenu
-
-endif
-
-menu "Kernel hacking"
-
-config DEBUG_KERNEL
-       bool "Kernel debugging"
-       help
-         Say Y here if you are developing drivers or trying to debug and
-         identify kernel problems.
-
-config EARLY_PRINTK
-       bool "Early printk" if EMBEDDED
-       default y
-       help
-         Write kernel log output directly into the VGA buffer or to a serial
-         port.
-
-         This is useful for kernel debugging when your machine crashes very
-         early before the console code is initialized. For normal operation
-         it is not recommended because it looks ugly and doesn't cooperate
-         with klogd/syslogd or the X server. You should normally say N here,
-         unless you want to debug such a crash.
-
-config DEBUG_STACKOVERFLOW
-       bool "Check for stack overflows"
-       depends on DEBUG_KERNEL
-
-config DEBUG_STACK_USAGE
-       bool "Stack utilization instrumentation"
-       depends on DEBUG_KERNEL
-       help
-         Enables the display of the minimum amount of free stack which each
-         task has ever had available in the sysrq-T and sysrq-P debug output.
-
-         This option will slow down process creation somewhat.
-
-config DEBUG_SLAB
-       bool "Debug memory allocations"
-       depends on DEBUG_KERNEL
-       help
-         Say Y here to have the kernel do limited verification on memory
-         allocation as well as poisoning memory on free to catch use of freed
-         memory.
-
-config MAGIC_SYSRQ
-       bool "Magic SysRq key"
-       depends on DEBUG_KERNEL
-       help
-         If you say Y here, you will have some control over the system even
-         if the system crashes for example during kernel debugging (e.g., you
-         will be able to flush the buffer cache to disk, reboot the system
-         immediately or dump some status information). This is accomplished
-         by pressing various keys while holding SysRq (Alt+PrintScreen). It
-         also works on a serial console (on PC hardware at least), if you
-         send a BREAK and then within 5 seconds a command keypress. The
-         keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
-         unless you really know what this hack does.
-
-config DEBUG_SPINLOCK
-       bool "Spinlock debugging"
-       depends on DEBUG_KERNEL
-       help
-         Say Y here and build SMP to catch missing spinlock initialization
-         and certain other kinds of spinlock errors commonly made.  This is
-         best used in conjunction with the NMI watchdog so that spinlock
-         deadlocks are also debuggable.
-
-config DEBUG_PAGEALLOC
-       bool "Page alloc debugging"
-       depends on DEBUG_KERNEL
-       help
-         Unmap pages from the kernel linear mapping after free_pages().
-         This results in a large slowdown, but helps to find certain types
-         of memory corruptions.
-
-config DEBUG_HIGHMEM
-       bool "Highmem debugging"
-       depends on DEBUG_KERNEL && HIGHMEM
-       help
-         This option enables additional error checking for high memory systems.
-         Disable for production systems.
-
-config DEBUG_INFO
-       bool "Compile the kernel with debug info"
-       depends on DEBUG_KERNEL
-       help
-          If you say Y here the resulting kernel image will include
-         debugging info resulting in a larger kernel image.
-         Say Y here only if you plan to use gdb to debug the kernel.
-         If you don't debug the kernel, you can say N.
-         
-config DEBUG_SPINLOCK_SLEEP
-       bool "Sleep-inside-spinlock checking"
-       help
-         If you say Y here, various routines which may sleep will become very
-         noisy if they are called with a spinlock held.        
-
-config FRAME_POINTER
-       bool "Compile the kernel with frame pointers"
-       help
-         If you say Y here the resulting kernel image will be slightly larger
-         and slower, but it will give very useful debugging information.
-         If you don't debug the kernel, you can say N, but we may not be able
-         to solve problems without frame pointers.
-
-config 4KSTACKS
-       bool "Use 4Kb for kernel stacks instead of 8Kb"
-       help
-         If you say Y here the kernel will use a 4Kb stacksize for the
-         kernel stack attached to each process/thread. This facilitates
-         running more threads on a system and also reduces the pressure
-         on the VM subsystem for higher order allocations. This option
-         will also use IRQ stacks to compensate for the reduced stackspace.
-
-config X86_FIND_SMP_CONFIG
-       bool
-       depends on X86_LOCAL_APIC || X86_VOYAGER
-       default y
-
-config X86_MPPARSE
-       bool
-       depends on X86_LOCAL_APIC && !X86_VISWS
-       default y
-
-endmenu
-
-config X86_SMP
-       bool
-       depends on SMP && !X86_VOYAGER
-       default y
-
-config X86_HT
-       bool
-       depends on SMP && !(X86_VISWS || X86_VOYAGER)
-       default y
-
-config X86_BIOS_REBOOT
-       bool
-       depends on !(X86_VISWS || X86_VOYAGER)
-       default y
-
-config X86_TRAMPOLINE
-       bool
-       depends on X86_SMP || (X86_VOYAGER && SMP)
-       default y
-
-# std_resources is overridden for pc9800, but that's not
-# a currently selectable arch choice
-config X86_STD_RESOURCES
-       bool
-       default y
-
-config PC
-       bool
-       depends on X86 && !EMBEDDED
-       default y
-
-endmenu
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/Makefile
deleted file mode 100644 (file)
index a95f970..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# i386/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to have actions
-# for "archclean" cleaning up for this architecture.
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1994 by Linus Torvalds
-#
-# 19990713  Artur Skawina <skawina@geocities.com>
-#           Added '-march' and '-mpreferred-stack-boundary' support
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-LDFLAGS                := -m elf_i386
-LDFLAGS_vmlinux :=
-CHECK          := $(CHECK) -D__i386__=1
-
-CFLAGS += -pipe -msoft-float
-
-# prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += $(call check_gcc,-mpreferred-stack-boundary=2,)
-
-align := $(subst -functions=0,,$(call check_gcc,-falign-functions=0,-malign-functions=0))
-
-cflags-$(CONFIG_M386)          += -march=i386
-cflags-$(CONFIG_M486)          += -march=i486
-cflags-$(CONFIG_M586)          += -march=i586
-cflags-$(CONFIG_M586TSC)       += -march=i586
-cflags-$(CONFIG_M586MMX)       += $(call check_gcc,-march=pentium-mmx,-march=i586)
-cflags-$(CONFIG_M686)          += -march=i686
-cflags-$(CONFIG_MPENTIUMII)    += $(call check_gcc,-march=pentium2,-march=i686)
-cflags-$(CONFIG_MPENTIUMIII)   += $(call check_gcc,-march=pentium3,-march=i686)
-cflags-$(CONFIG_MPENTIUMM)     += $(call check_gcc,-march=pentium3,-march=i686)
-cflags-$(CONFIG_MPENTIUM4)     += $(call check_gcc,-march=pentium4,-march=i686)
-cflags-$(CONFIG_MK6)           += -march=k6
-# Please note that patches that add -march=athlon-xp and friends are pointless.
-# They make zero difference whatsoever to performance at this time.
-cflags-$(CONFIG_MK7)           += $(call check_gcc,-march=athlon,-march=i686 $(align)-functions=4)
-cflags-$(CONFIG_MK8)           += $(call check_gcc,-march=k8,$(call check_gcc,-march=athlon,-march=i686 $(align)-functions=4))
-cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MWINCHIPC6)    += $(call check_gcc,-march=winchip-c6,-march=i586)
-cflags-$(CONFIG_MWINCHIP2)     += $(call check_gcc,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MWINCHIP3D)    += $(call check_gcc,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)     += $(call check_gcc,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MVIAC3_2)      += $(call check_gcc,-march=c3-2,-march=i686)
-
-# AMD Elan support
-cflags-$(CONFIG_X86_ELAN)      += -march=i486
-
-# -mregparm=3 works ok on gcc-3.0 and later
-#
-GCC_VERSION                    := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-cflags-$(CONFIG_REGPARM)       += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
-
-# Disable unit-at-a-time mode; it makes gcc use a lot more stack
-# due to the lack of sharing of stack slots.
-CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
-
-CFLAGS += $(cflags-y)
-
-head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
-
-libs-y                                         += arch/i386/lib/
-core-y                                 += arch/xen/i386/kernel/ \
-                                          arch/xen/i386/mm/ \
-                                          arch/i386/crypto/
-# \
-#                                         arch/xen/$(mcore-y)/
-drivers-$(CONFIG_MATH_EMULATION)       += arch/i386/math-emu/
-drivers-$(CONFIG_PCI)                  += arch/xen/i386/pci/
-# must be linked after kernel/
-drivers-$(CONFIG_OPROFILE)             += arch/i386/oprofile/
-drivers-$(CONFIG_PM)                   += arch/i386/power/
-
-# for clean
-obj-   += kernel/ mm/ pci/
-#obj-  += ../../i386/lib/ ../../i386/mm/ 
-#../../i386/$(mcore-y)/
-#obj-  += ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
-
-xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen
-CFLAGS += $(xenflags-y)
-AFLAGS += $(xenflags-y)
-
-prepare: include/asm-$(XENARCH)/asm_offsets.h
-CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
-
-arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
-       include/linux/version.h include/config/MARKER
-
-include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
-       $(call filechk,gen-asm-offsets)
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/Makefile
deleted file mode 100644 (file)
index dd23046..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/kernel
-
-extra-y := head.o init_task.o vmlinux.lds.s
-
-obj-y  := traps.o irq.o ldt.o setup.o entry.o time.o pci-dma.o process.o \
-               ioport.o signal.o i386_ksyms.o
-
-c-obj-y        := semaphore.o vm86.o \
-               ptrace.o sys_i386.o \
-               i387.o dmi_scan.o bootflag.o \
-               doublefault.o
-s-obj-y        :=
-
-obj-y                          += cpu/
-obj-y                          += timers/
-c-obj-$(CONFIG_ACPI_BOOT)      += acpi/
-#c-obj-$(CONFIG_X86_BIOS_REBOOT)       += reboot.o
-c-obj-$(CONFIG_MCA)            += mca.o
-c-obj-$(CONFIG_X86_MSR)                += msr.o
-c-obj-$(CONFIG_X86_CPUID)      += cpuid.o
-c-obj-$(CONFIG_MICROCODE)      += microcode.o
-c-obj-$(CONFIG_APM)            += apm.o
-c-obj-$(CONFIG_X86_SMP)                += smp.o smpboot.o
-c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
-c-obj-$(CONFIG_X86_MPPARSE)    += mpparse.o
-c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-c-obj-$(CONFIG_X86_IO_APIC)    += io_apic.o
-c-obj-$(CONFIG_X86_NUMAQ)      += numaq.o
-c-obj-$(CONFIG_X86_SUMMIT_NUMA)        += summit.o
-c-obj-$(CONFIG_MODULES)                += module.o
-obj-y                          += sysenter.o
-obj-y                          += vsyscall.o
-c-obj-$(CONFIG_ACPI_SRAT)      += srat.o
-c-obj-$(CONFIG_HPET_TIMER)     += time_hpet.o
-c-obj-$(CONFIG_EFI)            += efi.o efi_stub.o
-c-obj-$(CONFIG_EARLY_PRINTK)   += early_printk.o
-
-EXTRA_AFLAGS   := -traditional
-
-c-obj-$(CONFIG_SCx200)         += scx200.o
-
-AFLAGS_vmlinux.lds.o += -U$(XENARCH)
-
-# vsyscall.o contains the vsyscall DSO images as __initdata.
-# We must build both images before we can assemble it.
-# Note: kbuild does not track this dependency due to usage of .incbin
-$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
-targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
-
-# The DSO images are built using a special linker script.
-quiet_cmd_syscall = SYSCALL $@
-      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
-                         -Wl,-T,$(filter-out FORCE,$^) -o $@
-
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
-SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
-SYSCFLAGS_vsyscall-int80.so    = $(vsyscall-flags)
-
-$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
-$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
-       $(call if_changed,syscall)
-
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO.  With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
-extra-y += vsyscall-syms.o
-$(obj)/built-in.o: $(obj)/vsyscall-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
-
-SYSCFLAGS_vsyscall-syms.o = -r
-$(obj)/vsyscall-syms.o: $(obj)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
-       $(call if_changed,syscall)
-
-c-link := init_task.o
-s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
-       @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
-
-$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
-
-obj-y  += $(c-obj-y) $(s-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
deleted file mode 100644 (file)
index 478e023..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Makefile for x86-compatible CPU details and quirks
-#
-
-CFLAGS += -Iarch/i386/kernel/cpu
-
-obj-y  :=      common.o
-c-obj-y        +=      proc.o
-
-c-obj-y        +=      amd.o
-c-obj-y        +=      cyrix.o
-c-obj-y        +=      centaur.o
-c-obj-y        +=      transmeta.o
-c-obj-y        +=      intel.o
-c-obj-y        +=      rise.o
-c-obj-y        +=      nexgen.o
-c-obj-y        +=      umc.o
-
-#obj-$(CONFIG_X86_MCE) +=      ../../../../i386/kernel/cpu/mcheck/
-
-#obj-$(CONFIG_MTRR)    +=      ../../../../i386/kernel/cpu/mtrr/
-#obj-$(CONFIG_CPU_FREQ)        +=      ../../../../i386/kernel/cpu/cpufreq/
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-       @ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
-
-obj-y  += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/common.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/cpu/common.c
deleted file mode 100644 (file)
index 83b293d..0000000
+++ /dev/null
@@ -1,601 +0,0 @@
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm-xen/hypervisor.h>
-
-#include "cpu.h"
-
-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
-
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-
-extern void mcheck_init(struct cpuinfo_x86 *c);
-
-extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
-
-extern int disable_pse;
-
-static void default_init(struct cpuinfo_x86 * c)
-{
-       /* Not much we can do here... */
-       /* Check if at least it has cpuid */
-       if (c->cpuid_level == -1) {
-               /* No cpuid. It must be an ancient CPU */
-               if (c->x86 == 4)
-                       strcpy(c->x86_model_id, "486");
-               else if (c->x86 == 3)
-                       strcpy(c->x86_model_id, "386");
-       }
-}
-
-static struct cpu_dev default_cpu = {
-       .c_init = default_init,
-};
-static struct cpu_dev * this_cpu = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-       get_option (&str, &cachesize_override);
-       return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
-int __init get_model_name(struct cpuinfo_x86 *c)
-{
-       unsigned int *v;
-       char *p, *q;
-
-       if (cpuid_eax(0x80000000) < 0x80000004)
-               return 0;
-
-       v = (unsigned int *) c->x86_model_id;
-       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-       c->x86_model_id[48] = 0;
-
-       /* Intel chips right-justify this string for some dumb reason;
-          undo that brain damage */
-       p = q = &c->x86_model_id[0];
-       while ( *p == ' ' )
-            p++;
-       if ( p != q ) {
-            while ( *p )
-                 *q++ = *p++;
-            while ( q <= &c->x86_model_id[48] )
-                 *q++ = '\0';  /* Zero-pad the rest */
-       }
-
-       return 1;
-}
-
-
-void __init display_cacheinfo(struct cpuinfo_x86 *c)
-{
-       unsigned int n, dummy, ecx, edx, l2size;
-
-       n = cpuid_eax(0x80000000);
-
-       if (n >= 0x80000005) {
-               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-               c->x86_cache_size=(ecx>>24)+(edx>>24);  
-       }
-
-       if (n < 0x80000006)     /* Some chips just have a large L1. */
-               return;
-
-       ecx = cpuid_ecx(0x80000006);
-       l2size = ecx >> 16;
-       
-       /* do processor-specific cache resizing */
-       if (this_cpu->c_size_cache)
-               l2size = this_cpu->c_size_cache(c,l2size);
-
-       /* Allow user to override all this if necessary. */
-       if (cachesize_override != -1)
-               l2size = cachesize_override;
-
-       if ( l2size == 0 )
-               return;         /* Again, no L2 cache is possible */
-
-       c->x86_cache_size = l2size;
-
-       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-              l2size, ecx & 0xFF);
-}
-
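A worked example of the arithmetic in display_cacheinfo() above, using a made-up register value rather than a real CPU dump:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ecx = 0x01006440;  /* hypothetical CPUID 0x80000006 ECX */

            /* Same decode as display_cacheinfo(): L2 size in KB in the top
             * half, line size in the low byte. */
            printf("L2: %u KB, %u bytes/line\n", ecx >> 16, ecx & 0xff);
            return 0;           /* prints "L2: 256 KB, 64 bytes/line" */
    }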
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table is only used if init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-
-/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
-{
-       struct cpu_model_info *info;
-
-       if ( c->x86_model >= 16 )
-               return NULL;    /* Range check */
-
-       if (!this_cpu)
-               return NULL;
-
-       info = this_cpu->c_models;
-
-       while (info && info->family) {
-               if (info->family == c->x86)
-                       return info->model_names[c->x86_model];
-               info++;
-       }
-       return NULL;            /* Not found */
-}
-
-
-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-{
-       char *v = c->x86_vendor_id;
-       int i;
-
-       for (i = 0; i < X86_VENDOR_NUM; i++) {
-               if (cpu_devs[i]) {
-                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-                           (cpu_devs[i]->c_ident[1] && 
-                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-                               c->x86_vendor = i;
-                               if (!early)
-                                       this_cpu = cpu_devs[i];
-                               break;
-                       }
-               }
-       }
-}
-
-
-static int __init x86_fxsr_setup(char * s)
-{
-       disable_x86_fxsr = 1;
-       return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-       u32 f1, f2;
-
-       asm("pushfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "movl %0,%1\n\t"
-           "xorl %2,%0\n\t"
-           "pushl %0\n\t"
-           "popfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "popfl\n\t"
-           : "=&r" (f1), "=&r" (f2)
-           : "ir" (flag));
-
-       return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-int __init have_cpuid_p(void)
-{
-       return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
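For reference, the two EFLAGS bits this helper gets used with elsewhere in this file (values as commonly defined in the i386 headers of this era; treat them as quoted from memory):

    /* EFLAGS bits probed with flag_is_changeable_p():
     *   AC (bit 18) can be toggled on a 486 or later but not on a 386,
     *   ID (bit 21) can be toggled exactly when the CPUID instruction exists. */
    #define X86_EFLAGS_AC 0x00040000
    #define X86_EFLAGS_ID 0x00200000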
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects. */
-void __init early_cpu_detect(void)
-{
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       c->x86_cache_alignment = 32;
-
-       if (!have_cpuid_p())
-               return;
-
-       /* Get vendor name */
-       cpuid(0x00000000, &c->cpuid_level,
-             (int *)&c->x86_vendor_id[0],
-             (int *)&c->x86_vendor_id[8],
-             (int *)&c->x86_vendor_id[4]);
-
-       get_cpu_vendor(c, 1);
-
-       c->x86 = 4;
-       if (c->cpuid_level >= 0x00000001) {
-               u32 junk, tfms, cap0, misc;
-               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-               c->x86 = (tfms >> 8) & 15;
-               c->x86_model = (tfms >> 4) & 15;
-               if (c->x86 == 0xf) {
-                       c->x86 += (tfms >> 20) & 0xff;
-                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
-               }
-               c->x86_mask = tfms & 15;
-               if (cap0 & (1<<19))
-                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-       }
-
-       early_intel_workaround(c);
-}
-
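A worked example of the family/model/stepping decode above, exercising the extended-family path; the signature value is chosen for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int tfms = 0x00000f29; /* example EAX from CPUID leaf 1 */
            unsigned int family = (tfms >> 8) & 15;
            unsigned int model  = (tfms >> 4) & 15;

            if (family == 0xf) {            /* extended fields only for family 15 */
                    family += (tfms >> 20) & 0xff;
                    model  += ((tfms >> 16) & 0xf) << 4;
            }
            printf("family %u, model %u, stepping %u\n",
                   family, model, tfms & 15);
            return 0;                       /* -> family 15, model 2, stepping 9 */
    }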
-void __init generic_identify(struct cpuinfo_x86 * c)
-{
-       u32 tfms, xlvl;
-       int junk;
-
-       if (have_cpuid_p()) {
-               /* Get vendor name */
-               cpuid(0x00000000, &c->cpuid_level,
-                     (int *)&c->x86_vendor_id[0],
-                     (int *)&c->x86_vendor_id[8],
-                     (int *)&c->x86_vendor_id[4]);
-               
-               get_cpu_vendor(c, 0);
-               /* Initialize the standard set of capabilities */
-               /* Note that the vendor-specific code below might override */
-       
-               /* Intel-defined flags: level 0x00000001 */
-               if ( c->cpuid_level >= 0x00000001 ) {
-                       u32 capability, excap;
-                       cpuid(0x00000001, &tfms, &junk, &excap, &capability);
-                       c->x86_capability[0] = capability;
-                       c->x86_capability[4] = excap;
-                       c->x86 = (tfms >> 8) & 15;
-                       c->x86_model = (tfms >> 4) & 15;
-                       if (c->x86 == 0xf) {
-                               c->x86 += (tfms >> 20) & 0xff;
-                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
-                       } 
-                       c->x86_mask = tfms & 15;
-               } else {
-                       /* Have CPUID level 0 only - unheard of */
-                       c->x86 = 4;
-               }
-
-               /* AMD-defined flags: level 0x80000001 */
-               xlvl = cpuid_eax(0x80000000);
-               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-                       if ( xlvl >= 0x80000001 )
-                               c->x86_capability[1] = cpuid_edx(0x80000001);
-                       if ( xlvl >= 0x80000004 )
-                               get_model_name(c); /* Default name */
-               }
-       }
-}
-
-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-               /* Disable processor serial number */
-               unsigned long lo,hi;
-               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-               lo |= 0x200000;
-               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-               printk(KERN_NOTICE "CPU serial number disabled.\n");
-               clear_bit(X86_FEATURE_PN, c->x86_capability);
-
-               /* Disabling the serial number may affect the cpuid level */
-               c->cpuid_level = cpuid_eax(0);
-       }
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
-       disable_x86_serial_nr = 0;
-       return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-
-
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __init identify_cpu(struct cpuinfo_x86 *c)
-{
-       int i;
-
-       c->loops_per_jiffy = loops_per_jiffy;
-       c->x86_cache_size = -1;
-       c->x86_vendor = X86_VENDOR_UNKNOWN;
-       c->cpuid_level = -1;    /* CPUID not detected */
-       c->x86_model = c->x86_mask = 0; /* So far unknown... */
-       c->x86_vendor_id[0] = '\0'; /* Unset */
-       c->x86_model_id[0] = '\0';  /* Unset */
-       memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
-       if (!have_cpuid_p()) {
-               /* First of all, decide if this is a 486 or higher */
-               /* It's a 486 if we can modify the AC flag */
-               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-                       c->x86 = 4;
-               else
-                       c->x86 = 3;
-       }
-
-       generic_identify(c);
-
-       printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
-               c->x86_capability[0],
-               c->x86_capability[1],
-               c->x86_capability[2],
-               c->x86_capability[3]);
-
-       if (this_cpu->c_identify) {
-               this_cpu->c_identify(c);
-
-               printk(KERN_DEBUG "CPU: After vendor identify, caps:  %08lx %08lx %08lx %08lx\n",
-                       c->x86_capability[0],
-                       c->x86_capability[1],
-                       c->x86_capability[2],
-                       c->x86_capability[3]);
-       }
-
-       /*
-        * Vendor-specific initialization.  In this section we
-        * canonicalize the feature flags, meaning if there are
-        * features a certain CPU supports which CPUID doesn't
-        * tell us, CPUID claiming incorrect flags, or other bugs,
-        * we handle them here.
-        *
-        * At the end of this section, c->x86_capability better
-        * indicate the features this CPU genuinely supports!
-        */
-       if (this_cpu->c_init)
-               this_cpu->c_init(c);
-
-       /* Disable the PN if appropriate */
-       squash_the_stupid_serial_number(c);
-
-       /*
-        * The vendor-specific functions might have changed features.  Now
-        * we do "generic changes."
-        */
-
-       /* TSC disabled? */
-       if ( tsc_disable )
-               clear_bit(X86_FEATURE_TSC, c->x86_capability);
-
-       /* FXSR disabled? */
-       if (disable_x86_fxsr) {
-               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-               clear_bit(X86_FEATURE_XMM, c->x86_capability);
-       }
-
-       if (disable_pse)
-               clear_bit(X86_FEATURE_PSE, c->x86_capability);
-
-       /* If the model name is still unset, do table lookup. */
-       if ( !c->x86_model_id[0] ) {
-               char *p;
-               p = table_lookup_model(c);
-               if ( p )
-                       strcpy(c->x86_model_id, p);
-               else
-                       /* Last resort... */
-                       sprintf(c->x86_model_id, "%02x/%02x",
-                               c->x86_vendor, c->x86_model);
-       }
-
-       machine_specific_modify_cpu_capabilities(c);
-
-       /* Now the feature flags better reflect actual CPU features! */
-
-       printk(KERN_DEBUG "CPU: After all inits, caps:        %08lx %08lx %08lx %08lx\n",
-              c->x86_capability[0],
-              c->x86_capability[1],
-              c->x86_capability[2],
-              c->x86_capability[3]);
-
-       /*
-        * On SMP, boot_cpu_data holds the common feature set between
-        * all CPUs; so make sure that we indicate which features are
-        * common between the CPUs.  The first time this routine gets
-        * executed, c == &boot_cpu_data.
-        */
-       if ( c != &boot_cpu_data ) {
-               /* AND the already accumulated flags with these */
-               for ( i = 0 ; i < NCAPINTS ; i++ )
-                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-       }
-
-       /* Init Machine Check Exception if available. */
-#ifdef CONFIG_X86_MCE
-       mcheck_init(c);
-#endif
-}
-/*
- *     Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
- */
-void __init dodgy_tsc(void)
-{
-       if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
-           ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
-               cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
-}
-
-void __init print_cpu_info(struct cpuinfo_x86 *c)
-{
-       char *vendor = NULL;
-
-       if (c->x86_vendor < X86_VENDOR_NUM)
-               vendor = this_cpu->c_vendor;
-       else if (c->cpuid_level >= 0)
-               vendor = c->x86_vendor_id;
-
-       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-               printk("%s ", vendor);
-
-       if (!c->x86_model_id[0])
-               printk("%d86", c->x86);
-       else
-               printk("%s", c->x86_model_id);
-
-       if (c->x86_mask || c->cpuid_level >= 0) 
-               printk(" stepping %02x\n", c->x86_mask);
-       else
-               printk("\n");
-}
-
-unsigned long cpu_initialized __initdata = 0;
-
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
-
-extern int intel_cpu_init(void);
-extern int cyrix_init_cpu(void);
-extern int nsc_init_cpu(void);
-extern int amd_init_cpu(void);
-extern int centaur_init_cpu(void);
-extern int transmeta_init_cpu(void);
-extern int rise_init_cpu(void);
-extern int nexgen_init_cpu(void);
-extern int umc_init_cpu(void);
-void early_cpu_detect(void);
-
-void __init early_cpu_init(void)
-{
-       intel_cpu_init();
-       cyrix_init_cpu();
-       nsc_init_cpu();
-       amd_init_cpu();
-       centaur_init_cpu();
-       transmeta_init_cpu();
-       rise_init_cpu();
-       nexgen_init_cpu();
-       umc_init_cpu();
-       early_cpu_detect();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /* pse is not compatible with on-the-fly unmapping,
-        * disable it even if the cpus claim to support it.
-        */
-       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-       disable_pse = 1;
-#endif
-}
-
-void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
-{
-       unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
-       unsigned long va;
-       int f;
-
-       /* Xen wants the GDT as a list of machine frame numbers, and the
-        * backing pages must be mapped read-only before it will accept them. */
-       for (va = gdt_descr->address, f = 0;
-            va < gdt_descr->address + gdt_descr->size;
-            va += PAGE_SIZE, f++) {
-               frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
-               protect_page(swapper_pg_dir, (void *)va, PROT_ON);
-       }
-       flush_page_update_queue();
-       /* Second argument is the number of 8-byte descriptor entries. */
-       if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
-               BUG();
-       lgdt_finish();
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __init cpu_init (void)
-{
-       int cpu = smp_processor_id();
-       struct tss_struct * t = init_tss + cpu;
-       struct thread_struct *thread = &current->thread;
-
-       if (test_and_set_bit(cpu, &cpu_initialized)) {
-               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-               for (;;) local_irq_enable();
-       }
-       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-
-       if (cpu_has_vme || cpu_has_de)
-               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-       if (tsc_disable && cpu_has_tsc) {
-               printk(KERN_NOTICE "Disabling TSC...\n");
-               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-               set_in_cr4(X86_CR4_TSD);
-       }
-
-       /*
-        * Initialize the per-CPU GDT with the boot GDT,
-        * and set up the GDT descriptor:
-        */
-       if (cpu) {
-               cpu_gdt_descr[cpu].size = GDT_SIZE;
-               cpu_gdt_descr[cpu].address = 0; /* XXXcl alloc page */
-               BUG();          /* XXXcl SMP */
-               memcpy((void *)cpu_gdt_descr[cpu].address,
-                   (void *)cpu_gdt_descr[0].address, GDT_SIZE);
-       }
-       /*
-        * Set up the per-thread TLS descriptor cache:
-        */
-       memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
-           GDT_ENTRY_TLS_ENTRIES * 8);
-
-       cpu_gdt_init(&cpu_gdt_descr[cpu]);
-
-       /*
-        * Delete NT
-        */
-       __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-
-       /*
-        * Set up and load the per-CPU TSS and LDT
-        */
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-       if (current->mm)
-               BUG();
-       enter_lazy_tlb(&init_mm, current);
-
-       load_esp0(t, thread);
-
-       load_LDT(&init_mm.context);
-       flush_page_update_queue();
-
-       /* Clear %fs and %gs. */
-       asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
-
-       /* Clear all 6 debug registers: */
-
-#define CD(register) HYPERVISOR_set_debugreg(register, 0)
-
-       CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-
-#undef CD
-
-       /*
-        * Force FPU initialization:
-        */
-       current_thread_info()->status = 0;
-       current->used_math = 0;
-       mxcsr_feature_mask_init();
-}
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/entry.S
deleted file mode 100644 (file)
index f1e60cc..0000000
+++ /dev/null
@@ -1,1029 +0,0 @@
-/*
- *  linux/arch/i386/entry.S
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
- *
- * Stack layout in 'ret_from_system_call':
- *     ptrace needs to have all regs on the stack.
- *     if the order here is changed, it needs to be
- *     updated in fork.c:copy_process, signal.c:do_signal,
- *     ptrace.c and ptrace.h
- *
- *      0(%esp) - %ebx
- *      4(%esp) - %ecx
- *      8(%esp) - %edx
- *       C(%esp) - %esi
- *     10(%esp) - %edi
- *     14(%esp) - %ebp
- *     18(%esp) - %eax
- *     1C(%esp) - %ds
- *     20(%esp) - %es
- *     24(%esp) - orig_eax
- *     28(%esp) - %eip
- *     2C(%esp) - %cs
- *     30(%esp) - %eflags
- *     34(%esp) - %oldesp
- *     38(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
- */
-
-#include <linux/config.h>
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-#include <asm/errno.h>
-#include <asm/segment.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include "irq_vectors.h"
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#define nr_syscalls ((syscall_table_size)/4)
-
-EBX            = 0x00
-ECX            = 0x04
-EDX            = 0x08
-ESI            = 0x0C
-EDI            = 0x10
-EBP            = 0x14
-EAX            = 0x18
-DS             = 0x1C
-ES             = 0x20
-ORIG_EAX       = 0x24
-EIP            = 0x28
-CS             = 0x2C
-EFLAGS         = 0x30
-OLDESP         = 0x34
-OLDSS          = 0x38
-
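These byte offsets mirror the register frame that SAVE_ALL below builds on the kernel stack; for reference, the equivalent C view (as in the i386 struct pt_regs of this era) looks like:

    struct pt_regs {
            long ebx;           /* 0x00 */
            long ecx;           /* 0x04 */
            long edx;           /* 0x08 */
            long esi;           /* 0x0C */
            long edi;           /* 0x10 */
            long ebp;           /* 0x14 */
            long eax;           /* 0x18 */
            int  xds;           /* 0x1C */
            int  xes;           /* 0x20 */
            long orig_eax;      /* 0x24 */
            long eip;           /* 0x28 */
            int  xcs;           /* 0x2C */
            long eflags;        /* 0x30 */
            long esp;           /* 0x34 (OLDESP) */
            int  xss;           /* 0x38 (OLDSS) */
    };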
-CF_MASK                = 0x00000001
-TF_MASK                = 0x00000100
-IF_MASK                = 0x00000200
-DF_MASK                = 0x00000400 
-NT_MASK                = 0x00004000
-VM_MASK                = 0x00020000
-
-/* Offsets into shared_info_t. */
-#define evtchn_upcall_pending          /* 0 */
-#define evtchn_upcall_mask             1
-
-#define XEN_BLOCK_EVENTS(reg)  movb $1,evtchn_upcall_mask(reg)
-#define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
-
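Editor's note: a rough C rendering of the three macros above, as a hedged sketch only (the struct is a simplified stand-in, not the real shared_info_t; evtchn_upcall_pending is deliberately defined to nothing so it assembles as offset 0):

#include <stdint.h>

struct vcpu_events {                            /* start of the shared-info page */
        volatile uint8_t evtchn_upcall_pending; /* offset 0 */
        volatile uint8_t evtchn_upcall_mask;    /* offset 1 */
};

static inline void block_events(struct vcpu_events *s)
{
        s->evtchn_upcall_mask = 1;              /* XEN_BLOCK_EVENTS   */
}

static inline void unblock_events(struct vcpu_events *s)
{
        s->evtchn_upcall_mask = 0;              /* XEN_UNBLOCK_EVENTS */
}

static inline int events_pending(struct vcpu_events *s)
{
        return s->evtchn_upcall_pending != 0;   /* XEN_TEST_PENDING   */
}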
-#ifdef CONFIG_PREEMPT
-#define preempt_stop           movl HYPERVISOR_shared_info,%esi        ; \
-                               XEN_BLOCK_EVENTS(%esi)
-#else
-#define preempt_stop
-#define resume_kernel          restore_all
-#endif
-
-#define SAVE_ALL \
-       cld; \
-       pushl %es; \
-       pushl %ds; \
-       pushl %eax; \
-       pushl %ebp; \
-       pushl %edi; \
-       pushl %esi; \
-       pushl %edx; \
-       pushl %ecx; \
-       pushl %ebx; \
-       movl $(__KERNEL_DS), %edx; \
-       movl %edx, %ds; \
-       movl %edx, %es;
-       # XXXcl USER?
-
-#define RESTORE_INT_REGS \
-       popl %ebx;      \
-       popl %ecx;      \
-       popl %edx;      \
-       popl %esi;      \
-       popl %edi;      \
-       popl %ebp;      \
-       popl %eax
-
-#define RESTORE_REGS   \
-       RESTORE_INT_REGS; \
-1:     popl %ds;       \
-2:     popl %es;       \
-.section .fixup,"ax";  \
-3:     movl $0,(%esp); \
-       jmp 1b;         \
-4:     movl $0,(%esp); \
-       jmp 2b;         \
-.previous;             \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,3b;    \
-       .long 2b,4b;    \
-.previous
-
-
-#define RESTORE_ALL    \
-       RESTORE_REGS    \
-       addl $4, %esp;  \
-1:     iret;           \
-.section .fixup,"ax";   \
-2:     movl $(__USER_DS), %edx; \
-       movl %edx, %ds; \
-       movl %edx, %es; \
-       pushl $11;      \
-       call do_exit;   \
-.previous;             \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,2b;    \
-.previous
-
-
-
-ENTRY(lcall7)
-       pushfl                  # We get a different stack layout with call
-                               # gates, which has to be cleaned up later..
-       pushl %eax
-       SAVE_ALL
-       movl %esp, %ebp
-       pushl %ebp
-       pushl $0x7
-do_lcall:
-       movl EIP(%ebp), %eax    # due to call gates, this is eflags, not eip..
-       movl CS(%ebp), %edx     # this is eip..
-       movl EFLAGS(%ebp), %ecx # and this is cs..
-       movl %eax,EFLAGS(%ebp)  #
-       movl %edx,EIP(%ebp)     # Now we move them to their "normal" places
-       movl %ecx,CS(%ebp)      #
-       GET_THREAD_INFO_WITH_ESP(%ebp)  # GET_THREAD_INFO
-       movl TI_exec_domain(%ebp), %edx # Get the execution domain
-       call *EXEC_DOMAIN_handler(%edx) # Call the handler for the domain
-       addl $4, %esp
-       popl %eax
-       jmp resume_userspace
-
-ENTRY(lcall27)
-       pushfl                  # We get a different stack layout with call
-                               # gates, which has to be cleaned up later..
-       pushl %eax
-       SAVE_ALL
-       movl %esp, %ebp
-       pushl %ebp
-       pushl $0x27
-       jmp do_lcall
-
-
-ENTRY(ret_from_fork)
-       pushl %eax
-       call schedule_tail
-       GET_THREAD_INFO(%ebp)
-       popl %eax
-       jmp syscall_exit
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
-       # userspace resumption stub bypassing syscall exit tracing
-       ALIGN
-ret_from_exception:
-       preempt_stop
-ret_from_intr:
-       GET_THREAD_INFO(%ebp)
-       movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
-       movb CS(%esp), %al
-       testl $(VM_MASK | 2), %eax
-       jz resume_kernel                # returning to kernel or vm86-space
-ENTRY(resume_userspace)
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-ret_syscall_tests:
-       movl TI_flags(%ebp), %ecx
-       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
-                                       # int/exception return?
-       jne work_pending
-       jmp restore_all_enable_events
-
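Editor's note: the "make tests atomic" comments above are the heart of the Xen return path -- work flags are sampled with event delivery masked, and the pending byte is re-checked after unmasking, so nothing can slip in between the test and the return to the guest. Roughly, and only as a sketch with hypothetical helper names:

struct shared_bytes {
        volatile unsigned char pending;         /* evtchn_upcall_pending */
        volatile unsigned char mask;            /* evtchn_upcall_mask    */
};

extern void do_pending_work(void);              /* schedule(), signals, ...    */
extern void process_events(void);               /* evtchn_do_upcall() stand-in */

void return_to_user(struct shared_bytes *s,
                    volatile unsigned long *flags, unsigned long work_mask)
{
        for (;;) {
                s->mask = 1;                    /* XEN_BLOCK_EVENTS      */
                if (*flags & work_mask) {       /* _TIF_WORK_MASK test   */
                        s->mask = 0;
                        do_pending_work();
                        continue;
                }
                s->mask = 0;                    /* reenable callbacks    */
                if (s->pending) {               /* critical-region check */
                        s->mask = 1;
                        process_events();
                        continue;
                }
                return;                         /* RESTORE_ALL / iret    */
        }
}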
-#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
-       movl HYPERVISOR_shared_info,%esi
-       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
-       jnz restore_all_enable_events
-need_resched:
-       movl TI_flags(%ebp), %ecx       # need_resched set ?
-       testb $_TIF_NEED_RESCHED, %cl
-       jz restore_all_enable_events
-       testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
-       jz restore_all_enable_events
-       movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
-       call schedule
-       movl $0,TI_preempt_count(%ebp)
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-       jmp need_resched
-#endif
-
-/* SYSENTER_RETURN points to after the "sysenter" instruction in
-   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
-
-       # sysenter call handler stub
-ENTRY(sysenter_entry)
-       movl TSS_sysenter_esp0(%esp),%esp
-sysenter_past_esp:
-       sti
-       pushl $(__USER_DS)
-       pushl %ebp
-       pushfl
-       pushl $(__USER_CS)
-       pushl $SYSENTER_RETURN
-
-/*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
-       cmpl $__PAGE_OFFSET-3,%ebp
-       jae syscall_fault
-1:     movl (%ebp),%ebp
-.section __ex_table,"a"
-       .align 4
-       .long 1b,syscall_fault
-.previous
-
-       pushl %eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-       cmpl $(nr_syscalls), %eax
-       jae syscall_badsys
-
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-       jnz syscall_trace_entry
-       call *sys_call_table(,%eax,4)
-       movl %eax,EAX(%esp)
-       cli
-       movl TI_flags(%ebp), %ecx
-       testw $_TIF_ALLWORK_MASK, %cx
-       jne syscall_exit_work
-/* if something modifies registers it must also disable sysexit */
-       movl EIP(%esp), %edx
-       movl OLDESP(%esp), %ecx
-       sti
-       sysexit
-
-
-       # system call handler stub
-ENTRY(system_call)
-       pushl %eax                      # save orig_eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-       cmpl $(nr_syscalls), %eax
-       jae syscall_badsys
-                                       # system call tracing in operation
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-       jnz syscall_trace_entry
-syscall_call:
-       call *sys_call_table(,%eax,4)
-       movl %eax,EAX(%esp)             # store the return value
-syscall_exit:
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-       movl TI_flags(%ebp), %ecx
-       testw $_TIF_ALLWORK_MASK, %cx   # current->work
-       jne syscall_exit_work
-       jmp restore_all_enable_events
-
-       ALIGN
-restore_all:
-       RESTORE_ALL
-
-       # perform work that needs to be done immediately before resumption
-       ALIGN
-work_pending:
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
-       testb $_TIF_NEED_RESCHED, %cl
-       jz work_notifysig
-work_resched:
-       call schedule
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-       movl TI_flags(%ebp), %ecx
-       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
-                                       # than syscall tracing?
-       jz restore_all_enable_events
-       # XXXcl sti missing???
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
-       testb $_TIF_NEED_RESCHED, %cl
-       jnz work_resched
-
-work_notifysig:                                # deal with pending signals and
-                                       # notify-resume requests
-       testl $VM_MASK, EFLAGS(%esp)
-       movl %esp, %eax
-       jne work_notifysig_v86          # returning to kernel-space or
-                                       # vm86-space
-       xorl %edx, %edx
-       call do_notify_resume
-       movl HYPERVISOR_shared_info,%esi
-       jmp restore_all_enable_events
-
-       ALIGN
-work_notifysig_v86:
-       pushl %ecx
-       call save_v86_state
-       popl %ecx
-       movl %eax, %esp
-       xorl %edx, %edx
-       call do_notify_resume
-       movl HYPERVISOR_shared_info,%esi
-       jmp restore_all_enable_events
-
-       # perform syscall exit tracing
-       ALIGN
-syscall_trace_entry:
-       movl $-ENOSYS,EAX(%esp)
-       movl %esp, %eax
-       xorl %edx,%edx
-       call do_syscall_trace
-       movl ORIG_EAX(%esp), %eax
-       cmpl $(nr_syscalls), %eax
-       jnae syscall_call
-       jmp syscall_exit
-
-       # perform syscall exit tracing
-       ALIGN
-syscall_exit_work:
-       movl HYPERVISOR_shared_info,%esi
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
-       jz work_pending
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
-                                       # could let do_syscall_trace() call
-                                       # schedule() instead
-       movl %esp, %eax
-       movl $1, %edx
-       call do_syscall_trace
-       jmp resume_userspace
-
-       ALIGN
-syscall_fault:
-       pushl %eax                      # save orig_eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-       movl $-EFAULT,EAX(%esp)
-       jmp resume_userspace
-
-       ALIGN
-syscall_badsys:
-       movl $-ENOSYS,EAX(%esp)
-       jmp resume_userspace
-
-ENTRY(divide_error)
-       pushl $0                        # no error code
-       pushl $do_divide_error
-       ALIGN
-error_code:
-       pushl %ds
-       pushl %eax
-       xorl %eax, %eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       decl %eax                       # eax = -1
-       pushl %ecx
-       pushl %ebx
-       cld
-       movl %es, %ecx
-       movl ORIG_EAX(%esp), %esi       # get the error code
-       movl ES(%esp), %edi             # get the function address
-       movl %eax, ORIG_EAX(%esp)
-       movl %ecx, ES(%esp)
-       movl %esp, %edx
-       pushl %esi                      # push the error code
-       pushl %edx                      # push the pt_regs pointer
-       movl $(__KERNEL_DS), %edx       # XXXcl USER?
-       movl %edx, %ds
-       movl %edx, %es
-       call *%edi
-       addl $8, %esp
-       jmp ret_from_exception
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
-       pushl %eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-       movl EIP(%esp),%eax
-       cmpl $scrit,%eax
-       jb   11f
-       cmpl $ecrit,%eax
-       jb   critical_region_fixup
-11:    push %esp
-       call evtchn_do_upcall
-       add  $4,%esp
-       movl HYPERVISOR_shared_info,%esi
-       movb CS(%esp),%cl
-       test $2,%cl                     # slow return to ring 2 or 3
-       jne  ret_syscall_tests
-restore_all_enable_events:  
-safesti:XEN_UNBLOCK_EVENTS(%esi)       # reenable event callbacks
-scrit: /**** START OF CRITICAL REGION ****/
-       testb $1,evtchn_upcall_pending(%esi)
-       jnz  14f                        # process more events if necessary...
-       RESTORE_ALL
-14:    XEN_BLOCK_EVENTS(%esi)
-       jmp  11b
-ecrit:  /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame. 
-critical_region_fixup:
-       addl $critical_fixup_table-scrit,%eax
-       movzbl (%eax),%eax              # %eax contains num bytes popped
-       mov  %esp,%esi
-       add  %eax,%esi                  # %esi points at end of src region
-       mov  %esp,%edi
-       add  $0x34,%edi                 # %edi points at end of dst region
-       mov  %eax,%ecx
-       shr  $2,%ecx                    # convert byte count to longwords
-       je   16f                        # skip loop if nothing to copy
-15:    subl $4,%esi                    # pre-decrementing copy loop
-       subl $4,%edi
-       movl (%esi),%eax
-       movl %eax,(%edi)
-       loop 15b
-16:    movl %edi,%esp                  # final %edi is top of merged stack
-       jmp  11b
-
-critical_fixup_table:
-       .byte 0x00,0x00,0x00            # testb $0xff,(%esi)
-       .byte 0x00,0x00                 # jnz  14f
-       .byte 0x00                      # pop  %ebx
-       .byte 0x04                      # pop  %ecx
-       .byte 0x08                      # pop  %edx
-       .byte 0x0c                      # pop  %esi
-       .byte 0x10                      # pop  %edi
-       .byte 0x14                      # pop  %ebp
-       .byte 0x18                      # pop  %eax
-       .byte 0x1c                      # pop  %ds
-       .byte 0x20                      # pop  %es
-       .byte 0x24,0x24,0x24            # add  $4,%esp
-       .byte 0x28                      # iret
-       .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
-       .byte 0x00,0x00                 # jmp  11b
-
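Editor's note: a plain-C sketch of the merge described above, for illustration only. 'popped' is the byte count looked up in critical_fixup_table, and 0x34 is the size of the event frame when no ring transition happened (nine saved registers, the orig_eax slot, and the three-word iret frame):

#include <string.h>

#define SAVED_FRAME_BYTES 0x34

/* Copy the registers our own frame saved for the already-popped part of
 * the interrupted frame up against its remaining words, and return the
 * top of the merged stack (the new %esp). */
static unsigned long merge_frames(unsigned long esp, unsigned int popped)
{
        unsigned char *src = (unsigned char *)esp + popped;             /* end of src */
        unsigned char *dst = (unsigned char *)esp + SAVED_FRAME_BYTES;  /* end of dst */
        unsigned int n;

        for (n = popped; n != 0; n -= 4) {      /* pre-decrementing copy, as at 15: */
                src -= 4;
                dst -= 4;
                memcpy(dst, src, 4);            /* movl (%esi) -> (%edi) */
        }
        return (unsigned long)dst;              /* final %edi == merged stack top */
}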
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
-       pushal
-       call install_safe_pf_handler
-       movl 32(%esp),%ebx
-1:     movl %ebx,%ds
-       movl 36(%esp),%ebx
-2:     movl %ebx,%es
-       movl 40(%esp),%ebx
-3:     movl %ebx,%fs
-       movl 44(%esp),%ebx
-4:     movl %ebx,%gs
-       call install_normal_pf_handler
-       popal
-       addl $16,%esp
-5:     iret
-.section .fixup,"ax";  \
-6:     xorl %ebx,%ebx; \
-       jmp 1b;         \
-7:     xorl %ebx,%ebx; \
-       jmp 2b;         \
-8:     xorl %ebx,%ebx; \
-       jmp 3b;         \
-9:     xorl %ebx,%ebx; \
-       jmp 4b;         \
-10:    pushl %ss;      \
-       popl %ds;       \
-       pushl %ss;      \
-       popl %es;       \
-       pushl $11;      \
-       call do_exit;   \
-.previous;             \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,6b;    \
-       .long 2b,7b;    \
-       .long 3b,8b;    \
-       .long 4b,9b;    \
-       .long 5b,10b;   \
-.previous
-
-ENTRY(coprocessor_error)
-       pushl $0
-       pushl $do_coprocessor_error
-       jmp error_code
-
-ENTRY(simd_coprocessor_error)
-       pushl $0
-       pushl $do_simd_coprocessor_error
-       jmp error_code
-
-ENTRY(device_not_available)
-       pushl $-1                       # mark this as an int
-       SAVE_ALL
-       preempt_stop
-       call math_state_restore
-       jmp ret_from_exception
-
-/*
- * Debug traps and NMI can happen at the one SYSENTER instruction
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
- * We just load the right stack, and push the three (known) values
- * by hand onto the new stack - while updating the return eip past
- * the instruction that would have done it for sysenter.
- */
-#define FIX_STACK(offset, ok, label)           \
-       cmpw $__KERNEL_CS,4(%esp);              \
-       jne ok;                                 \
-label:                                         \
-       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
-       pushfl;                                 \
-       pushl $__KERNEL_CS;                     \
-       pushl $sysenter_past_esp
-
-ENTRY(debug)
-       cmpl $sysenter_entry,(%esp)
-       jne debug_stack_correct
-       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-debug_stack_correct:
-       pushl $0
-       pushl $do_debug
-       jmp error_code
-
-#if 0
-/*
- * NMI is doubly nasty. It can happen _while_ we're handling
- * a debug fault, and the debug fault hasn't yet been able to
- * clear up the stack. So we first check whether we got  an
- * NMI on the sysenter entry path, but after that we need to
- * check whether we got an NMI on the debug path where the debug
- * fault happened on the sysenter path.
- */
-ENTRY(nmi)
-       cmpl $sysenter_entry,(%esp)
-       je nmi_stack_fixup
-       pushl %eax
-       movl %esp,%eax
-       /* Do not access memory above the end of our stack page,
-        * it might not exist.
-        */
-       andl $(THREAD_SIZE-1),%eax
-       cmpl $(THREAD_SIZE-20),%eax
-       popl %eax
-       jae nmi_stack_correct
-       cmpl $sysenter_entry,12(%esp)
-       je nmi_debug_stack_check
-nmi_stack_correct:
-       pushl %eax
-       SAVE_ALL
-       movl %esp, %edx
-       pushl $0
-       pushl %edx
-       call do_nmi
-       addl $8, %esp
-       RESTORE_ALL
-
-nmi_stack_fixup:
-       FIX_STACK(12,nmi_stack_correct, 1)
-       jmp nmi_stack_correct
-nmi_debug_stack_check:
-       cmpw $__KERNEL_CS,16(%esp)
-       jne nmi_stack_correct
-       cmpl $debug - 1,(%esp)
-       jle nmi_stack_correct
-       cmpl $debug_esp_fix_insn,(%esp)
-       jle nmi_debug_stack_fixup
-nmi_debug_stack_fixup:
-       FIX_STACK(24,nmi_stack_correct, 1)
-       jmp nmi_stack_correct
-#endif
-
-ENTRY(int3)
-       pushl $0
-       pushl $do_int3
-       jmp error_code
-
-ENTRY(overflow)
-       pushl $0
-       pushl $do_overflow
-       jmp error_code
-
-ENTRY(bounds)
-       pushl $0
-       pushl $do_bounds
-       jmp error_code
-
-ENTRY(invalid_op)
-       pushl $0
-       pushl $do_invalid_op
-       jmp error_code
-
-ENTRY(coprocessor_segment_overrun)
-       pushl $0
-       pushl $do_coprocessor_segment_overrun
-       jmp error_code
-
-ENTRY(double_fault)
-       pushl $do_double_fault
-       jmp error_code
-
-ENTRY(invalid_TSS)
-       pushl $do_invalid_TSS
-       jmp error_code
-
-ENTRY(segment_not_present)
-       pushl $do_segment_not_present
-       jmp error_code
-
-ENTRY(stack_segment)
-       pushl $do_stack_segment
-       jmp error_code
-
-ENTRY(general_protection)
-       pushl $do_general_protection
-       jmp error_code
-
-ENTRY(alignment_check)
-       pushl $do_alignment_check
-       jmp error_code
-
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-#define PAGE_FAULT_STUB(_name1, _name2)                                          \
-ENTRY(_name1)                                                            \
-       pushl %ds                                                       ; \
-       pushl %eax                                                      ; \
-       xorl %eax,%eax                                                  ; \
-       pushl %ebp                                                      ; \
-       pushl %edi                                                      ; \
-       pushl %esi                                                      ; \
-       pushl %edx                                                      ; \
-       decl %eax                       /* eax = -1 */                  ; \
-       pushl %ecx                                                      ; \
-       pushl %ebx                                                      ; \
-       GET_THREAD_INFO(%ebp)                                           ; \
-       cld                                                             ; \
-       movl %es,%ecx                                                   ; \
-       movl ORIG_EAX(%esp), %esi       /* get the error code */        ; \
-       movl ES(%esp), %edi             /* get the faulting address */  ; \
-       movl %eax, ORIG_EAX(%esp)                                       ; \
-       movl %ecx, ES(%esp)                                             ; \
-       movl %esp,%edx                                                  ; \
-       pushl %edi                      /* push the faulting address */ ; \
-       pushl %esi                      /* push the error code */       ; \
-       pushl %edx                      /* push the pt_regs pointer */  ; \
-       movl $(__KERNEL_DS),%edx                                        ; \
-       movl %edx,%ds                                                   ; \
-       movl %edx,%es                                                   ; \
-       call _name2                                                     ; \
-       addl $12,%esp                                                   ; \
-       jmp ret_from_exception                                          ;
-PAGE_FAULT_STUB(page_fault, do_page_fault)
-PAGE_FAULT_STUB(safe_page_fault, do_safe_page_fault)
-
-#ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
-       pushl $0
-       pushl machine_check_vector
-       jmp error_code
-#endif
-
-ENTRY(fixup_4gb_segment)
-       pushl $do_fixup_4gb_segment
-       jmp error_code
-
-.data
-ENTRY(sys_call_table)
-       .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
-       .long sys_exit
-       .long sys_fork
-       .long sys_read
-       .long sys_write
-       .long sys_open          /* 5 */
-       .long sys_close
-       .long sys_waitpid
-       .long sys_creat
-       .long sys_link
-       .long sys_unlink        /* 10 */
-       .long sys_execve
-       .long sys_chdir
-       .long sys_time
-       .long sys_mknod
-       .long sys_chmod         /* 15 */
-       .long sys_lchown16
-       .long sys_ni_syscall    /* old break syscall holder */
-       .long sys_stat
-       .long sys_lseek
-       .long sys_getpid        /* 20 */
-       .long sys_mount
-       .long sys_oldumount
-       .long sys_setuid16
-       .long sys_getuid16
-       .long sys_stime         /* 25 */
-       .long sys_ptrace
-       .long sys_alarm
-       .long sys_fstat
-       .long sys_pause
-       .long sys_utime         /* 30 */
-       .long sys_ni_syscall    /* old stty syscall holder */
-       .long sys_ni_syscall    /* old gtty syscall holder */
-       .long sys_access
-       .long sys_nice
-       .long sys_ni_syscall    /* 35 - old ftime syscall holder */
-       .long sys_sync
-       .long sys_kill
-       .long sys_rename
-       .long sys_mkdir
-       .long sys_rmdir         /* 40 */
-       .long sys_dup
-       .long sys_pipe
-       .long sys_times
-       .long sys_ni_syscall    /* old prof syscall holder */
-       .long sys_brk           /* 45 */
-       .long sys_setgid16
-       .long sys_getgid16
-       .long sys_signal
-       .long sys_geteuid16
-       .long sys_getegid16     /* 50 */
-       .long sys_acct
-       .long sys_umount        /* recycled never used phys() */
-       .long sys_ni_syscall    /* old lock syscall holder */
-       .long sys_ioctl
-       .long sys_fcntl         /* 55 */
-       .long sys_ni_syscall    /* old mpx syscall holder */
-       .long sys_setpgid
-       .long sys_ni_syscall    /* old ulimit syscall holder */
-       .long sys_olduname
-       .long sys_umask         /* 60 */
-       .long sys_chroot
-       .long sys_ustat
-       .long sys_dup2
-       .long sys_getppid
-       .long sys_getpgrp       /* 65 */
-       .long sys_setsid
-       .long sys_sigaction
-       .long sys_sgetmask
-       .long sys_ssetmask
-       .long sys_setreuid16    /* 70 */
-       .long sys_setregid16
-       .long sys_sigsuspend
-       .long sys_sigpending
-       .long sys_sethostname
-       .long sys_setrlimit     /* 75 */
-       .long sys_old_getrlimit
-       .long sys_getrusage
-       .long sys_gettimeofday
-       .long sys_settimeofday
-       .long sys_getgroups16   /* 80 */
-       .long sys_setgroups16
-       .long old_select
-       .long sys_symlink
-       .long sys_lstat
-       .long sys_readlink      /* 85 */
-       .long sys_uselib
-       .long sys_swapon
-       .long sys_reboot
-       .long old_readdir
-       .long old_mmap          /* 90 */
-       .long sys_munmap
-       .long sys_truncate
-       .long sys_ftruncate
-       .long sys_fchmod
-       .long sys_fchown16      /* 95 */
-       .long sys_getpriority
-       .long sys_setpriority
-       .long sys_ni_syscall    /* old profil syscall holder */
-       .long sys_statfs
-       .long sys_fstatfs       /* 100 */
-       .long sys_ioperm
-       .long sys_socketcall
-       .long sys_syslog
-       .long sys_setitimer
-       .long sys_getitimer     /* 105 */
-       .long sys_newstat
-       .long sys_newlstat
-       .long sys_newfstat
-       .long sys_uname
-       .long sys_iopl          /* 110 */
-       .long sys_vhangup
-       .long sys_ni_syscall    /* old "idle" system call */
-       .long sys_ni_syscall    /* disable sys_vm86old */
-       .long sys_wait4
-       .long sys_swapoff       /* 115 */
-       .long sys_sysinfo
-       .long sys_ipc
-       .long sys_fsync
-       .long sys_sigreturn
-       .long sys_clone         /* 120 */
-       .long sys_setdomainname
-       .long sys_newuname
-       .long sys_modify_ldt
-       .long sys_adjtimex
-       .long sys_mprotect      /* 125 */
-       .long sys_sigprocmask
-       .long sys_ni_syscall    /* old "create_module" */ 
-       .long sys_init_module
-       .long sys_delete_module
-       .long sys_ni_syscall    /* 130: old "get_kernel_syms" */
-       .long sys_quotactl
-       .long sys_getpgid
-       .long sys_fchdir
-       .long sys_bdflush
-       .long sys_sysfs         /* 135 */
-       .long sys_personality
-       .long sys_ni_syscall    /* reserved for afs_syscall */
-       .long sys_setfsuid16
-       .long sys_setfsgid16
-       .long sys_llseek        /* 140 */
-       .long sys_getdents
-       .long sys_select
-       .long sys_flock
-       .long sys_msync
-       .long sys_readv         /* 145 */
-       .long sys_writev
-       .long sys_getsid
-       .long sys_fdatasync
-       .long sys_sysctl
-       .long sys_mlock         /* 150 */
-       .long sys_munlock
-       .long sys_mlockall
-       .long sys_munlockall
-       .long sys_sched_setparam
-       .long sys_sched_getparam   /* 155 */
-       .long sys_sched_setscheduler
-       .long sys_sched_getscheduler
-       .long sys_sched_yield
-       .long sys_sched_get_priority_max
-       .long sys_sched_get_priority_min  /* 160 */
-       .long sys_sched_rr_get_interval
-       .long sys_nanosleep
-       .long sys_mremap
-       .long sys_setresuid16
-       .long sys_getresuid16   /* 165 */
-       .long sys_vm86
-       .long sys_ni_syscall    /* Old sys_query_module */
-       .long sys_poll
-       .long sys_nfsservctl
-       .long sys_setresgid16   /* 170 */
-       .long sys_getresgid16
-       .long sys_prctl
-       .long sys_rt_sigreturn
-       .long sys_rt_sigaction
-       .long sys_rt_sigprocmask        /* 175 */
-       .long sys_rt_sigpending
-       .long sys_rt_sigtimedwait
-       .long sys_rt_sigqueueinfo
-       .long sys_rt_sigsuspend
-       .long sys_pread64       /* 180 */
-       .long sys_pwrite64
-       .long sys_chown16
-       .long sys_getcwd
-       .long sys_capget
-       .long sys_capset        /* 185 */
-       .long sys_sigaltstack
-       .long sys_sendfile
-       .long sys_ni_syscall    /* reserved for streams1 */
-       .long sys_ni_syscall    /* reserved for streams2 */
-       .long sys_vfork         /* 190 */
-       .long sys_getrlimit
-       .long sys_mmap2
-       .long sys_truncate64
-       .long sys_ftruncate64
-       .long sys_stat64        /* 195 */
-       .long sys_lstat64
-       .long sys_fstat64
-       .long sys_lchown
-       .long sys_getuid
-       .long sys_getgid        /* 200 */
-       .long sys_geteuid
-       .long sys_getegid
-       .long sys_setreuid
-       .long sys_setregid
-       .long sys_getgroups     /* 205 */
-       .long sys_setgroups
-       .long sys_fchown
-       .long sys_setresuid
-       .long sys_getresuid
-       .long sys_setresgid     /* 210 */
-       .long sys_getresgid
-       .long sys_chown
-       .long sys_setuid
-       .long sys_setgid
-       .long sys_setfsuid      /* 215 */
-       .long sys_setfsgid
-       .long sys_pivot_root
-       .long sys_mincore
-       .long sys_madvise
-       .long sys_getdents64    /* 220 */
-       .long sys_fcntl64
-       .long sys_ni_syscall    /* reserved for TUX */
-       .long sys_ni_syscall
-       .long sys_gettid
-       .long sys_readahead     /* 225 */
-       .long sys_setxattr
-       .long sys_lsetxattr
-       .long sys_fsetxattr
-       .long sys_getxattr
-       .long sys_lgetxattr     /* 230 */
-       .long sys_fgetxattr
-       .long sys_listxattr
-       .long sys_llistxattr
-       .long sys_flistxattr
-       .long sys_removexattr   /* 235 */
-       .long sys_lremovexattr
-       .long sys_fremovexattr
-       .long sys_tkill
-       .long sys_sendfile64
-       .long sys_futex         /* 240 */
-       .long sys_sched_setaffinity
-       .long sys_sched_getaffinity
-       .long sys_set_thread_area
-       .long sys_get_thread_area
-       .long sys_io_setup      /* 245 */
-       .long sys_io_destroy
-       .long sys_io_getevents
-       .long sys_io_submit
-       .long sys_io_cancel
-       .long sys_fadvise64     /* 250 */
-       .long sys_ni_syscall
-       .long sys_exit_group
-       .long sys_lookup_dcookie
-       .long sys_epoll_create
-       .long sys_epoll_ctl     /* 255 */
-       .long sys_epoll_wait
-       .long sys_remap_file_pages
-       .long sys_set_tid_address
-       .long sys_timer_create
-       .long sys_timer_settime         /* 260 */
-       .long sys_timer_gettime
-       .long sys_timer_getoverrun
-       .long sys_timer_delete
-       .long sys_clock_settime
-       .long sys_clock_gettime         /* 265 */
-       .long sys_clock_getres
-       .long sys_clock_nanosleep
-       .long sys_statfs64
-       .long sys_fstatfs64     
-       .long sys_tgkill        /* 270 */
-       .long sys_utimes
-       .long sys_fadvise64_64
-       .long sys_ni_syscall    /* sys_vserver */
-       .long sys_mbind
-       .long sys_get_mempolicy
-       .long sys_set_mempolicy
-       .long sys_mq_open
-       .long sys_mq_unlink
-       .long sys_mq_timedsend
-       .long sys_mq_timedreceive       /* 280 */
-       .long sys_mq_notify
-       .long sys_mq_getsetattr
-       .long sys_ni_syscall            /* reserved for kexec */
-
-syscall_table_size=(.-sys_call_table)
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/head.S b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/head.S
deleted file mode 100644 (file)
index b41a96d..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-
-#include <linux/config.h>
-
-.section __xen_guest
-       .ascii  "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=2.0,VIRT_BASE=0xC0000000"
-       .ascii  ",LOADER=generic"
-#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
-       .ascii  ",PT_MODE_WRITABLE"
-#endif
-       .byte   0
-
-.text
-#include <linux/threads.h>
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/thread_info.h>
-#include <asm/asm_offsets.h>
-#include <asm/hypervisor-ifs/arch-x86_32.h>
-
-/*
- * References to members of the new_cpu_data structure.
- */
-
-#define X86            new_cpu_data+CPUINFO_x86
-#define X86_VENDOR     new_cpu_data+CPUINFO_x86_vendor
-#define X86_MODEL      new_cpu_data+CPUINFO_x86_model
-#define X86_MASK       new_cpu_data+CPUINFO_x86_mask
-#define X86_HARD_MATH  new_cpu_data+CPUINFO_hard_math
-#define X86_CPUID      new_cpu_data+CPUINFO_cpuid_level
-#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
-#define X86_VENDOR_ID  new_cpu_data+CPUINFO_x86_vendor_id
-
-ENTRY(startup_32)
-       cld
-
-       /* Set up the stack pointer */
-       lss stack_start,%esp
-
-       /* Copy the necessary stuff from xen_start_info structure. */
-       mov  $xen_start_info_union,%edi
-       mov  $128,%ecx
-       rep movsl
-
-checkCPUtype:
-
-       /* get vendor info */
-       xorl %eax,%eax                  # call CPUID with 0 -> return vendor ID
-       cpuid
-       movl %eax,X86_CPUID             # save CPUID level
-       movl %ebx,X86_VENDOR_ID         # lo 4 chars
-       movl %edx,X86_VENDOR_ID+4       # next 4 chars
-       movl %ecx,X86_VENDOR_ID+8       # last 4 chars
-
-       movl $1,%eax            # Use the CPUID instruction to get CPU type
-       cpuid
-       movb %al,%cl            # save reg for future use
-       andb $0x0f,%ah          # mask processor family
-       movb %ah,X86
-       andb $0xf0,%al          # mask model
-       shrb $4,%al
-       movb %al,X86_MODEL
-       andb $0x0f,%cl          # mask mask revision
-       movb %cl,X86_MASK
-       movl %edx,X86_CAPABILITY
-
-       xorl %eax,%eax          # Clear FS/GS and LDT
-       movl %eax,%fs
-       movl %eax,%gs
-       cld             # gcc2 wants the direction flag cleared at all times
-
-       call start_kernel
-L6:
-       jmp L6                  # main should never return here, but
-                               # just in case, we know what happens.
-
-ENTRY(lgdt_finish)
-       movl $(__KERNEL_DS),%eax        # reload all the segment registers
-       movw %ax,%ss                    # after changing gdt.
-
-       movl $(__USER_DS),%eax          # DS/ES contains default USER segment
-       movw %ax,%ds
-       movw %ax,%es
-
-       popl %eax                       # reload CS by intersegment return
-       pushl $(__KERNEL_CS)
-       pushl %eax
-       lret
-
-ENTRY(stack_start)
-       .long init_thread_union+THREAD_SIZE
-       .long __BOOT_DS
-
-# XXXcl
-.globl idt_descr
-.globl cpu_gdt_descr
-
-       ALIGN
-       .word 0                         # 32-bit align idt_desc.address
-idt_descr:
-       .word IDT_ENTRIES*8-1           # idt contains 256 entries
-       .long idt_table
-# XXXcl
-
-# boot GDT descriptor (later on used by CPU#0):
-       .word 0                         # 32 bit align gdt_desc.address
-cpu_gdt_descr:
-       .word GDT_SIZE
-       .long cpu_gdt_table
-
-       .fill NR_CPUS-1,8,0             # space for the other GDT descriptors
-
-.org 0x1000
-ENTRY(empty_zero_page)
-
-.org 0x2000
-ENTRY(swapper_pg_dir)
-
-.org 0x3000
-ENTRY(cpu_gdt_table)
-       .quad 0x0000000000000000        /* NULL descriptor */
-       .quad 0x0000000000000000        /* 0x0b reserved */
-       .quad 0x0000000000000000        /* 0x13 reserved */
-       .quad 0x0000000000000000        /* 0x1b reserved */
-       .quad 0x0000000000000000        /* 0x20 unused */
-       .quad 0x0000000000000000        /* 0x28 unused */
-       .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
-       .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
-       .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
-       .quad 0x0000000000000000        /* 0x4b reserved */
-       .quad 0x0000000000000000        /* 0x53 reserved */
-       .quad 0x0000000000000000        /* 0x5b reserved */
-
-       .quad 0x00cfbb000000c3ff        /* 0x60 kernel 4GB code at 0x00000000 */
-       .quad 0x00cfb3000000c3ff        /* 0x68 kernel 4GB data at 0x00000000 */
-       .quad 0x00cffb000000c3ff        /* 0x73 user 4GB code at 0x00000000 */
-       .quad 0x00cff3000000c3ff        /* 0x7b user 4GB data at 0x00000000 */
-
-       .quad 0x0000000000000000        /* 0x80 TSS descriptor */
-       .quad 0x0000000000000000        /* 0x88 LDT descriptor */
-
-       /* Segments used for calling PnP BIOS */
-       .quad 0x0000000000000000        /* 0x90 32-bit code */
-       .quad 0x0000000000000000        /* 0x98 16-bit code */
-       .quad 0x0000000000000000        /* 0xa0 16-bit data */
-       .quad 0x0000000000000000        /* 0xa8 16-bit data */
-       .quad 0x0000000000000000        /* 0xb0 16-bit data */
-       /*
-        * The APM segments have byte granularity and their bases
-        * and limits are set at run time.
-        */
-       .quad 0x0000000000000000        /* 0xb8 APM CS    code */
-       .quad 0x0000000000000000        /* 0xc0 APM CS 16 code (16 bit) */
-       .quad 0x0000000000000000        /* 0xc8 APM DS    data */
-
-       .quad 0x0000000000000000        /* 0xd0 - unused */
-       .quad 0x0000000000000000        /* 0xd8 - unused */
-       .quad 0x0000000000000000        /* 0xe0 - unused */
-       .quad 0x0000000000000000        /* 0xe8 - unused */
-       .quad 0x0000000000000000        /* 0xf0 - unused */
-       .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
-       .fill GDT_ENTRIES-32,8,0
-
-.org 0x4000
-ENTRY(default_ldt)
-
-.org 0x5000
-/*
- * Real beginning of normal "text" segment
- */
-ENTRY(stext)
-ENTRY(_stext)
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
deleted file mode 100644 (file)
index b598dcf..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/hardirq.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/nmi.h>
-#include <asm/ist.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
-
-/* This is definitely a GPL-only symbol */
-EXPORT_SYMBOL_GPL(cpu_gdt_table);
-
-#if defined(CONFIG_APM_MODULE)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-extern unsigned long cpu_khz;
-extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-EXPORT_SYMBOL(MCA_bus);
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(physnode_map);
-#endif
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(dump_extended_fpu);
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-#ifdef CONFIG_APM
-EXPORT_SYMBOL(pm_power_off);
-#endif
-EXPORT_SYMBOL(get_cmos_time);
-EXPORT_SYMBOL(cpu_khz);
-EXPORT_SYMBOL(apm_info);
-
-EXPORT_SYMBOL_NOVERS(__down_failed);
-EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
-EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
-EXPORT_SYMBOL_NOVERS(__up_wakeup);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
-EXPORT_SYMBOL_NOVERS(__get_user_1);
-EXPORT_SYMBOL_NOVERS(__get_user_2);
-EXPORT_SYMBOL_NOVERS(__get_user_4);
-
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_from_user_ll);
-EXPORT_SYMBOL(__copy_to_user_ll);
-EXPORT_SYMBOL(strnlen_user);
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pcibios_penalize_isa_irq);
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-#ifdef CONFIG_PCI_BIOS
-EXPORT_SYMBOL(pcibios_set_irq_routing);
-EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
-EXPORT_SYMBOL_NOVERS(__write_lock_failed);
-EXPORT_SYMBOL_NOVERS(__read_lock_failed);
-
-/* Global SMP stuff */
-EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL_GPL(flush_tlb_all);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_MCA
-EXPORT_SYMBOL(machine_id);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
-#undef memcmp
-extern int memcmp(const void *,const void *,__kernel_size_t);
-EXPORT_SYMBOL_NOVERS(memcmp);
-
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(atomic_dec_and_lock);
-#endif
-
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
-#endif
-
-#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-EXPORT_SYMBOL(ist_info);
-#endif
-
-EXPORT_SYMBOL(csum_partial);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ioport.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ioport.c
deleted file mode 100644 (file)
index bb3b0f3..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <asm/hypervisor-ifs/dom0_ops.h>
-
-asmlinkage long sys_iopl(unsigned int new_io_pl)
-{
-       unsigned int old_io_pl = current->thread.io_pl;
-       dom0_op_t op;
-
-       if (new_io_pl > 3)
-               return -EINVAL;
-
-       /* Need "raw I/O" privileges for direct port access. */
-       if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
-               return -EPERM;
-
-       if (!(xen_start_info.flags & SIF_PRIVILEGED))
-               return -EPERM;
-
-       /* Maintain OS privileges even if user attempts to relinquish them. */
-       if (new_io_pl == 0)
-               new_io_pl = 1;
-
-       /* Change our version of the privilege levels. */
-       current->thread.io_pl = new_io_pl;
-
-       /* Force the change at ring 0. */
-       op.cmd           = DOM0_IOPL;
-       op.u.iopl.domain = DOMID_SELF;
-       op.u.iopl.iopl   = new_io_pl;
-       HYPERVISOR_dom0_op(&op);
-
-       return 0;
-}
-
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-{
-       printk(KERN_INFO "ioperm not fully supported - %s\n",
-               turn_on ? "set iopl to 3" : "ignore resource release");
-       return turn_on ? sys_iopl(3) : 0;
-}
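Editor's note: for context, a hedged userspace sketch of exercising this path from a privileged domain (standard glibc iopl()/outb() wrappers; nothing below is part of this file):

#include <stdio.h>
#include <sys/io.h>     /* glibc: iopl(), outb() on i386 */

int main(void)
{
        /* With the implementation above this needs CAP_SYS_RAWIO and a
         * domain started with SIF_PRIVILEGED; otherwise it fails with EPERM. */
        if (iopl(3) != 0) {
                perror("iopl");
                return 1;
        }
        outb(0, 0x80);  /* harmless write to the conventional POST port */
        return 0;
}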
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/irq.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/irq.c
deleted file mode 100644 (file)
index 758245e..0000000
+++ /dev/null
@@ -1,1199 +0,0 @@
-/*
- *     linux/arch/i386/kernel/irq.c
- *
- *     Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- */
-
-/*
- * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
- *
- * IRQs are in fact implemented a bit like signal handlers for the kernel.
- * Naturally it's not a 1:1 relation, but there are similarities.
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/smp_lock.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/kallsyms.h>
-
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/desc.h>
-#include <asm/irq.h>
-
-/*
- * Linux has a controller-independent x86 interrupt architecture.
- * Every controller has a 'controller-template' that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
- * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
- * (IO-APICs assumed to be messaging to Pentium local-APICs)
- *
- * the code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic.
- */
-
-/*
- * Controller mappings for all interrupt sources:
- */
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
-       [0 ... NR_IRQS-1] = {
-               .handler = &no_irq_type,
-               .lock = SPIN_LOCK_UNLOCKED
-       }
-};
-
-static void register_irq_proc (unsigned int irq);
-
-/*
- * per-CPU IRQ handling stacks
- */
-#ifdef CONFIG_4KSTACKS
-union irq_ctx *hardirq_ctx[NR_CPUS];
-union irq_ctx *softirq_ctx[NR_CPUS];
-#endif
-
-/*
- * Special irq handlers.
- */
-
-irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
-{ return IRQ_NONE; }
-
-/*
- * Generic no controller code
- */
-
-static void enable_none(unsigned int irq) { }
-static unsigned int startup_none(unsigned int irq) { return 0; }
-static void disable_none(unsigned int irq) { }
-static void ack_none(unsigned int irq)
-{
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this itself; it doesn't deserve
- * a generic callback, I think.
- */
-#ifdef CONFIG_X86
-       printk("unexpected IRQ trap at vector %02x\n", irq);
-#ifdef CONFIG_X86_LOCAL_APIC
-       /*
-        * Currently unexpected vectors happen only on SMP and APIC.
-        * We _must_ ack these because every local APIC has only N
-        * irq slots per priority level, and a 'hanging, unacked' IRQ
-        * holds up an irq slot - in excessive cases (when multiple
-        * unexpected vectors occur) that might lock up the APIC
-        * completely.
-        */
-       ack_APIC_irq();
-#endif
-#endif
-}
-
-/* startup is the same as "enable", shutdown is same as "disable" */
-#define shutdown_none  disable_none
-#define end_none       enable_none
-
-struct hw_interrupt_type no_irq_type = {
-       "none",
-       startup_none,
-       shutdown_none,
-       enable_none,
-       disable_none,
-       ack_none,
-       end_none
-};
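Editor's note: to make the 'controller-template' idea concrete, a sketch with invented my_pic callbacks (hook names follow struct hw_interrupt_type as declared in this kernel's include/linux/irq.h; no such controller exists in this tree):

static unsigned int my_pic_startup(unsigned int irq)
{
        /* unmask the line in the controller; 0 == no pending edge seen */
        return 0;
}

static void my_pic_shutdown(unsigned int irq) { /* mask the line */ }
static void my_pic_enable(unsigned int irq)   { /* unmask        */ }
static void my_pic_disable(unsigned int irq)  { /* mask          */ }
static void my_pic_ack(unsigned int irq)      { /* quiesce/EOI before handlers run      */ }
static void my_pic_end(unsigned int irq)      { /* re-enable after handlers, if need be */ }

static struct hw_interrupt_type my_pic_type = {
        .typename = "my-pic",
        .startup  = my_pic_startup,
        .shutdown = my_pic_shutdown,
        .enable   = my_pic_enable,
        .disable  = my_pic_disable,
        .ack      = my_pic_ack,
        .end      = my_pic_end,
};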
-
-atomic_t irq_err_count;
-#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
-atomic_t irq_mis_count;
-#endif
-
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-       int i = *(loff_t *) v, j;
-       struct irqaction * action;
-       unsigned long flags;
-
-       if (i == 0) {
-               seq_printf(p, "           ");
-               for (j=0; j<NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "CPU%d       ",j);
-               seq_putc(p, '\n');
-       }
-
-       if (i < NR_IRQS) {
-               spin_lock_irqsave(&irq_desc[i].lock, flags);
-               action = irq_desc[i].action;
-               if (!action) 
-                       goto skip;
-               seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-               for (j = 0; j < NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-               seq_printf(p, " %14s", irq_desc[i].handler->typename);
-               seq_printf(p, "  %s", action->name);
-
-               for (action=action->next; action; action = action->next)
-                       seq_printf(p, ", %s", action->name);
-
-               seq_putc(p, '\n');
-skip:
-               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-       } else if (i == NR_IRQS) {
-               seq_printf(p, "NMI: ");
-               for (j = 0; j < NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "%10u ", nmi_count(j));
-               seq_putc(p, '\n');
-#ifdef CONFIG_X86_LOCAL_APIC
-               seq_printf(p, "LOC: ");
-               for (j = 0; j < NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
-               seq_putc(p, '\n');
-#endif
-               seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
-               seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-       }
-       return 0;
-}
-
-
-
-
-#ifdef CONFIG_SMP
-inline void synchronize_irq(unsigned int irq)
-{
-       while (irq_desc[irq].status & IRQ_INPROGRESS)
-               cpu_relax();
-}
-#endif
-
-/*
- * This should really return information about whether
- * we should do bottom half handling etc. Right now we
- * end up _always_ checking the bottom half, which is a
- * waste of time and is not what some drivers would
- * prefer.
- */
-asmlinkage int handle_IRQ_event(unsigned int irq,
-               struct pt_regs *regs, struct irqaction *action)
-{
-       int status = 1; /* Force the "do bottom halves" bit */
-       int retval = 0;
-
-       if (!(action->flags & SA_INTERRUPT))
-               local_irq_enable();
-
-       do {
-               status |= action->flags;
-               retval |= action->handler(irq, action->dev_id, regs);
-               action = action->next;
-       } while (action);
-       if (status & SA_SAMPLE_RANDOM)
-               add_interrupt_randomness(irq);
-       local_irq_disable();
-       return retval;
-}
-
-static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
-{
-       struct irqaction *action;
-
-       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
-               printk(KERN_ERR "irq event %d: bogus return value %x\n",
-                               irq, action_ret);
-       } else {
-               printk(KERN_ERR "irq %d: nobody cared!\n", irq);
-       }
-       dump_stack();
-       printk(KERN_ERR "handlers:\n");
-       action = desc->action;
-       do {
-               printk(KERN_ERR "[<%p>]", action->handler);
-               print_symbol(" (%s)",
-                       (unsigned long)action->handler);
-               printk("\n");
-               action = action->next;
-       } while (action);
-}
-
-static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
-{
-       static int count = 100;
-
-       if (count) {
-               count--;
-               __report_bad_irq(irq, desc, action_ret);
-       }
-}
-
-static int noirqdebug;
-
-static int __init noirqdebug_setup(char *str)
-{
-       noirqdebug = 1;
-       printk("IRQ lockup detection disabled\n");
-       return 1;
-}
-
-__setup("noirqdebug", noirqdebug_setup);
-
-/*
- * If 99,900 of the previous 100,000 interrupts have not been handled then
- * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
- * turn the IRQ off.
- *
- * (The other 100-of-100,000 interrupts may have been a correctly-functioning
- *  device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
- */
-static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
-{
-       if (action_ret != IRQ_HANDLED) {
-               desc->irqs_unhandled++;
-               if (action_ret != IRQ_NONE)
-                       report_bad_irq(irq, desc, action_ret);
-       }
-
-       desc->irq_count++;
-       if (desc->irq_count < 100000)
-               return;
-
-       desc->irq_count = 0;
-       if (desc->irqs_unhandled > 99900) {
-               /*
-                * The interrupt is stuck
-                */
-               __report_bad_irq(irq, desc, action_ret);
-               /*
-                * Now kill the IRQ
-                */
-               printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-               desc->status |= IRQ_DISABLED;
-               desc->handler->disable(irq);
-       }
-       desc->irqs_unhandled = 0;
-}
-
-/*
- * Generic enable/disable code: this just calls
- * down into the PIC-specific version for the actual
- * hardware disable after having gotten the irq
- * controller lock. 
- */
-/**
- *     disable_irq_nosync - disable an irq without waiting
- *     @irq: Interrupt to disable
- *
- *     Disable the selected interrupt line.  Disables and Enables are
- *     nested.
- *     Unlike disable_irq(), this function does not ensure existing
- *     instances of the IRQ handler have completed before returning.
- *
- *     This function may be called from IRQ context.
- */
-inline void disable_irq_nosync(unsigned int irq)
-{
-       irq_desc_t *desc = irq_desc + irq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&desc->lock, flags);
-       if (!desc->depth++) {
-               desc->status |= IRQ_DISABLED;
-               desc->handler->disable(irq);
-       }
-       spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- *     disable_irq - disable an irq and wait for completion
- *     @irq: Interrupt to disable
- *
- *     Disable the selected interrupt line.  Enables and Disables are
- *     nested.
- *     This function waits for any pending IRQ handlers for this interrupt
- *     to complete before returning. If you use this function while
- *     holding a resource the IRQ handler may need you will deadlock.
- *
- *     This function may be called - with care - from IRQ context.
- */
-void disable_irq(unsigned int irq)
-{
-       irq_desc_t *desc = irq_desc + irq;
-       disable_irq_nosync(irq);
-       if (desc->action)
-               synchronize_irq(irq);
-}
-
-/**
- *     enable_irq - enable handling of an irq
- *     @irq: Interrupt to enable
- *
- *     Undoes the effect of one call to disable_irq().  If this
- *     matches the last disable, processing of interrupts on this
- *     IRQ line is re-enabled.
- *
- *     This function may be called from IRQ context.
- */
-void enable_irq(unsigned int irq)
-{
-       irq_desc_t *desc = irq_desc + irq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&desc->lock, flags);
-       switch (desc->depth) {
-       case 1: {
-               unsigned int status = desc->status & ~IRQ_DISABLED;
-               desc->status = status;
-               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-                       desc->status = status | IRQ_REPLAY;
-                       hw_resend_irq(desc->handler,irq);
-               }
-               desc->handler->enable(irq);
-               /* fall-through */
-       }
-       default:
-               desc->depth--;
-               break;
-       case 0:
-               printk("enable_irq(%u) unbalanced from %p\n", irq,
-                      __builtin_return_address(0));
-       }
-       spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
-{      
-       /* 
-        * We ack quickly, we don't want the irq controller
-        * thinking we're snobs just because some other CPU has
-        * disabled global interrupts (we have already done the
-        * INT_ACK cycles, it's too late to try to pretend to the
-        * controller that we aren't taking the interrupt).
-        *
-        * 0 return value means that this irq is already being
-        * handled by some other CPU. (or is disabled)
-        */
-       irq_desc_t *desc = irq_desc + irq;
-       struct irqaction * action;
-       unsigned int status;
-
-       irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-       /* Debugging check for stack overflow: is there less than 1KB free? */
-       {
-               long esp;
-
-               __asm__ __volatile__("andl %%esp,%0" :
-                                       "=r" (esp) : "0" (THREAD_SIZE - 1));
-               if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-                       printk("do_IRQ: stack overflow: %ld\n",
-                               esp - sizeof(struct thread_info));
-                       dump_stack();
-               }
-       }
-#endif
-       kstat_this_cpu.irqs[irq]++;
-       spin_lock(&desc->lock);
-       desc->handler->ack(irq);
-       /*
-        * REPLAY is when Linux resends an IRQ that was dropped earlier.
-        * WAITING is used by probe to mark irqs that are being tested.
-        */
-       status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-       status |= IRQ_PENDING; /* we _want_ to handle it */
-
-       /*
-        * If the IRQ is disabled for whatever reason, we cannot
-        * use the action we have.
-        */
-       action = NULL;
-       if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-               action = desc->action;
-               status &= ~IRQ_PENDING; /* we commit to handling */
-               status |= IRQ_INPROGRESS; /* we are handling it */
-       }
-       desc->status = status;
-
-       /*
-        * If there is no IRQ handler or it was disabled, exit early.
-        * Since we set PENDING, if another processor is handling a
-        * different instance of this same irq, the other processor
-        * will take care of it.
-        */
-       if (unlikely(!action))
-               goto out;
-
-       /*
-        * Edge triggered interrupts need to remember
-        * pending events.
-        * This applies to any hw interrupts that allow a second
-        * instance of the same irq to arrive while we are in do_IRQ
-        * or in the handler. But the code here only handles the _second_
-        * instance of the irq, not the third or fourth. So it is mostly
-        * useful for irq hardware that does not mask cleanly in an
-        * SMP environment.
-        */
-#ifdef CONFIG_4KSTACKS
-
-       for (;;) {
-               irqreturn_t action_ret;
-               u32 *isp;
-               union irq_ctx * curctx;
-               union irq_ctx * irqctx;
-
-               curctx = (union irq_ctx *) current_thread_info();
-               irqctx = hardirq_ctx[smp_processor_id()];
-
-               spin_unlock(&desc->lock);
-
-               /*
-                * this is where we switch to the IRQ stack. However, if we are already using
-                * the IRQ stack (because we interrupted a hardirq handler) we can't do that
-                * and just have to keep using the current stack (which is the irq stack already
-                * after all)
-                */
-
-               if (curctx == irqctx)
-                       action_ret = handle_IRQ_event(irq, regs, action);
-               else {
-                       /* build the stack frame on the IRQ stack */
-                       isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-                       irqctx->tinfo.task = curctx->tinfo.task;
-                       irqctx->tinfo.previous_esp = current_stack_pointer();
-
-                       *--isp = (u32) action;
-                       *--isp = (u32) regs;
-                       *--isp = (u32) irq;
-
-                       asm volatile(
-                               "       xchgl   %%ebx,%%esp     \n"
-                               "       call    handle_IRQ_event \n"
-                               "       xchgl   %%ebx,%%esp     \n"
-                               : "=a"(action_ret)
-                               : "b"(isp)
-                               : "memory", "cc", "edx", "ecx"
-                       );
-
-
-               }
-               spin_lock(&desc->lock);
-               if (!noirqdebug)
-                       note_interrupt(irq, desc, action_ret);
-               if (curctx != irqctx)
-                       irqctx->tinfo.task = NULL;
-               if (likely(!(desc->status & IRQ_PENDING)))
-                       break;
-               desc->status &= ~IRQ_PENDING;
-       }
-
-#else
-
-       for (;;) {
-               irqreturn_t action_ret;
-
-               spin_unlock(&desc->lock);
-
-               action_ret = handle_IRQ_event(irq, regs, action);
-
-               spin_lock(&desc->lock);
-               if (!noirqdebug)
-                       note_interrupt(irq, desc, action_ret);
-               if (likely(!(desc->status & IRQ_PENDING)))
-                       break;
-               desc->status &= ~IRQ_PENDING;
-       }
-#endif
-       desc->status &= ~IRQ_INPROGRESS;
-
-out:
-       /*
-        * The ->end() handler has to deal with interrupts which got
-        * disabled while the handler was running.
-        */
-       desc->handler->end(irq);
-       spin_unlock(&desc->lock);
-
-       irq_exit();
-
-       return 1;
-}
-
-int can_request_irq(unsigned int irq, unsigned long irqflags)
-{
-       struct irqaction *action;
-
-       if (irq >= NR_IRQS)
-               return 0;
-       action = irq_desc[irq].action;
-       if (action) {
-               if (irqflags & action->flags & SA_SHIRQ)
-                       action = NULL;
-       }
-       return !action;
-}
-
-/**
- *     request_irq - allocate an interrupt line
- *     @irq: Interrupt line to allocate
- *     @handler: Function to be called when the IRQ occurs
- *     @irqflags: Interrupt type flags
- *     @devname: An ascii name for the claiming device
- *     @dev_id: A cookie passed back to the handler function
- *
- *     This call allocates interrupt resources and enables the
- *     interrupt line and IRQ handling. From the point this
- *     call is made your handler function may be invoked. Since
- *     your handler function must clear any interrupt the board 
- *     raises, you must take care both to initialise your hardware
- *     and to set up the interrupt handler in the right order.
- *
- *     Dev_id must be globally unique. Normally the address of the
- *     device data structure is used as the cookie. Since the handler
- *     receives this value it makes sense to use it.
- *
- *     If your interrupt is shared you must pass a non-NULL dev_id
- *     as this is required when freeing the interrupt.
- *
- *     Flags:
- *
- *     SA_SHIRQ                Interrupt is shared
- *
- *     SA_INTERRUPT            Disable local interrupts while processing
- *
- *     SA_SAMPLE_RANDOM        The interrupt can be used for entropy
- *
- */
-int request_irq(unsigned int irq, 
-               irqreturn_t (*handler)(int, void *, struct pt_regs *),
-               unsigned long irqflags, 
-               const char * devname,
-               void *dev_id)
-{
-       int retval;
-       struct irqaction * action;
-
-#if 1
-       /*
-        * Sanity-check: shared interrupts should REALLY pass in
-        * a real dev-ID, otherwise we'll have trouble later trying
-        * to figure out which interrupt is which (messes up the
-        * interrupt freeing logic etc).
-        */
-       if (irqflags & SA_SHIRQ) {
-               if (!dev_id)
-                       printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
-       }
-#endif
-
-       if (irq >= NR_IRQS)
-               return -EINVAL;
-       if (!handler)
-               return -EINVAL;
-
-       action = (struct irqaction *)
-                       kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-       if (!action)
-               return -ENOMEM;
-
-       action->handler = handler;
-       action->flags = irqflags;
-       cpus_clear(action->mask);
-       action->name = devname;
-       action->next = NULL;
-       action->dev_id = dev_id;
-
-       retval = setup_irq(irq, action);
-       if (retval)
-               kfree(action);
-       return retval;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-/**
- *     free_irq - free an interrupt
- *     @irq: Interrupt line to free
- *     @dev_id: Device identity to free
- *
- *     Remove an interrupt handler. The handler is removed and if the
- *     interrupt line is no longer in use by any driver it is disabled.
- *     On a shared IRQ the caller must ensure the interrupt is disabled
- *     on the card it drives before calling this function. The function
- *     does not return until any executing interrupts for this IRQ
- *     have completed.
- *
- *     This function must not be called from interrupt context. 
- */
-void free_irq(unsigned int irq, void *dev_id)
-{
-       irq_desc_t *desc;
-       struct irqaction **p;
-       unsigned long flags;
-
-       if (irq >= NR_IRQS)
-               return;
-
-       desc = irq_desc + irq;
-       spin_lock_irqsave(&desc->lock,flags);
-       p = &desc->action;
-       for (;;) {
-               struct irqaction * action = *p;
-
-               if (action) {
-                       struct irqaction **pp = p;
-                       p = &action->next;
-                       if (action->dev_id != dev_id)
-                               continue;
-
-                       /* Found it - now remove it from the list of entries */
-                       *pp = action->next;
-                       if (!desc->action) {
-                               desc->status |= IRQ_DISABLED;
-                               desc->handler->shutdown(irq);
-                       }
-                       spin_unlock_irqrestore(&desc->lock,flags);
-
-                       /* Wait to make sure it's not being used on another CPU */
-                       synchronize_irq(irq);
-
-#define SA_STATIC_ACTION 0x01000000 /* Is it our duty to free the action? */
-                       if (!(action->flags & SA_STATIC_ACTION))
-                               kfree(action);
-                       return;
-               }
-               printk("Trying to free free IRQ%d\n",irq);
-               spin_unlock_irqrestore(&desc->lock,flags);
-               return;
-       }
-}
-
-EXPORT_SYMBOL(free_irq);
-
-/*
- * IRQ autodetection code..
- *
- * This depends on the fact that any interrupt that
- * comes in on to an unassigned handler will get stuck
- * with "IRQ_WAITING" cleared and the interrupt
- * disabled.
- */
-
-static DECLARE_MUTEX(probe_sem);
-
-/**
- *     probe_irq_on    - begin an interrupt autodetect
- *
- *     Commence probing for an interrupt. The interrupts are scanned
- *     and a mask of potential interrupt lines is returned.
- *
- */
-unsigned long probe_irq_on(void)
-{
-       unsigned int i;
-       irq_desc_t *desc;
-       unsigned long val;
-       unsigned long delay;
-
-       down(&probe_sem);
-       /* 
-        * something may have generated an irq long ago and we want to
-        * flush such a longstanding irq before considering it as spurious. 
-        */
-       for (i = NR_PIRQS-1; i > 0; i--)  {
-               desc = irq_desc + i;
-
-               spin_lock_irq(&desc->lock);
-               if (!irq_desc[i].action) 
-                       irq_desc[i].handler->startup(i);
-               spin_unlock_irq(&desc->lock);
-       }
-
-       /* Wait for longstanding interrupts to trigger. */
-       for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
-               /* about 20ms delay */ barrier();
-
-       /*
-        * enable any unassigned irqs
-        * (we must startup again here because if a longstanding irq
-        * happened in the previous stage, it may have masked itself)
-        */
-       for (i = NR_PIRQS-1; i > 0; i--) {
-               desc = irq_desc + i;
-
-               spin_lock_irq(&desc->lock);
-               if (!desc->action) {
-                       desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-                       if (desc->handler->startup(i))
-                               desc->status |= IRQ_PENDING;
-               }
-               spin_unlock_irq(&desc->lock);
-       }
-
-       /*
-        * Wait for spurious interrupts to trigger
-        */
-       for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
-               /* about 100ms delay */ barrier();
-
-       /*
-        * Now filter out any obviously spurious interrupts
-        */
-       val = 0;
-       for (i = 0; i < NR_PIRQS; i++) {
-               irq_desc_t *desc = irq_desc + i;
-               unsigned int status;
-
-               spin_lock_irq(&desc->lock);
-               status = desc->status;
-
-               if (status & IRQ_AUTODETECT) {
-                       /* It triggered already - consider it spurious. */
-                       if (!(status & IRQ_WAITING)) {
-                               desc->status = status & ~IRQ_AUTODETECT;
-                               desc->handler->shutdown(i);
-                       } else
-                               if (i < 32)
-                                       val |= 1 << i;
-               }
-               spin_unlock_irq(&desc->lock);
-       }
-
-       return val;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-/*
- * Return a mask of triggered interrupts (this
- * can handle only legacy ISA interrupts).
- */
-/**
- *     probe_irq_mask - scan a bitmap of interrupt lines
- *     @val:   mask of interrupts to consider
- *
- *     Scan the ISA bus interrupt lines and return a bitmap of
- *     active interrupts. The interrupt probe logic state is then
- *     returned to its previous value.
- *
- *     Note: we need to scan all the irq's even though we will
- *     only return ISA irq numbers - just so that we reset them
- *     all to a known state.
- */
-unsigned int probe_irq_mask(unsigned long val)
-{
-       int i;
-       unsigned int mask;
-
-       mask = 0;
-       for (i = 0; i < NR_PIRQS; i++) {
-               irq_desc_t *desc = irq_desc + i;
-               unsigned int status;
-
-               spin_lock_irq(&desc->lock);
-               status = desc->status;
-
-               if (status & IRQ_AUTODETECT) {
-                       if (i < 16 && !(status & IRQ_WAITING))
-                               mask |= 1 << i;
-
-                       desc->status = status & ~IRQ_AUTODETECT;
-                       desc->handler->shutdown(i);
-               }
-               spin_unlock_irq(&desc->lock);
-       }
-       up(&probe_sem);
-
-       return mask & val;
-}
-
-/*
- * Return the one interrupt that triggered (this can
- * handle any interrupt source).
- */
-
-/**
- *     probe_irq_off   - end an interrupt autodetect
- *     @val: mask of potential interrupts (unused)
- *
- *     Scans the unused interrupt lines and returns the line which
- *     appears to have triggered the interrupt. If no interrupt was
- *     found then zero is returned. If more than one interrupt is
- *     found then minus the first candidate is returned to indicate
- *     there is doubt.
- *
- *     The interrupt probe logic state is returned to its previous
- *     value.
- *
- *     BUGS: When used in a module (which arguably shouldn't happen)
- *     nothing prevents two IRQ probe callers from overlapping. The
- *     results of this are non-optimal.
- */
-int probe_irq_off(unsigned long val)
-{
-       int i, irq_found, nr_irqs;
-
-       nr_irqs = 0;
-       irq_found = 0;
-       for (i = 0; i < NR_PIRQS; i++) {
-               irq_desc_t *desc = irq_desc + i;
-               unsigned int status;
-
-               spin_lock_irq(&desc->lock);
-               status = desc->status;
-
-               if (status & IRQ_AUTODETECT) {
-                       if (!(status & IRQ_WAITING)) {
-                               if (!nr_irqs)
-                                       irq_found = i;
-                               nr_irqs++;
-                       }
-                       desc->status = status & ~IRQ_AUTODETECT;
-                       desc->handler->shutdown(i);
-               }
-               spin_unlock_irq(&desc->lock);
-       }
-       up(&probe_sem);
-
-       if (nr_irqs > 1)
-               irq_found = -irq_found;
-       return irq_found;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-/* this was setup_x86_irq but it seems pretty generic */
-int setup_irq(unsigned int irq, struct irqaction * new)
-{
-       int shared = 0;
-       unsigned long flags;
-       struct irqaction *old, **p;
-       irq_desc_t *desc = irq_desc + irq;
-
-       if (desc->handler == &no_irq_type)
-               return -ENOSYS;
-       /*
-        * Some drivers like serial.c use request_irq() heavily,
-        * so we have to be careful not to interfere with a
-        * running system.
-        */
-       if (new->flags & SA_SAMPLE_RANDOM) {
-               /*
-                * This function might sleep, we want to call it first,
-                * outside of the atomic block.
-                * Yes, this might clear the entropy pool if the wrong
-                * driver is loaded by mistake, without actually
-                * installing a new handler, but is this really a problem?
-                * Only the sysadmin is able to do this.
-                */
-               rand_initialize_irq(irq);
-       }
-
-       /*
-        * The following block of code has to be executed atomically
-        */
-       spin_lock_irqsave(&desc->lock,flags);
-       p = &desc->action;
-       if ((old = *p) != NULL) {
-               /* Can't share interrupts unless both agree to */
-               if (!(old->flags & new->flags & SA_SHIRQ)) {
-                       spin_unlock_irqrestore(&desc->lock,flags);
-                       return -EBUSY;
-               }
-
-               /* add new interrupt at end of irq queue */
-               do {
-                       p = &old->next;
-                       old = *p;
-               } while (old);
-               shared = 1;
-       }
-
-       *p = new;
-
-       if (!shared) {
-               desc->depth = 0;
-               desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
-               desc->handler->startup(irq);
-       }
-       spin_unlock_irqrestore(&desc->lock,flags);
-
-       register_irq_proc(irq);
-       return 0;
-}
-
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir [NR_IRQS];
-
-#ifdef CONFIG_SMP
-
-static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
-
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
-                       int count, int *eof, void *data)
-{
-       int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
-       if (count - len < 2)
-               return -EINVAL;
-       len += sprintf(page + len, "\n");
-       return len;
-}
-
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
-                                       unsigned long count, void *data)
-{
-       int irq = (long)data, full_count = count, err;
-       cpumask_t new_value, tmp;
-
-       if (!irq_desc[irq].handler->set_affinity)
-               return -EIO;
-
-       err = cpumask_parse(buffer, count, new_value);
-       if (err)
-               return err;
-
-       /*
-        * Do not allow disabling IRQs completely - it's too easy a
-        * way to make the system unusable accidentally :-) At least
-        * one online CPU still has to be targeted.
-        */
-       cpus_and(tmp, new_value, cpu_online_map);
-       if (cpus_empty(tmp))
-               return -EINVAL;
-
-       irq_affinity[irq] = new_value;
-       irq_desc[irq].handler->set_affinity(irq,
-                                       cpumask_of_cpu(first_cpu(new_value)));
-
-       return full_count;
-}
-
-#endif
-
-static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
-                       int count, int *eof, void *data)
-{
-       int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
-       if (count - len < 2)
-               return -EINVAL;
-       len += sprintf(page + len, "\n");
-       return len;
-}
-
-static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
-                                       unsigned long count, void *data)
-{
-       cpumask_t *mask = (cpumask_t *)data;
-       unsigned long full_count = count, err;
-       cpumask_t new_value;
-
-       err = cpumask_parse(buffer, count, new_value);
-       if (err)
-               return err;
-
-       *mask = new_value;
-       return full_count;
-}
-
-#define MAX_NAMELEN 10
-
-static void register_irq_proc (unsigned int irq)
-{
-       char name [MAX_NAMELEN];
-
-       if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
-                       irq_dir[irq])
-               return;
-
-       memset(name, 0, MAX_NAMELEN);
-       sprintf(name, "%d", irq);
-
-       /* create /proc/irq/1234 */
-       irq_dir[irq] = proc_mkdir(name, root_irq_dir);
-
-#ifdef CONFIG_SMP
-       {
-               struct proc_dir_entry *entry;
-
-               /* create /proc/irq/1234/smp_affinity */
-               entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
-
-               if (entry) {
-                       entry->nlink = 1;
-                       entry->data = (void *)(long)irq;
-                       entry->read_proc = irq_affinity_read_proc;
-                       entry->write_proc = irq_affinity_write_proc;
-               }
-
-               smp_affinity_entry[irq] = entry;
-       }
-#endif
-}
-
-unsigned long prof_cpu_mask = -1;
-
-void init_irq_proc (void)
-{
-       struct proc_dir_entry *entry;
-       int i;
-
-       /* create /proc/irq */
-       root_irq_dir = proc_mkdir("irq", NULL);
-
-       /* create /proc/irq/prof_cpu_mask */
-       entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
-
-       if (!entry)
-           return;
-
-       entry->nlink = 1;
-       entry->data = (void *)&prof_cpu_mask;
-       entry->read_proc = prof_cpu_mask_read_proc;
-       entry->write_proc = prof_cpu_mask_write_proc;
-
-       /*
-        * Create entries for all existing IRQs.
-        */
-       for (i = 0; i < NR_IRQS; i++)
-               register_irq_proc(i);
-}
-
-
-#ifdef CONFIG_4KSTACKS
-/*
- * These should really be __section__(".bss.page_aligned") as well, but
- * gcc 3.0 and earlier don't handle that correctly.
- */
-static char softirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));
-
-/*
- * allocate per-cpu stacks for hardirq and for softirq processing
- */
-void irq_ctx_init(int cpu)
-{
-       union irq_ctx *irqctx;
-
-       if (hardirq_ctx[cpu])
-               return;
-
-       irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-
-       hardirq_ctx[cpu] = irqctx;
-
-       irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-
-       softirq_ctx[cpu] = irqctx;
-
-       printk("CPU %u irqstacks, hard=%p soft=%p\n",
-               cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
-}
-
-extern asmlinkage void __do_softirq(void);
-
-asmlinkage void do_softirq(void)
-{
-       unsigned long flags;
-       struct thread_info *curctx;
-       union irq_ctx *irqctx;
-       u32 *isp;
-
-       if (in_interrupt())
-               return;
-
-       local_irq_save(flags);
-
-       if (local_softirq_pending()) {
-               curctx = current_thread_info();
-               irqctx = softirq_ctx[smp_processor_id()];
-               irqctx->tinfo.task = curctx->task;
-               irqctx->tinfo.previous_esp = current_stack_pointer();
-
-               /* build the stack frame on the softirq stack */
-               isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-
-
-               asm volatile(
-                       "       xchgl   %%ebx,%%esp     \n"
-                       "       call    __do_softirq    \n"
-                       "       movl    %%ebx,%%esp     \n"
-                       : "=b"(isp)
-                       : "0"(isp)
-                       : "memory", "cc", "edx", "ecx", "eax"
-               );
-       }
-
-       local_irq_restore(flags);
-}
-
-EXPORT_SYMBOL(do_softirq);
-#endif
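
For reference, the disable/enable pair deleted above nests through a per-descriptor depth counter: only the first disable and the matching final enable actually touch the interrupt controller. The standalone C sketch below models just that counting discipline; fake_irq_desc, hw_mask and hw_unmask are illustrative stand-ins for this note, not kernel or Xen APIs.

#include <stdio.h>

/* Illustrative stand-ins for the per-IRQ descriptor and the PIC hooks. */
struct fake_irq_desc {
        unsigned int depth;     /* nesting count of outstanding disables */
        int masked;             /* 1 if the line is masked at the "hardware" */
};

static void hw_mask(struct fake_irq_desc *d)   { d->masked = 1; }
static void hw_unmask(struct fake_irq_desc *d) { d->masked = 0; }

/* Only the 0 -> 1 transition masks the line. */
static void fake_disable_irq(struct fake_irq_desc *d)
{
        if (!d->depth++)
                hw_mask(d);
}

/* Only the 1 -> 0 transition unmasks; a call at depth 0 is unbalanced. */
static void fake_enable_irq(struct fake_irq_desc *d)
{
        if (d->depth == 0) {
                fprintf(stderr, "unbalanced enable\n");
                return;
        }
        if (!--d->depth)
                hw_unmask(d);
}

int main(void)
{
        struct fake_irq_desc d = { 0, 0 };

        fake_disable_irq(&d);   /* masks the line */
        fake_disable_irq(&d);   /* nested, no hardware access */
        fake_enable_irq(&d);    /* still masked, depth drops to 1 */
        fake_enable_irq(&d);    /* balanced: unmasks */
        printf("depth=%u masked=%d\n", d.depth, d.masked);
        return 0;
}

Running it prints depth=0 masked=0: the line ends up unmasked only once every disable has been balanced by an enable, which is the behaviour disable_irq_nosync()/enable_irq() implement above.
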
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ldt.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/ldt.c
deleted file mode 100644 (file)
index 4dd6973..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * linux/kernel/ldt.c
- *
- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/ldt.h>
-#include <asm/desc.h>
-
-#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-static void flush_ldt(void *null)
-{
-       if (current->active_mm) {
-               load_LDT(&current->active_mm->context);
-               flush_page_update_queue();
-       }
-}
-#endif
-
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-{
-       void *oldldt;
-       void *newldt;
-       int oldsize;
-
-       if (mincount <= pc->size)
-               return 0;
-       oldsize = pc->size;
-       mincount = (mincount+511)&(~511);
-       if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-               newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-       else
-               newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-
-       if (!newldt)
-               return -ENOMEM;
-
-       if (oldsize)
-               memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-       oldldt = pc->ldt;
-       memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-       pc->ldt = newldt;
-       wmb();
-       pc->size = mincount;
-       wmb();
-
-       if (reload) {
-#ifdef CONFIG_SMP
-               cpumask_t mask;
-               preempt_disable();
-#endif
-               make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
-                                   PAGE_SIZE);
-               load_LDT(pc);
-               flush_page_update_queue();
-#ifdef CONFIG_SMP
-               mask = cpumask_of_cpu(smp_processor_id());
-               if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-                       smp_call_function(flush_ldt, NULL, 1, 1);
-               preempt_enable();
-#endif
-       }
-       if (oldsize) {
-               make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
-                       PAGE_SIZE);
-               flush_page_update_queue();
-               if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-                       vfree(oldldt);
-               else
-                       kfree(oldldt);
-       }
-       return 0;
-}
-
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-{
-       int err = alloc_ldt(new, old->size, 0);
-       if (err < 0)
-               return err;
-       memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-       make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
-                           PAGE_SIZE);
-       flush_page_update_queue();
-       return 0;
-}
-
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       struct mm_struct * old_mm;
-       int retval = 0;
-
-       init_MUTEX(&mm->context.sem);
-       mm->context.size = 0;
-       old_mm = current->mm;
-       if (old_mm && old_mm->context.size > 0) {
-               down(&old_mm->context.sem);
-               retval = copy_ldt(&mm->context, &old_mm->context);
-               up(&old_mm->context.sem);
-       }
-       return retval;
-}
-
-/*
- * No need to lock the MM as we are the last user
- */
-void destroy_context(struct mm_struct *mm)
-{
-       if (mm->context.size) {
-               if (mm == current->active_mm)
-                       clear_LDT();
-               make_pages_writable(mm->context.ldt, 
-                                    (mm->context.size * LDT_ENTRY_SIZE) /
-                                    PAGE_SIZE);
-               flush_page_update_queue();
-               if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-                       vfree(mm->context.ldt);
-               else
-                       kfree(mm->context.ldt);
-               mm->context.size = 0;
-       }
-}
-
-static int read_ldt(void __user * ptr, unsigned long bytecount)
-{
-       int err;
-       unsigned long size;
-       struct mm_struct * mm = current->mm;
-
-       if (!mm->context.size)
-               return 0;
-       if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-               bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-
-       down(&mm->context.sem);
-       size = mm->context.size*LDT_ENTRY_SIZE;
-       if (size > bytecount)
-               size = bytecount;
-
-       err = 0;
-       if (copy_to_user(ptr, mm->context.ldt, size))
-               err = -EFAULT;
-       up(&mm->context.sem);
-       if (err < 0)
-               return err;
-       if (size != bytecount) {
-               /* zero-fill the rest */
-               clear_user(ptr+size, bytecount-size);
-       }
-       return bytecount;
-}
-
-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-{
-       int err;
-       unsigned long size;
-       void *address;
-
-       err = 0;
-       address = &default_ldt[0];
-       size = 5*sizeof(struct desc_struct);
-       if (size > bytecount)
-               size = bytecount;
-
-       err = size;
-       if (copy_to_user(ptr, address, size))
-               err = -EFAULT;
-
-       return err;
-}
-
-static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-{
-       struct mm_struct * mm = current->mm;
-       __u32 entry_1, entry_2, *lp;
-       unsigned long phys_lp;
-       int error;
-       struct user_desc ldt_info;
-
-       error = -EINVAL;
-       if (bytecount != sizeof(ldt_info))
-               goto out;
-       error = -EFAULT;        
-       if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-               goto out;
-
-       error = -EINVAL;
-       if (ldt_info.entry_number >= LDT_ENTRIES)
-               goto out;
-       if (ldt_info.contents == 3) {
-               if (oldmode)
-                       goto out;
-               if (ldt_info.seg_not_present == 0)
-                       goto out;
-       }
-
-       down(&mm->context.sem);
-       if (ldt_info.entry_number >= mm->context.size) {
-               error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-               if (error < 0)
-                       goto out_unlock;
-       }
-
-       lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-       phys_lp = arbitrary_virt_to_phys(lp);
-
-       /* Allow LDTs to be cleared by the user. */
-       if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-               if (oldmode || LDT_empty(&ldt_info)) {
-                       entry_1 = 0;
-                       entry_2 = 0;
-                       goto install;
-               }
-       }
-
-       entry_1 = LDT_entry_a(&ldt_info);
-       entry_2 = LDT_entry_b(&ldt_info);
-       if (oldmode)
-               entry_2 &= ~(1 << 20);
-
-       /* Install the new entry ...  */
-install:
-       error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
-
-out_unlock:
-       up(&mm->context.sem);
-out:
-       return error;
-}
-
-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-{
-       int ret = -ENOSYS;
-
-       switch (func) {
-       case 0:
-               ret = read_ldt(ptr, bytecount);
-               break;
-       case 1:
-               ret = write_ldt(ptr, bytecount, 1);
-               break;
-       case 2:
-               ret = read_default_ldt(ptr, bytecount);
-               break;
-       case 0x11:
-               ret = write_ldt(ptr, bytecount, 0);
-               break;
-       }
-       return ret;
-}
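
The alloc_ldt() logic deleted above grows the table in 512-entry steps and switches from kmalloc to vmalloc once the table exceeds one page. Below is a small standalone sketch of just that sizing policy; LDT_ENTRY_SIZE and PAGE_SIZE are hard-coded to the i386 values of 8 and 4096, and show_ldt_sizing is an illustration, not the kernel function itself.

#include <stdio.h>

/* Illustrative constants matching i386: 8-byte descriptors, 4 KiB pages. */
#define LDT_ENTRY_SIZE 8
#define PAGE_SIZE      4096

/*
 * Mirror of the sizing policy in alloc_ldt(): round the requested entry
 * count up to a multiple of 512 and report which allocator a table of
 * that size would use.
 */
static void show_ldt_sizing(int mincount)
{
        int rounded = (mincount + 511) & ~511;
        long bytes = (long)rounded * LDT_ENTRY_SIZE;

        printf("request %d -> %d entries (%ld bytes), %s\n",
               mincount, rounded, bytes,
               bytes > PAGE_SIZE ? "vmalloc" : "kmalloc");
}

int main(void)
{
        show_ldt_sizing(1);     /* 512 entries, 4096 bytes, kmalloc */
        show_ldt_sizing(513);   /* 1024 entries, 8192 bytes, vmalloc */
        return 0;
}

With these constants even a single-entry request is rounded up to a full 4 KiB page, which is why the read-only remapping of the LDT above can work page by page.
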
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/pci-dma.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/pci-dma.c
deleted file mode 100644 (file)
index 06a8659..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * On i386 there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/version.h>
-#include <asm/io.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define pte_offset_kernel pte_offset
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-                          dma_addr_t *dma_handle)
-#else
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int gfp)
-#endif
-{
-       void *ret;
-       unsigned int order = get_order(size);
-       unsigned long vstart;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-       int gfp = GFP_ATOMIC;
-
-       if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
-#else
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
-#endif
-
-       vstart = __get_free_pages(gfp, order);
-       ret = (void *)vstart;
-       if (ret == NULL)
-               return ret;
-
-       /*
-        * Ensure multi-page extents are contiguous in machine memory.
-        * This code could be cleaned up some, and the number of
-        * hypercalls reduced.
-        */
-       if (size > PAGE_SIZE) {
-               pgd_t         *pgd; 
-               pmd_t         *pmd;
-               pte_t         *pte;
-               unsigned long  pfn, i;
-               scrub_pages(vstart, 1 << order);
-               /* 1. Zap current PTEs, giving away the underlying pages. */
-               for (i = 0; i < (1<<order); i++) {
-                       pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-                       pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-                       pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-                       pfn = pte->pte_low >> PAGE_SHIFT;
-                       queue_l1_entry_update(pte, 0);
-                       phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
-                               INVALID_P2M_ENTRY;
-                       flush_page_update_queue();
-                       if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
-                                                 &pfn, 1, 0) != 1) BUG();
-               }
-               /* 2. Get a new contiguous memory extent. */
-               if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
-                                         &pfn, 1, order) != 1) BUG();
-               /* 3. Map the new extent in place of old pages. */
-               for (i = 0; i < (1<<order); i++) {
-                       pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-                       pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-                       pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-                       queue_l1_entry_update(
-                               pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
-                       queue_machphys_update(
-                               pfn+i, (__pa(ret)>>PAGE_SHIFT)+i);
-                       phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
-                               pfn+i;
-               }
-               /* Flush updates through and flush the TLB. */
-               xen_tlb_flush();
-       }
-
-       memset(ret, 0, size);
-       *dma_handle = virt_to_bus(ret);
-
-       return ret;
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-#else
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-#endif
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-}
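
The coherent-DMA allocator removed above sizes its request with get_order(size) before calling __get_free_pages(). The sketch below shows that rounding in isolation, assuming 4 KiB pages; page_order here is a local reimplementation written for this note, not the kernel's get_order().

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/*
 * Smallest order such that (PAGE_SIZE << order) >= size, i.e. the
 * power-of-two page count backing an allocation of 'size' bytes.
 */
static int page_order(unsigned long size)
{
        int order = 0;
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        printf("4096 bytes  -> order %d\n", page_order(4096));   /* 0 */
        printf("4097 bytes  -> order %d\n", page_order(4097));   /* 1 */
        printf("65536 bytes -> order %d\n", page_order(65536));  /* 4 */
        return 0;
}

Any request larger than one page therefore comes back as a power-of-two run of pages, and it is exactly that multi-page extent which the Xen-specific code above then re-exchanges to make contiguous in machine memory.
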
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/process.c
deleted file mode 100644 (file)
index ee7be2b..0000000
+++ /dev/null
@@ -1,827 +0,0 @@
-/*
- *  linux/arch/i386/kernel/process.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- *
- *  Pentium III FXSR, SSE support
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#include <stdarg.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/elfcore.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/mc146818rtc.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/ptrace.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ldt.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/irq.h>
-#include <asm/desc.h>
-#include <asm-xen/multicall.h>
-#include <asm/hypervisor-ifs/dom0_ops.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
-
-#include <linux/irq.h>
-#include <linux/err.h>
-
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-
-int hlt_counter;
-
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return ((unsigned long *)tsk->thread.esp)[3];
-}
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-
-void disable_hlt(void)
-{
-       hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-       hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       if (!hlt_counter && current_cpu_data.hlt_works_ok) {
-               local_irq_disable();
-               if (!need_resched())
-                       safe_halt();
-               else
-                       local_irq_enable();
-       }
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
-       int oldval;
-
-       local_irq_enable();
-
-       /*
-        * Deal with another CPU just having chosen a thread to
-        * run here:
-        */
-       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-       if (!oldval) {
-               set_thread_flag(TIF_POLLING_NRFLAG);
-               asm volatile(
-                       "2:"
-                       "testl %0, %1;"
-                       "rep; nop;"
-                       "je 2b;"
-                       : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
-
-               clear_thread_flag(TIF_POLLING_NRFLAG);
-       } else {
-               set_need_resched();
-       }
-}
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
-       /* endless idle loop with no priority at all */
-       while (1) {
-               while (!need_resched()) {
-                       void (*idle)(void) = pm_idle;
-
-                       if (!idle)
-                               idle = default_idle;
-
-                       irq_stat[smp_processor_id()].idle_timestamp = jiffies;
-                       idle();
-               }
-               schedule();
-       }
-}
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- */
-static void mwait_idle(void)
-{
-       local_irq_enable();
-
-       if (!need_resched()) {
-               set_thread_flag(TIF_POLLING_NRFLAG);
-               do {
-                       __monitor((void *)&current_thread_info()->flags, 0, 0);
-                       if (need_resched())
-                               break;
-                       __mwait(0, 0);
-               } while (!need_resched());
-               clear_thread_flag(TIF_POLLING_NRFLAG);
-       }
-}
-
-void __init select_idle_routine(const struct cpuinfo_x86 *c)
-{
-       if (cpu_has(c, X86_FEATURE_MWAIT)) {
-               printk("monitor/mwait feature present.\n");
-               /*
-                * Skip if setup has overridden idle.
-                * Also, take care of systems with asymmetric CPUs:
-                * use mwait_idle only if all CPUs support it.
-                * If not, we fall back to default_idle().
-                */
-               if (!pm_idle) {
-                       printk("using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-               return;
-       }
-       if (!pm_idle)
-               pm_idle = default_idle;
-       return;
-}
-
-static int __init idle_setup (char *str)
-{
-       if (!strncmp(str, "poll", 4)) {
-               printk("using polling idle threads.\n");
-               pm_idle = poll_idle;
-#ifdef CONFIG_X86_SMP
-               if (smp_num_siblings > 1)
-                       printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
-#endif
-       } else if (!strncmp(str, "halt", 4)) {
-               printk("using halt in idle threads.\n");
-               pm_idle = default_idle;
-       }
-
-       return 1;
-}
-
-__setup("idle=", idle_setup);
-
-void show_regs(struct pt_regs * regs)
-{
-       printk("\n");
-       printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-       printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
-       print_symbol("EIP is at %s\n", regs->eip);
-
-       if (regs->xcs & 2)
-               printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-       printk(" EFLAGS: %08lx    %s  (%s)\n",regs->eflags, print_tainted(),UTS_RELEASE);
-       printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-               regs->eax,regs->ebx,regs->ecx,regs->edx);
-       printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-               regs->esi, regs->edi, regs->ebp);
-       printk(" DS: %04x ES: %04x\n",
-               0xffff & regs->xds,0xffff & regs->xes);
-
-       show_trace(NULL, &regs->esp);
-}
-
-/*
- * This gets run with %ebx containing the
- * function to call, and %edx containing
- * the "args".
- */
-extern void kernel_thread_helper(void);
-__asm__(".section .text\n"
-       ".align 4\n"
-       "kernel_thread_helper:\n\t"
-       "movl %edx,%eax\n\t"
-       "pushl %edx\n\t"
-       "call *%ebx\n\t"
-       "pushl %eax\n\t"
-       "call do_exit\n"
-       ".previous");
-
-/*
- * Create a kernel thread
- */
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
-       struct pt_regs regs;
-
-       memset(&regs, 0, sizeof(regs));
-
-       regs.ebx = (unsigned long) fn;
-       regs.edx = (unsigned long) arg;
-
-       regs.xds = __USER_DS;
-       regs.xes = __USER_DS;
-       regs.orig_eax = -1;
-       regs.eip = (unsigned long) kernel_thread_helper;
-       regs.xcs = __KERNEL_CS;
-       regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-
-       /* Ok, create the new process.. */
-       return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-       struct task_struct *tsk = current;
-
-       /* The process may have allocated an io port bitmap... nuke it. */
-       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
-               int cpu = get_cpu();
-               struct tss_struct *tss = init_tss + cpu;
-               kfree(tsk->thread.io_bitmap_ptr);
-               tsk->thread.io_bitmap_ptr = NULL;
-               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-               put_cpu();
-       }
-}
-
-void flush_thread(void)
-{
-       struct task_struct *tsk = current;
-
-       memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));        
-       /*
-        * Forget coprocessor state..
-        */
-       clear_fpu(tsk);
-       tsk->used_math = 0;
-}
-
-void release_thread(struct task_struct *dead_task)
-{
-       if (dead_task->mm) {
-               // temporary debugging check
-               if (dead_task->mm->context.size) {
-                       printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-                                       dead_task->comm,
-                                       dead_task->mm->context.ldt,
-                                       dead_task->mm->context.size);
-                       BUG();
-               }
-       }
-
-       release_x86_irqs(dead_task);
-}
-
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
-       unlazy_fpu(tsk);
-}
-
-int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-       unsigned long unused,
-       struct task_struct * p, struct pt_regs * regs)
-{
-       struct pt_regs * childregs;
-       struct task_struct *tsk;
-       int err;
-       unsigned long eflags;
-
-       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-       *childregs = *regs;
-       childregs->eax = 0;
-       childregs->esp = esp;
-       p->set_child_tid = p->clear_child_tid = NULL;
-
-       p->thread.esp = (unsigned long) childregs;
-       p->thread.esp0 = (unsigned long) (childregs+1);
-
-       p->thread.eip = (unsigned long) ret_from_fork;
-
-       savesegment(fs,p->thread.fs);
-       savesegment(gs,p->thread.gs);
-
-       tsk = current;
-       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
-               p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-               if (!p->thread.io_bitmap_ptr)
-                       return -ENOMEM;
-               memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-                       IO_BITMAP_BYTES);
-       }
-
-       /*
-        * Set a new TLS for the child thread?
-        */
-       if (clone_flags & CLONE_SETTLS) {
-               struct desc_struct *desc;
-               struct user_desc info;
-               int idx;
-
-               err = -EFAULT;
-               if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-                       goto out;
-               err = -EINVAL;
-               if (LDT_empty(&info))
-                       goto out;
-
-               idx = info.entry_number;
-               if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-                       goto out;
-
-               desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-               desc->a = LDT_entry_a(&info);
-               desc->b = LDT_entry_b(&info);
-       }
-
-
-       __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
-       p->thread.io_pl = (eflags >> 12) & 3;
-
-       err = 0;
- out:
-       if (err && p->thread.io_bitmap_ptr)
-               kfree(p->thread.io_bitmap_ptr);
-       return err;
-}
-
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
-       int i;
-
-/* changed the size calculations - should hopefully work better. lbt */
-       dump->magic = CMAGIC;
-       dump->start_code = 0;
-       dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-       dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-       dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-       dump->u_dsize -= dump->u_tsize;
-       dump->u_ssize = 0;
-       for (i = 0; i < 8; i++)
-               dump->u_debugreg[i] = current->thread.debugreg[i];  
-
-       if (dump->start_stack < TASK_SIZE)
-               dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
-       dump->regs.ebx = regs->ebx;
-       dump->regs.ecx = regs->ecx;
-       dump->regs.edx = regs->edx;
-       dump->regs.esi = regs->esi;
-       dump->regs.edi = regs->edi;
-       dump->regs.ebp = regs->ebp;
-       dump->regs.eax = regs->eax;
-       dump->regs.ds = regs->xds;
-       dump->regs.es = regs->xes;
-       savesegment(fs,dump->regs.fs);
-       savesegment(gs,dump->regs.gs);
-       dump->regs.orig_eax = regs->orig_eax;
-       dump->regs.eip = regs->eip;
-       dump->regs.cs = regs->xcs;
-       dump->regs.eflags = regs->eflags;
-       dump->regs.esp = regs->esp;
-       dump->regs.ss = regs->xss;
-
-       dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-}
-
-/* 
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-       struct pt_regs ptregs;
-       
-       ptregs = *(struct pt_regs *)
-               ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
-       ptregs.xcs &= 0xffff;
-       ptregs.xds &= 0xffff;
-       ptregs.xes &= 0xffff;
-       ptregs.xss &= 0xffff;
-
-       elf_core_copy_regs(regs, &ptregs);
-
-       return 1;
-}
-
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread,register) \
-               HYPERVISOR_set_debugreg((register),     \
-                       (thread->debugreg[register]))
-
-/*
- *     switch_to(x,y) should switch tasks from x to y.
- *
- * We fsave/fwait so that an exception goes off at the right time
- * (as a call from the fsave or fwait in effect) rather than to
- * the wrong process. Lazy FP saving no longer makes any sense
- * with modern CPU's, and this simplifies a lot of things (SMP
- * and UP become the same).
- *
- * NOTE! We used to use the x86 hardware context switching. The
- * reason for not using it any more becomes apparent when you
- * try to recover gracefully from saved state that is no longer
- * valid (stale segment register values in particular). With the
- * hardware task-switch, there is no way to fix up bad state in
- * a reasonable manner.
- *
- * The fact that Intel documents the hardware task-switching to
- * be slow is a fairly red herring - this code is not noticeably
- * faster. However, there _is_ some room for improvement here,
- * so the performance issues may eventually be a valid point.
- * More important, however, is the fact that this allows us much
- * more flexibility.
- *
- * The return value (in %eax) will be the "prev" task after
- * the task-switch, and shows up in ret_from_fork in entry.S,
- * for example.
- */
-struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-{
-       struct thread_struct *prev = &prev_p->thread,
-                                *next = &next_p->thread;
-       int cpu = smp_processor_id();
-       struct tss_struct *tss = init_tss + cpu;
-       dom0_op_t op;
-
-        /* NB. No need to disable interrupts as already done in sched.c */
-        /* __cli(); */
-
-       /*
-        * Save away %fs and %gs. No need to save %es and %ds, as
-        * those are always kernel segments while inside the kernel.
-        */
-       asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-       asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
-
-       /*
-        * We clobber FS and GS here so that we avoid a GPF when
-        * restoring previous task's FS/GS values in Xen when the LDT
-        * is switched. If we don't do this then we can end up
-        * erroneously re-flushing the page-update queue when we
-        * 'execute_multicall_list'.
-        */
-       __asm__ __volatile__ ( 
-               "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
-               "eax" );
-
-       MULTICALL_flush_page_update_queue();
-
-       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-
-       /*
-        * This is basically '__unlazy_fpu', except that we queue a
-        * multicall to indicate FPU task switch, rather than
-        * synchronously trapping to Xen.
-        */
-       if (prev_p->thread_info->status & TS_USEDFPU) {
-               save_init_fpu(prev_p);
-               queue_multicall0(__HYPERVISOR_fpu_taskswitch);
-       }
-
-       /*
-        * Reload esp0, LDT and the page table pointer:
-        * This is load_esp0(tss, next) with a multicall.
-        */
-       tss->esp0 = next->esp0;
-       /* This can only happen when SEP is enabled, no need to test
-        * "SEP"arately */
-       if (unlikely(tss->ss1 != next->sysenter_cs)) {
-               tss->ss1 = next->sysenter_cs;
-               wrmsr(MSR_IA32_SYSENTER_CS, next->sysenter_cs, 0);
-       }
-       queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
-
-       /*
-        * Load the per-thread Thread-Local Storage descriptor.
-        * This is load_TLS(next, cpu) with multicalls.
-        */
-#define C(i) do {                                                          \
-       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||        \
-                    next->tls_array[i].b != prev->tls_array[i].b))         \
-               queue_multicall3(__HYPERVISOR_update_descriptor,            \
-                                virt_to_machine(&get_cpu_gdt_table(cpu)    \
-                                                [GDT_ENTRY_TLS_MIN + i]),  \
-                                ((u32 *)&next->tls_array[i])[0],           \
-                                ((u32 *)&next->tls_array[i])[1]);          \
-} while (0)
-       C(0); C(1); C(2);
-#undef C
-
-       if (xen_start_info.flags & SIF_PRIVILEGED) {
-               op.cmd           = DOM0_IOPL;
-               op.u.iopl.domain = DOMID_SELF;
-               op.u.iopl.iopl   = next->io_pl;
-               queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
-       }
-
-       /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
-       execute_multicall_list();
-        /* __sti(); */
-
-       /*
-        * Restore %fs and %gs if needed.
-        */
-       if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
-               loadsegment(fs, next->fs);
-               loadsegment(gs, next->gs);
-       }
-
-       /*
-        * Now maybe reload the debug registers
-        */
-       if (unlikely(next->debugreg[7])) {
-               loaddebug(next, 0);
-               loaddebug(next, 1);
-               loaddebug(next, 2);
-               loaddebug(next, 3);
-               /* no 4 and 5 */
-               loaddebug(next, 6);
-               loaddebug(next, 7);
-       }
-
-       if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-               if (next->io_bitmap_ptr) {
-                       /*
-                        * 4 cachelines copy ... not good, but not that
-                        * bad either. Anyone got something better?
-                        * This only affects processes which use ioperm().
-                        * [Putting the TSSs into 4k-tlb mapped regions
-                        * and playing VM tricks to switch the IO bitmap
-                        * is not really acceptable.]
-                        */
-                       memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                               IO_BITMAP_BYTES);
-                       tss->io_bitmap_base = IO_BITMAP_OFFSET;
-               } else
-                       /*
-                        * a bitmap offset pointing outside of the TSS limit
-                        * causes a nicely controllable SIGSEGV if a process
-                        * tries to use a port IO instruction. The first
-                        * sys_ioperm() call sets up the bitmap properly.
-                        */
-                       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-       }
-       return prev_p;
-}
-
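The switch path above batches every privileged operation (the FPU task switch, the kernel stack switch, the TLS descriptor updates and, for a privileged domain, the IOPL change) into one multicall list and enters Xen only once, at execute_multicall_list(). A minimal sketch of the same pattern, using the queue_multicall*() and execute_multicall_list() helpers this file already relies on:

	/* Illustrative only: batch two hypercalls into a single trap to Xen. */
	static void example_batched_switch(struct tss_struct *tss)
	{
		/* Queue the kernel stack switch for the next task... */
		queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
		/* ...queue the lazy-FPU notification... */
		queue_multicall0(__HYPERVISOR_fpu_taskswitch);
		/* ...then perform both with one hypervisor entry. */
		execute_multicall_list();
	}

Each queued entry occupies one slot in the small multicall_list[] defined in setup.c below, which is why the batch is kept to a handful of calls.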
-asmlinkage int sys_fork(struct pt_regs regs)
-{
-       return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-}
-
-asmlinkage int sys_clone(struct pt_regs regs)
-{
-       unsigned long clone_flags;
-       unsigned long newsp;
-       int __user *parent_tidptr, *child_tidptr;
-
-       clone_flags = regs.ebx;
-       newsp = regs.ecx;
-       parent_tidptr = (int __user *)regs.edx;
-       child_tidptr = (int __user *)regs.edi;
-       if (!newsp)
-               newsp = regs.esp;
-       return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0, parent_tidptr, child_tidptr);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(struct pt_regs regs)
-{
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(struct pt_regs regs)
-{
-       int error;
-       char * filename;
-
-       filename = getname((char __user *) regs.ebx);
-       error = PTR_ERR(filename);
-       if (IS_ERR(filename))
-               goto out;
-       error = do_execve(filename,
-                       (char __user * __user *) regs.ecx,
-                       (char __user * __user *) regs.edx,
-                       &regs);
-       if (error == 0) {
-               current->ptrace &= ~PT_DTRACE;
-               /* Make sure we don't return using sysenter.. */
-               set_thread_flag(TIF_IRET);
-       }
-       putname(filename);
-out:
-       return error;
-}
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long ebp, esp, eip;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack_page = (unsigned long)p->thread_info;
-       esp = p->thread.esp;
-       if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-               return 0;
-       /* include/asm-i386/system.h:switch_to() pushes ebp last. */
-       ebp = *(unsigned long *) esp;
-       do {
-               if (ebp < stack_page || ebp > top_ebp+stack_page)
-                       return 0;
-               eip = *(unsigned long *) (ebp+4);
-               if (!in_sched_functions(eip))
-                       return eip;
-               ebp = *(unsigned long *) ebp;
-       } while (count++ < 16);
-       return 0;
-}
-
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
-       struct thread_struct *t = &current->thread;
-       int idx;
-
-       for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-               if (desc_empty(t->tls_array + idx))
-                       return idx + GDT_ENTRY_TLS_MIN;
-       return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-{
-       struct thread_struct *t = &current->thread;
-       struct user_desc info;
-       struct desc_struct *desc;
-       int cpu, idx;
-
-       if (copy_from_user(&info, u_info, sizeof(info)))
-               return -EFAULT;
-       idx = info.entry_number;
-
-       /*
-        * index -1 means the kernel should try to find and
-        * allocate an empty descriptor:
-        */
-       if (idx == -1) {
-               idx = get_free_idx();
-               if (idx < 0)
-                       return idx;
-               if (put_user(idx, &u_info->entry_number))
-                       return -EFAULT;
-       }
-
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
-
-       desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-       /*
-        * We must not get preempted while modifying the TLS.
-        */
-       cpu = get_cpu();
-
-       if (LDT_empty(&info)) {
-               desc->a = 0;
-               desc->b = 0;
-       } else {
-               desc->a = LDT_entry_a(&info);
-               desc->b = LDT_entry_b(&info);
-       }
-       load_TLS(t, cpu);
-
-       put_cpu();
-
-       return 0;
-}
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
-       (((desc)->a >> 16) & 0x0000ffff) | \
-       (((desc)->b << 16) & 0x00ff0000) | \
-       ( (desc)->b        & 0xff000000)   )
-
-#define GET_LIMIT(desc) ( \
-       ((desc)->a & 0x0ffff) | \
-        ((desc)->b & 0xf0000) )
-       
-#define GET_32BIT(desc)                (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc)     (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc)     (((desc)->b >>  9) & 1)
-#define GET_LIMIT_PAGES(desc)  (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc)      (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc)      (((desc)->b >> 20) & 1)
-
-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-{
-       struct user_desc info;
-       struct desc_struct *desc;
-       int idx;
-
-       if (get_user(idx, &u_info->entry_number))
-               return -EFAULT;
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
-
-       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-       info.entry_number = idx;
-       info.base_addr = GET_BASE(desc);
-       info.limit = GET_LIMIT(desc);
-       info.seg_32bit = GET_32BIT(desc);
-       info.contents = GET_CONTENTS(desc);
-       info.read_exec_only = !GET_WRITABLE(desc);
-       info.limit_in_pages = GET_LIMIT_PAGES(desc);
-       info.seg_not_present = !GET_PRESENT(desc);
-       info.useable = GET_USEABLE(desc);
-
-       if (copy_to_user(u_info, &info, sizeof(info)))
-               return -EFAULT;
-       return 0;
-}
-
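The GET_* macros above unpack a segment descriptor from its two 32-bit words (a = low word, b = high word). As a worked example with made-up values, a present, writable, 32-bit, page-granular descriptor with base 0x12345678 and limit 0xabcde encodes as a = 0x5678bcde, b = 0x12caf234, and the macros recover it as:

	GET_BASE(desc)  = ((0x5678bcde >> 16) & 0x0000ffff)   /* base 15..0  = 0x00005678 */
	                | ((0x12caf234 << 16) & 0x00ff0000)   /* base 23..16 = 0x00340000 */
	                | ( 0x12caf234        & 0xff000000)   /* base 31..24 = 0x12000000 */
	                = 0x12345678;
	GET_LIMIT(desc) = (0x5678bcde & 0x0ffff) | (0x12caf234 & 0xf0000) = 0xabcde;

sys_set_thread_area() performs the inverse packing via LDT_entry_a()/LDT_entry_b() before the words reach load_TLS() and, on Xen, the update_descriptor multicall in __switch_to().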
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/setup.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/setup.c
deleted file mode 100644 (file)
index 5b1aec8..0000000
+++ /dev/null
@@ -1,1471 +0,0 @@
-/*
- *  linux/arch/i386/kernel/setup.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- *
- *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- *
- *  Memory region support
- *     David Parsons <orc@pell.chi.il.us>, July-August 1999
- *
- *  Added E820 sanitization routine (removes overlapping memory regions);
- *  Brian Moyle <bmoyle@mvista.com>, February 2001
- *
- * Moved CPU detection code to cpu/${cpu}.c
- *    Patrick Mochel <mochel@osdl.org>, March 2002
- *
- *  Provisions for empty E820 memory regions (reported by certain BIOSes).
- *  Alex Achenbach <xela@slit.de>, December 2002.
- *
- */
-
-/*
- * This file handles the architecture-dependent parts of initialization
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/apm_bios.h>
-#include <linux/initrd.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/console.h>
-#include <linux/root_dev.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/efi.h>
-#include <linux/init.h>
-#include <linux/edd.h>
-#include <video/edid.h>
-#include <asm/e820.h>
-#include <asm/mpspec.h>
-#include <asm/setup.h>
-#include <asm/arch_hooks.h>
-#include <asm/sections.h>
-#include <asm/io_apic.h>
-#include <asm/ist.h>
-#include <asm/io.h>
-#include <asm-xen/hypervisor.h>
-#include "setup_arch_pre.h"
-
-int disable_pse __initdata = 0;
-
-/*
- * Machine setup..
- */
-
-#ifdef CONFIG_EFI
-int efi_enabled = 0;
-EXPORT_SYMBOL(efi_enabled);
-#endif
-
-/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-
-unsigned long mmu_cr4_features;
-EXPORT_SYMBOL_GPL(mmu_cr4_features);
-
-#ifdef CONFIG_ACPI_INTERPRETER
-       int acpi_disabled = 0;
-#else
-       int acpi_disabled = 1;
-#endif
-EXPORT_SYMBOL(acpi_disabled);
-
-#ifdef CONFIG_ACPI_BOOT
-int __initdata acpi_force = 0;
-extern acpi_interrupt_flags    acpi_sci_flags;
-#endif
-
-int MCA_bus;
-/* for MCA, but anyone else can use it if they want */
-unsigned int machine_id;
-unsigned int machine_submodel_id;
-unsigned int BIOS_revision;
-unsigned int mca_pentium_flag;
-
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-
-/* user-defined highmem size */
-static unsigned int highmem_pages = -1;
-
-/*
- * Setup options
- */
-struct drive_info_struct { char dummy[32]; } drive_info;
-struct screen_info screen_info;
-struct apm_info apm_info;
-struct sys_desc_table_struct {
-       unsigned short length;
-       unsigned char table[0];
-};
-struct edid_info edid_info;
-struct ist_info ist_info;
-struct e820map e820;
-
-unsigned char aux_device_present;
-
-extern void early_cpu_init(void);
-extern void dmi_scan_machine(void);
-extern void generic_apic_probe(char *);
-extern int root_mountflags;
-
-unsigned long saved_videomode;
-
-#define RAMDISK_IMAGE_START_MASK       0x07FF
-#define RAMDISK_PROMPT_FLAG            0x8000
-#define RAMDISK_LOAD_FLAG              0x4000  
-
-static char command_line[COMMAND_LINE_SIZE];
-
-unsigned char __initdata boot_params[PARAM_SIZE];
-
-static struct resource data_resource = {
-       .name   = "Kernel data",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource code_resource = {
-       .name   = "Kernel code",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static struct resource system_rom_resource = {
-       .name   = "System ROM",
-       .start  = 0xf0000,
-       .end    = 0xfffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource extension_rom_resource = {
-       .name   = "Extension ROM",
-       .start  = 0xe0000,
-       .end    = 0xeffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource adapter_rom_resources[] = { {
-       .name   = "Adapter ROM",
-       .start  = 0xc8000,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-} };
-
-#define ADAPTER_ROM_RESOURCES \
-       (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-
-static struct resource video_rom_resource = {
-       .name   = "Video ROM",
-       .start  = 0xc0000,
-       .end    = 0xc7fff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-#endif
-
-static struct resource video_ram_resource = {
-       .name   = "Video RAM area",
-       .start  = 0xa0000,
-       .end    = 0xbffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource standard_io_resources[] = { {
-       .name   = "dma1",
-       .start  = 0x0000,
-       .end    = 0x001f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "pic1",
-       .start  = 0x0020,
-       .end    = 0x0021,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "timer",
-       .start  = 0x0040,
-       .end    = 0x005f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "keyboard",
-       .start  = 0x0060,
-       .end    = 0x006f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "dma page reg",
-       .start  = 0x0080,
-       .end    = 0x008f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "pic2",
-       .start  = 0x00a0,
-       .end    = 0x00a1,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "dma2",
-       .start  = 0x00c0,
-       .end    = 0x00df,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "fpu",
-       .start  = 0x00f0,
-       .end    = 0x00ff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-} };
-
-#define STANDARD_IO_RESOURCES \
-       (sizeof standard_io_resources / sizeof standard_io_resources[0])
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
-       unsigned char *p, sum = 0;
-
-       for (p = rom; p < rom + length; p++)
-               sum += *p;
-       return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
-       unsigned long start, length, upper;
-       unsigned char *rom;
-       int           i;
-
-       /* video rom */
-       upper = adapter_rom_resources[0].start;
-       for (start = video_rom_resource.start; start < upper; start += 2048) {
-               rom = isa_bus_to_virt(start);
-               if (!romsignature(rom))
-                       continue;
-
-               video_rom_resource.start = start;
-
-               /* 0 < length <= 0x7f * 512, historically */
-               length = rom[2] * 512;
-
-               /* if checksum okay, trust length byte */
-               if (length && romchecksum(rom, length))
-                       video_rom_resource.end = start + length - 1;
-
-               request_resource(&iomem_resource, &video_rom_resource);
-               break;
-       }
-
-       start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-       if (start < upper)
-               start = upper;
-
-       /* system rom */
-       request_resource(&iomem_resource, &system_rom_resource);
-       upper = system_rom_resource.start;
-
-       /* check for extension rom (ignore length byte!) */
-       rom = isa_bus_to_virt(extension_rom_resource.start);
-       if (romsignature(rom)) {
-               length = extension_rom_resource.end - extension_rom_resource.start + 1;
-               if (romchecksum(rom, length)) {
-                       request_resource(&iomem_resource, &extension_rom_resource);
-                       upper = extension_rom_resource.start;
-               }
-       }
-
-       /* check for adapter roms on 2k boundaries */
-       for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-               rom = isa_bus_to_virt(start);
-               if (!romsignature(rom))
-                       continue;
-
-               /* 0 < length <= 0x7f * 512, historically */
-               length = rom[2] * 512;
-
-               /* but accept any length that fits if checksum okay */
-               if (!length || start + length > upper || !romchecksum(rom, length))
-                       continue;
-
-               adapter_rom_resources[i].start = start;
-               adapter_rom_resources[i].end = start + length - 1;
-               request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
-               start = adapter_rom_resources[i++].end & ~2047UL;
-       }
-}
-#endif
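romsignature() above checks for the 0xaa55 marker that PC option ROMs carry in their first two bytes, romchecksum() enforces the convention that all bytes of the image sum to zero modulo 256, and the byte at offset 2 gives the image length in 512-byte units (hence the rom[2] * 512 in probe_roms()). A toy image satisfying both checks, for illustration only:

	/* Illustrative only: a minimal 512-byte image that passes both checks. */
	static void example_rom(void)
	{
		static unsigned char rom[512] = { 0x55, 0xaa, 0x01 }; /* signature + length = 1 * 512 bytes */
		unsigned char sum = 0;
		int i;

		for (i = 0; i < 511; i++)
			sum += rom[i];
		rom[511] = (unsigned char)(0 - sum); /* final byte brings the total to 0 mod 256 */
		/* romsignature(rom) is now true and romchecksum(rom, 512) returns 1. */
	}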
-
-/*
- * Point at the empty zero page to start with. We map the real shared_info
- * page as soon as fixmap is up and running.
- */
-shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-EXPORT_SYMBOL(HYPERVISOR_shared_info);
-
-unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-EXPORT_SYMBOL(phys_to_machine_mapping);
-
-multicall_entry_t multicall_list[8];
-int nr_multicall_ents = 0;
-
-/* Raw start-of-day parameters from the hypervisor. */
-union xen_start_info_union xen_start_info_union;
-
-extern void (*pm_idle)(void);
-
-static void __init limit_regions(unsigned long long size)
-{
-       unsigned long long current_addr = 0;
-       int i;
-
-       if (efi_enabled) {
-               for (i = 0; i < memmap.nr_map; i++) {
-                       current_addr = memmap.map[i].phys_addr +
-                                      (memmap.map[i].num_pages << 12);
-                       if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
-                               if (current_addr >= size) {
-                                       memmap.map[i].num_pages -=
-                                               (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-                                       memmap.nr_map = i + 1;
-                                       return;
-                               }
-                       }
-               }
-       }
-       for (i = 0; i < e820.nr_map; i++) {
-               if (e820.map[i].type == E820_RAM) {
-                       current_addr = e820.map[i].addr + e820.map[i].size;
-                       if (current_addr >= size) {
-                               e820.map[i].size -= current_addr-size;
-                               e820.nr_map = i + 1;
-                               return;
-                       }
-               }
-       }
-}
-
-static void __init add_memory_region(unsigned long long start,
-                                  unsigned long long size, int type)
-{
-       int x;
-
-       if (!efi_enabled) {
-               x = e820.nr_map;
-
-               if (x == E820MAX) {
-                       printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-                       return;
-               }
-
-               e820.map[x].addr = start;
-               e820.map[x].size = size;
-               e820.map[x].type = type;
-               e820.nr_map++;
-       }
-} /* add_memory_region */
-
-#define E820_DEBUG     1
-
-static void __init print_memory_map(char *who)
-{
-       int i;
-
-       for (i = 0; i < e820.nr_map; i++) {
-               printk(" %s: %016Lx - %016Lx ", who,
-                       e820.map[i].addr,
-                       e820.map[i].addr + e820.map[i].size);
-               switch (e820.map[i].type) {
-               case E820_RAM:  printk("(usable)\n");
-                               break;
-               case E820_RESERVED:
-                               printk("(reserved)\n");
-                               break;
-               case E820_ACPI:
-                               printk("(ACPI data)\n");
-                               break;
-               case E820_NVS:
-                               printk("(ACPI NVS)\n");
-                               break;
-               default:        printk("type %lu\n", e820.map[i].type);
-                               break;
-               }
-       }
-}
-
-#if 0
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries.  The following 
- * replaces the original e820 map with a new one, removing overlaps.
- *
- */
-struct change_member {
-       struct e820entry *pbios; /* pointer to original bios entry */
-       unsigned long long addr; /* address for this change point */
-};
-struct change_member change_point_list[2*E820MAX] __initdata;
-struct change_member *change_point[2*E820MAX] __initdata;
-struct e820entry *overlap_list[E820MAX] __initdata;
-struct e820entry new_bios[E820MAX] __initdata;
-
-static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-{
-       struct change_member *change_tmp;
-       unsigned long current_type, last_type;
-       unsigned long long last_addr;
-       int chgidx, still_changing;
-       int overlap_entries;
-       int new_bios_entry;
-       int old_nr, new_nr, chg_nr;
-       int i;
-
-       /*
-               Visually we're performing the following (1,2,3,4 = memory types)...
-
-               Sample memory map (w/overlaps):
-                  ____22__________________
-                  ______________________4_
-                  ____1111________________
-                  _44_____________________
-                  11111111________________
-                  ____________________33__
-                  ___________44___________
-                  __________33333_________
-                  ______________22________
-                  ___________________2222_
-                  _________111111111______
-                  _____________________11_
-                  _________________4______
-
-               Sanitized equivalent (no overlap):
-                  1_______________________
-                  _44_____________________
-                  ___1____________________
-                  ____22__________________
-                  ______11________________
-                  _________1______________
-                  __________3_____________
-                  ___________44___________
-                  _____________33_________
-                  _______________2________
-                  ________________1_______
-                  _________________4______
-                  ___________________2____
-                  ____________________33__
-                  ______________________4_
-       */
-
-       /* if there's only one memory region, don't bother */
-       if (*pnr_map < 2)
-               return -1;
-
-       old_nr = *pnr_map;
-
-       /* bail out if we find any unreasonable addresses in bios map */
-       for (i=0; i<old_nr; i++)
-               if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-                       return -1;
-
-       /* create pointers for initial change-point information (for sorting) */
-       for (i=0; i < 2*old_nr; i++)
-               change_point[i] = &change_point_list[i];
-
-       /* record all known change-points (starting and ending addresses),
-          omitting those that are for empty memory regions */
-       chgidx = 0;
-       for (i=0; i < old_nr; i++)      {
-               if (biosmap[i].size != 0) {
-                       change_point[chgidx]->addr = biosmap[i].addr;
-                       change_point[chgidx++]->pbios = &biosmap[i];
-                       change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-                       change_point[chgidx++]->pbios = &biosmap[i];
-               }
-       }
-       chg_nr = chgidx;        /* true number of change-points */
-
-       /* sort change-point list by memory addresses (low -> high) */
-       still_changing = 1;
-       while (still_changing)  {
-               still_changing = 0;
-               for (i=1; i < chg_nr; i++)  {
-                       /* if <current_addr> > <last_addr>, swap */
-                       /* or, if current=<start_addr> & last=<end_addr>, swap */
-                       if ((change_point[i]->addr < change_point[i-1]->addr) ||
-                               ((change_point[i]->addr == change_point[i-1]->addr) &&
-                                (change_point[i]->addr == change_point[i]->pbios->addr) &&
-                                (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-                          )
-                       {
-                               change_tmp = change_point[i];
-                               change_point[i] = change_point[i-1];
-                               change_point[i-1] = change_tmp;
-                               still_changing=1;
-                       }
-               }
-       }
-
-       /* create a new bios memory map, removing overlaps */
-       overlap_entries=0;       /* number of entries in the overlap table */
-       new_bios_entry=0;        /* index for creating new bios map entries */
-       last_type = 0;           /* start with undefined memory type */
-       last_addr = 0;           /* start with 0 as last starting address */
-       /* loop through change-points, determining effect on the new bios map */
-       for (chgidx=0; chgidx < chg_nr; chgidx++)
-       {
-               /* keep track of all overlapping bios entries */
-               if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-               {
-                       /* add map entry to overlap list (> 1 entry implies an overlap) */
-                       overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-               }
-               else
-               {
-                       /* remove entry from list (order independent, so swap with last) */
-                       for (i=0; i<overlap_entries; i++)
-                       {
-                               if (overlap_list[i] == change_point[chgidx]->pbios)
-                                       overlap_list[i] = overlap_list[overlap_entries-1];
-                       }
-                       overlap_entries--;
-               }
-               /* if there are overlapping entries, decide which "type" to use */
-               /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-               current_type = 0;
-               for (i=0; i<overlap_entries; i++)
-                       if (overlap_list[i]->type > current_type)
-                               current_type = overlap_list[i]->type;
-               /* continue building up new bios map based on this information */
-               if (current_type != last_type)  {
-                       if (last_type != 0)      {
-                               new_bios[new_bios_entry].size =
-                                       change_point[chgidx]->addr - last_addr;
-                               /* move forward only if the new size was non-zero */
-                               if (new_bios[new_bios_entry].size != 0)
-                                       if (++new_bios_entry >= E820MAX)
-                                               break;  /* no more space left for new bios entries */
-                       }
-                       if (current_type != 0)  {
-                               new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-                               new_bios[new_bios_entry].type = current_type;
-                               last_addr=change_point[chgidx]->addr;
-                       }
-                       last_type = current_type;
-               }
-       }
-       new_nr = new_bios_entry;   /* retain count for new bios entries */
-
-       /* copy new bios mapping into original location */
-       memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-       *pnr_map = new_nr;
-
-       return 0;
-}
-
-/*
- * Copy the BIOS e820 map into a safe place.
- *
- * Sanity-check it while we're at it..
- *
- * If we're lucky and live on a modern system, the setup code
- * will have given us a memory map that we can use to properly
- * set up memory.  If we aren't, we'll fake a memory map.
- *
- * We check to see that the memory map contains at least 2 elements
- * before we'll use it, because the detection code in setup.S may
- * not be perfect and most every PC known to man has two memory
- * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
- * thinkpad 560x, for example, does not cooperate with the memory
- * detection code.)
- */
-static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-{
-       /* Only one memory region (or negative)? Ignore it */
-       if (nr_map < 2)
-               return -1;
-
-       do {
-               unsigned long long start = biosmap->addr;
-               unsigned long long size = biosmap->size;
-               unsigned long long end = start + size;
-               unsigned long type = biosmap->type;
-
-               /* Overflow in 64 bits? Ignore the memory map. */
-               if (start > end)
-                       return -1;
-
-               /*
-                * Some BIOSes claim RAM in the 640k - 1M region.
-                * Not right. Fix it up.
-                */
-               if (type == E820_RAM) {
-                       if (start < 0x100000ULL && end > 0xA0000ULL) {
-                               if (start < 0xA0000ULL)
-                                       add_memory_region(start, 0xA0000ULL-start, type);
-                               if (end <= 0x100000ULL)
-                                       continue;
-                               start = 0x100000ULL;
-                               size = end - start;
-                       }
-               }
-               add_memory_region(start, size, type);
-       } while (biosmap++,--nr_map);
-       return 0;
-}
-#endif
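Though compiled out in this Xen-sparse copy (the #if 0 above), copy_e820_map() documents the usual 640K-1M fix-up: a hypothetical BIOS entry claiming RAM over 0x90000-0x110000 would be split so that 0x90000-0xa0000 stays RAM, the 0xa0000-0x100000 legacy hole is dropped, and the remainder is re-added as RAM from 0x100000 upward.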
-
-#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-struct edd edd;
-#ifdef CONFIG_EDD_MODULE
-EXPORT_SYMBOL(edd);
-#endif
-/**
- * copy_edd() - Copy the BIOS EDD information
- *              from boot_params into a safe place.
- *
- */
-static inline void copy_edd(void)
-{
-     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-     edd.edd_info_nr = EDD_NR;
-}
-#else
-static inline void copy_edd(void)
-{
-}
-#endif
-
-/*
- * Do NOT EVER look at the BIOS memory size location.
- * It does not work on many machines.
- */
-#define LOWMEMSIZE()   (0x9f000)
-
-static void __init parse_cmdline_early (char ** cmdline_p)
-{
-       char c = ' ', *to = command_line, *from = saved_command_line;
-       int len = 0;
-       int userdef = 0;
-
-        memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
-       /* Save unparsed command line copy for /proc/cmdline */
-       saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-
-       for (;;) {
-               /*
-                * "mem=nopentium" disables the 4MB page tables.
-                * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
-                * to <mem>, overriding the bios size.
-                * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
-                * <start> to <start>+<mem>, overriding the bios size.
-                *
-                * HPA tells me bootloaders need to parse mem=, so no new
-                * option should be mem=  [also see Documentation/i386/boot.txt]
-                */
-               if (c == ' ' && !memcmp(from, "mem=", 4)) {
-                       if (to != command_line)
-                               to--;
-                       if (!memcmp(from+4, "nopentium", 9)) {
-                               from += 9+4;
-                               clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-                               disable_pse = 1;
-                       } else {
-                               /* If the user specifies memory size, we
-                                * limit the BIOS-provided memory map to
-                                * that size. exactmap can be used to specify
-                                * the exact map. mem=number can be used to
-                                * trim the existing memory map.
-                                */
-                               unsigned long long mem_size;
-                               mem_size = memparse(from+4, &from);
-                               limit_regions(mem_size);
-                               userdef=1;
-                       }
-               }
-
-               if (c == ' ' && !memcmp(from, "memmap=", 7)) {
-                       if (to != command_line)
-                               to--;
-                       if (!memcmp(from+7, "exactmap", 8)) {
-                               from += 8+7;
-                               e820.nr_map = 0;
-                               userdef = 1;
-                       } else {
-                               /* If the user specifies memory size, we
-                                * limit the BIOS-provided memory map to
-                                * that size. exactmap can be used to specify
-                                * the exact map. mem=number can be used to
-                                * trim the existing memory map.
-                                */
-                               unsigned long long start_at, mem_size;
-                               mem_size = memparse(from+7, &from);
-                               if (*from == '@') {
-                                       start_at = memparse(from+1, &from);
-                                       add_memory_region(start_at, mem_size, E820_RAM);
-                               } else if (*from == '#') {
-                                       start_at = memparse(from+1, &from);
-                                       add_memory_region(start_at, mem_size, E820_ACPI);
-                               } else if (*from == '$') {
-                                       start_at = memparse(from+1, &from);
-                                       add_memory_region(start_at, mem_size, E820_RESERVED);
-                               } else {
-                                       limit_regions(mem_size);
-                                       userdef=1;
-                               }
-                       }
-               }
-
-#ifdef  CONFIG_X86_SMP
-               /*
-                * If the BIOS enumerates physical processors before logical,
-                * maxcpus=N at enumeration-time can be used to disable HT.
-                */
-               else if (!memcmp(from, "maxcpus=", 8)) {
-                       extern unsigned int maxcpus;
-
-                       maxcpus = simple_strtoul(from + 8, NULL, 0);
-               }
-#endif
-
-#ifdef CONFIG_ACPI_BOOT
-               /* "acpi=off" disables both ACPI table parsing and interpreter */
-               else if (!memcmp(from, "acpi=off", 8)) {
-                       disable_acpi();
-               }
-
-               /* acpi=force to over-ride black-list */
-               else if (!memcmp(from, "acpi=force", 10)) {
-                       acpi_force = 1;
-                       acpi_ht = 1;
-                       acpi_disabled = 0;
-               }
-
-               /* acpi=strict disables out-of-spec workarounds */
-               else if (!memcmp(from, "acpi=strict", 11)) {
-                       acpi_strict = 1;
-               }
-
-               /* Limit ACPI just to boot-time to enable HT */
-               else if (!memcmp(from, "acpi=ht", 7)) {
-                       if (!acpi_force)
-                               disable_acpi();
-                       acpi_ht = 1;
-               }
-               
-               /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
-               else if (!memcmp(from, "pci=noacpi", 10)) {
-                       acpi_disable_pci();
-               }
-               /* "acpi=noirq" disables ACPI interrupt routing */
-               else if (!memcmp(from, "acpi=noirq", 10)) {
-                       acpi_noirq_set();
-               }
-
-               else if (!memcmp(from, "acpi_sci=edge", 13))
-                       acpi_sci_flags.trigger =  1;
-
-               else if (!memcmp(from, "acpi_sci=level", 14))
-                       acpi_sci_flags.trigger = 3;
-
-               else if (!memcmp(from, "acpi_sci=high", 13))
-                       acpi_sci_flags.polarity = 1;
-
-               else if (!memcmp(from, "acpi_sci=low", 12))
-                       acpi_sci_flags.polarity = 3;
-
-#ifdef CONFIG_X86_IO_APIC
-               else if (!memcmp(from, "acpi_skip_timer_override", 24))
-                       acpi_skip_timer_override = 1;
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-               /* disable IO-APIC */
-               else if (!memcmp(from, "noapic", 6))
-                       disable_ioapic_setup();
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* CONFIG_ACPI_BOOT */
-
-               /*
-                * highmem=size forces highmem to be exactly 'size' bytes.
-                * This works even on boxes that have no highmem otherwise.
-                * This also works to reduce highmem size on bigger boxes.
-                */
-               if (c == ' ' && !memcmp(from, "highmem=", 8))
-                       highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
-       
-               c = *(from++);
-               if (!c)
-                       break;
-               if (COMMAND_LINE_SIZE <= ++len)
-                       break;
-               *(to++) = c;
-       }
-       *to = '\0';
-       *cmdline_p = command_line;
-       if (userdef) {
-               printk(KERN_INFO "user-defined physical RAM map:\n");
-               print_memory_map("user");
-       }
-}
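The mem= and memmap= handling above accepts boot lines such as the following (sizes go through memparse(), so the K/M suffixes work; all values here are purely illustrative):

	mem=512M                          clip the BIOS map at 512 MB
	memmap=exactmap memmap=640K@0 memmap=255M@1M
	                                  discard the BIOS map and describe RAM explicitly
	memmap=64M#0x7c000000             mark a range as ACPI data ('#'); '$' marks it reserved
	highmem=64M                       force exactly 64 MB of highmem

On this Xen port the string is taken from xen_start_info.cmd_line rather than from the boot loader, but the syntax is unchanged.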
-
-/*
- * Callback for efi_memory_walk.
- */
-static int __init
-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long *max_pfn = arg, pfn;
-
-       if (start < end) {
-               pfn = PFN_UP(end -1);
-               if (pfn > *max_pfn)
-                       *max_pfn = pfn;
-       }
-       return 0;
-}
-
-
-/*
- * Find the highest page frame number we have available
- */
-void __init find_max_pfn(void)
-{
-       int i;
-
-       max_pfn = 0;
-       if (efi_enabled) {
-               efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-               return;
-       }
-
-       for (i = 0; i < e820.nr_map; i++) {
-               unsigned long start, end;
-               /* RAM? */
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-               start = PFN_UP(e820.map[i].addr);
-               end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-               if (start >= end)
-                       continue;
-               if (end > max_pfn)
-                       max_pfn = end;
-       }
-}
-
-/*
- * Determine low and high memory ranges:
- */
-unsigned long __init find_max_low_pfn(void)
-{
-       unsigned long max_low_pfn;
-
-       max_low_pfn = max_pfn;
-       if (max_low_pfn > MAXMEM_PFN) {
-               if (highmem_pages == -1)
-                       highmem_pages = max_pfn - MAXMEM_PFN;
-               if (highmem_pages + MAXMEM_PFN < max_pfn)
-                       max_pfn = MAXMEM_PFN + highmem_pages;
-               if (highmem_pages + MAXMEM_PFN > max_pfn) {
-                       printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
-                       highmem_pages = 0;
-               }
-               max_low_pfn = MAXMEM_PFN;
-#ifndef CONFIG_HIGHMEM
-               /* Maximum memory usable is what is directly addressable */
-               printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-                                       MAXMEM>>20);
-               if (max_pfn > MAX_NONPAE_PFN)
-                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-               else
-                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-               max_pfn = MAXMEM_PFN;
-#else /* !CONFIG_HIGHMEM */
-#ifndef CONFIG_X86_PAE
-               if (max_pfn > MAX_NONPAE_PFN) {
-                       max_pfn = MAX_NONPAE_PFN;
-                       printk(KERN_WARNING "Warning only 4GB will be used.\n");
-                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-               }
-#endif /* !CONFIG_X86_PAE */
-#endif /* !CONFIG_HIGHMEM */
-       } else {
-               if (highmem_pages == -1)
-                       highmem_pages = 0;
-#ifdef CONFIG_HIGHMEM
-               if (highmem_pages >= max_pfn) {
-                       printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
-                       highmem_pages = 0;
-               }
-               if (highmem_pages) {
-                       if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
-                               printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
-                               highmem_pages = 0;
-                       }
-                       max_low_pfn -= highmem_pages;
-               }
-#else
-               if (highmem_pages)
-                       printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
-#endif
-       }
-       return max_low_pfn;
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-
-/*
- * Free all available memory for boot time allocation.  Used
- * as a callback function by efi_memory_walk()
- */
-
-static int __init
-free_available_memory(unsigned long start, unsigned long end, void *arg)
-{
-       /* check max_low_pfn */
-       if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
-               return 0;
-       if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
-               end = (max_low_pfn + 1) << PAGE_SHIFT;
-       if (start < end)
-               free_bootmem(start, end - start);
-
-       return 0;
-}
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-{
-       int i;
-
-       if (efi_enabled) {
-               efi_memmap_walk(free_available_memory, NULL);
-               return;
-       }
-       for (i = 0; i < e820.nr_map; i++) {
-               unsigned long curr_pfn, last_pfn, size;
-               /*
-                * Reserve usable low memory
-                */
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-               /*
-                * We are rounding up the start address of usable memory:
-                */
-               curr_pfn = PFN_UP(e820.map[i].addr);
-               if (curr_pfn >= max_low_pfn)
-                       continue;
-               /*
-                * ... and at the end of the usable range downwards:
-                */
-               last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-
-               if (last_pfn > max_low_pfn)
-                       last_pfn = max_low_pfn;
-
-               /*
-                * .. finally, did all the rounding and playing
-                * around just make the area go away?
-                */
-               if (last_pfn <= curr_pfn)
-                       continue;
-
-               size = last_pfn - curr_pfn;
-               free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-       }
-}
-
-static unsigned long __init setup_memory(void)
-{
-       unsigned long bootmap_size, start_pfn, max_low_pfn;
-
-       /*
-        * partially used pages are not usable - thus
-        * we are rounding upwards:
-        */
-       start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
-
-       find_max_pfn();
-
-       max_low_pfn = find_max_low_pfn();
-
-#ifdef CONFIG_HIGHMEM
-       highstart_pfn = highend_pfn = max_pfn;
-       if (max_pfn > max_low_pfn) {
-               highstart_pfn = max_low_pfn;
-       }
-       printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-               pages_to_mb(highend_pfn - highstart_pfn));
-#endif
-       printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-                       pages_to_mb(max_low_pfn));
-       /*
-        * Initialize the boot-time allocator (with low memory only):
-        */
-       bootmap_size = init_bootmem(start_pfn, max_low_pfn);
-
-       register_bootmem_low_pages(max_low_pfn);
-
-       /*
-        * Reserve the bootmem bitmap itself as well. We do this in two
-        * steps (first step was init_bootmem()) because this catches
-        * the (very unlikely) case of us accidentally initializing the
-        * bootmem allocator with an invalid RAM area.
-        */
-       reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
-                        bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
-
-       /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
-          PCI prefetch into it (errata #56). Usually the page is reserved anyway,
-          unless you have no PS/2 mouse plugged in. */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-           boot_cpu_data.x86 == 6)
-            reserve_bootmem(0xa0000 - 4096, 4096);
-
-#ifdef CONFIG_SMP
-       /*
-        * But first pinch a few for the stack/trampoline stuff
-        * FIXME: Don't need the extra page at 4K, but need to fix
-        * trampoline before removing it. (see the GDT stuff)
-        */
-       reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-#endif
-#ifdef CONFIG_ACPI_SLEEP
-       /*
-        * Reserve low memory region for sleep support.
-        */
-       acpi_reserve_bootmem();
-#endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
-       /*
-        * Find and reserve possible boot-time SMP configuration:
-        */
-       find_smp_config();
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (xen_start_info.mod_start) {
-               if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-                       /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
-                       initrd_start = INITRD_START + PAGE_OFFSET;
-                       initrd_end = initrd_start+INITRD_SIZE;
-                       initrd_below_start_ok = 1;
-               }
-               else {
-                       printk(KERN_ERR "initrd extends beyond end of memory "
-                           "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-                           INITRD_START + INITRD_SIZE,
-                           max_low_pfn << PAGE_SHIFT);
-                       initrd_start = 0;
-               }
-       }
-#endif
-
-       phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
-
-       return max_low_pfn;
-}
-#else
-extern unsigned long setup_memory(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-/*
- * Request address space for all standard RAM and ROM resources
- * and also for regions reported as reserved by the e820.
- */
-static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-{
-       int i;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       probe_roms();
-#endif
-       for (i = 0; i < e820.nr_map; i++) {
-               struct resource *res;
-               if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-                       continue;
-               res = alloc_bootmem_low(sizeof(struct resource));
-               switch (e820.map[i].type) {
-               case E820_RAM:  res->name = "System RAM"; break;
-               case E820_ACPI: res->name = "ACPI Tables"; break;
-               case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
-               default:        res->name = "reserved";
-               }
-               res->start = e820.map[i].addr;
-               res->end = res->start + e820.map[i].size - 1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-               request_resource(&iomem_resource, res);
-               if (e820.map[i].type == E820_RAM) {
-                       /*
-                        *  We don't know which RAM region contains kernel data,
-                        *  so we try it repeatedly and let the resource manager
-                        *  test it.
-                        */
-                       request_resource(res, code_resource);
-                       request_resource(res, data_resource);
-               }
-       }
-}
-
-/*
- * Request address space for all standard resources
- */
-static void __init register_memory(unsigned long max_low_pfn)
-{
-       unsigned long low_mem_size;
-       int           i;
-
-       if (efi_enabled)
-               efi_initialize_iomem_resources(&code_resource, &data_resource);
-       else
-               legacy_init_iomem_resources(&code_resource, &data_resource);
-
-       /* EFI systems may still have VGA */
-       request_resource(&iomem_resource, &video_ram_resource);
-
-       /* request I/O space for devices used on all i[345]86 PCs */
-       for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-               request_resource(&ioport_resource, &standard_io_resources[i]);
-
-       /* Tell the PCI layer not to allocate too close to the RAM area.. */
-       low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
-       if (low_mem_size > pci_mem_start)
-               pci_mem_start = low_mem_size;
-}
-
-/* Use inline assembly to define this because the nops are defined 
-   as inline assembly strings in the include files and we cannot 
-   get them easily into strings. */
-asm("\t.data\nintelnops: " 
-    GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-    GENERIC_NOP7 GENERIC_NOP8); 
-asm("\t.data\nk8nops: " 
-    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-    K8_NOP7 K8_NOP8); 
-asm("\t.data\nk7nops: " 
-    K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-    K7_NOP7 K7_NOP8); 
-    
-extern unsigned char intelnops[], k8nops[], k7nops[];
-static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
-     NULL,
-     intelnops,
-     intelnops + 1,
-     intelnops + 1 + 2,
-     intelnops + 1 + 2 + 3,
-     intelnops + 1 + 2 + 3 + 4,
-     intelnops + 1 + 2 + 3 + 4 + 5,
-     intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-     intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-}; 
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
-     NULL,
-     k8nops,
-     k8nops + 1,
-     k8nops + 1 + 2,
-     k8nops + 1 + 2 + 3,
-     k8nops + 1 + 2 + 3 + 4,
-     k8nops + 1 + 2 + 3 + 4 + 5,
-     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-}; 
-static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
-     NULL,
-     k7nops,
-     k7nops + 1,
-     k7nops + 1 + 2,
-     k7nops + 1 + 2 + 3,
-     k7nops + 1 + 2 + 3 + 4,
-     k7nops + 1 + 2 + 3 + 4 + 5,
-     k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-     k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-}; 
-static struct nop { 
-     int cpuid; 
-     unsigned char **noptable; 
-} noptypes[] = { 
-     { X86_FEATURE_K8, k8_nops }, 
-     { X86_FEATURE_K7, k7_nops }, 
-     { -1, NULL }
-}; 
-
-/* Replace instructions with better alternatives for this CPU type.
-
-   This runs before SMP is initialized to avoid SMP problems with
-   self-modifying code. This implies that asymmetric systems where
-   APs have fewer capabilities than the boot processor are not handled.
-   In this case boot with "noreplacement". */ 
-void apply_alternatives(void *start, void *end) 
-{ 
-       struct alt_instr *a; 
-       int diff, i, k;
-        unsigned char **noptable = intel_nops; 
-       for (i = 0; noptypes[i].cpuid >= 0; i++) { 
-               if (boot_cpu_has(noptypes[i].cpuid)) { 
-                       noptable = noptypes[i].noptable;
-                       break;
-               }
-       } 
-       for (a = start; (void *)a < end; a++) { 
-               if (!boot_cpu_has(a->cpuid))
-                       continue;
-               BUG_ON(a->replacementlen > a->instrlen); 
-               memcpy(a->instr, a->replacement, a->replacementlen); 
-               diff = a->instrlen - a->replacementlen; 
-               /* Pad the rest with nops */
-               for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-                       k = diff;
-                       if (k > ASM_NOP_MAX)
-                               k = ASM_NOP_MAX;
-                       memcpy(a->instr + i, noptable[k], k); 
-               } 
-       }
-} 
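apply_alternatives() above overwrites each instruction with its replacement and fills the leftover bytes with nops from the per-vendor tables, at most ASM_NOP_MAX bytes per chunk. A stand-alone sketch of just that padding arithmetic (0x90 is a stand-in single-byte nop; the real code copies the multi-byte sequences declared above):

	/* Illustrative only: pad a 7-byte slot after writing a 4-byte replacement. */
	unsigned char slot[7];                               /* original instruction bytes    */
	unsigned char repl[4] = { 0x90, 0x90, 0x90, 0x90 };  /* placeholder replacement bytes */
	int diff, i, k;

	memcpy(slot, repl, sizeof(repl));
	diff = sizeof(slot) - sizeof(repl);                  /* 3 bytes left to cover         */
	for (i = sizeof(repl); diff > 0; diff -= k, i += k) {
		k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff; /* ASM_NOP_MAX is 8 on i386      */
		memset(slot + i, 0x90, k);                   /* real code: noptable[k] bytes  */
	}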
-
-static int no_replacement __initdata = 0; 
-void __init alternative_instructions(void)
-{
-       extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-       if (no_replacement) 
-               return;
-       apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
-static int __init noreplacement_setup(char *s)
-{ 
-     no_replacement = 1; 
-     return 0; 
-} 
-
-__setup("noreplacement", noreplacement_setup); 
-
-static char * __init machine_specific_memory_setup(void);
-
-/*
- * Determine if we were loaded by an EFI loader.  If so, then we have also been
- * passed the efi memmap, systab, etc., so we should use these data structures
- * for initialization.  Note, the efi init code path is determined by the
- * global efi_enabled. This allows the same kernel image to be used on existing
- * systems (with a traditional BIOS) as well as on EFI systems.
- */
-void __init setup_arch(char **cmdline_p)
-{
-        int i,j;
-
-        unsigned long max_low_pfn;
-
-       HYPERVISOR_vm_assist(VMASST_CMD_enable,
-                            VMASST_TYPE_4gb_segments);
-
-       pm_idle = xen_cpu_idle;
-
-       memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-       early_cpu_init();
-
-       /*
-        * FIXME: This isn't an official loader_type right
-        * now but does currently work with elilo.
-        * If we were configured as an EFI kernel, check to make
-        * sure that we were loaded correctly from elilo and that
-        * the system table is valid.  If not, then initialize normally.
-        */
-#ifdef CONFIG_EFI
-       if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
-               efi_enabled = 1;
-#endif
-
-       ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); /*old_decode_dev(ORIG_ROOT_DEV);*/
-       drive_info = DRIVE_INFO;
-       screen_info = SCREEN_INFO;
-       edid_info = EDID_INFO;
-       apm_info.bios = APM_BIOS_INFO;
-       ist_info = IST_INFO;
-       saved_videomode = VIDEO_MODE;
-       if( SYS_DESC_TABLE.length != 0 ) {
-               MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
-               machine_id = SYS_DESC_TABLE.table[0];
-               machine_submodel_id = SYS_DESC_TABLE.table[1];
-               BIOS_revision = SYS_DESC_TABLE.table[2];
-       }
-       aux_device_present = AUX_DEVICE_INFO;
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-       /* This is drawn from a dump from vgacon:startup in standard Linux. */
-       screen_info.orig_video_mode = 3; 
-       screen_info.orig_video_isVGA = 1;
-       screen_info.orig_video_lines = 25;
-       screen_info.orig_video_cols = 80;
-       screen_info.orig_video_ega_bx = 3;
-       screen_info.orig_video_points = 16;
-#endif
-
-#ifdef CONFIG_BLK_DEV_RAM
-       rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-       rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-       rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-#endif
-       ARCH_SETUP
-       if (efi_enabled)
-               efi_init();
-       else {
-               printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-               print_memory_map(machine_specific_memory_setup());
-       }
-
-       copy_edd();
-
-       if (!MOUNT_ROOT_RDONLY)
-               root_mountflags &= ~MS_RDONLY;
-       init_mm.start_code = (unsigned long) _text;
-       init_mm.end_code = (unsigned long) _etext;
-       init_mm.end_data = (unsigned long) _edata;
-       init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
-
-       code_resource.start = virt_to_phys(_text);
-       code_resource.end = virt_to_phys(_etext)-1;
-       data_resource.start = virt_to_phys(_etext);
-       data_resource.end = virt_to_phys(_edata)-1;
-
-       parse_cmdline_early(cmdline_p);
-
-       max_low_pfn = setup_memory();
-
-       /*
-        * NOTE: before this point _nobody_ is allowed to allocate
-        * any memory using the bootmem allocator.
-        */
-
-#ifdef CONFIG_SMP
-       smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-#endif
-       paging_init();
-
-       pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
-       for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
-       {       
-            pfn_to_mfn_frame_list[j] = 
-                 virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
-       }
-       HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
-            virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
-
-
-#ifdef CONFIG_EARLY_PRINTK
-       {
-               char *s = strstr(*cmdline_p, "earlyprintk=");
-               if (s) {
-                       extern void setup_early_printk(char *);
-
-                       setup_early_printk(s);
-                       printk("early console enabled\n");
-               }
-       }
-#endif
-
-
-       dmi_scan_machine();
-
-#ifdef CONFIG_X86_GENERICARCH
-       generic_apic_probe(*cmdline_p);
-#endif 
-       if (efi_enabled)
-               efi_map_memmap();
-
-       /*
-        * Parse the ACPI tables for possible boot-time SMP configuration.
-        */
-       acpi_boot_init();
-
-#ifdef CONFIG_X86_LOCAL_APIC
-       if (smp_found_config)
-               get_smp_config();
-#endif
-
-       register_memory(max_low_pfn);
-
-       /* If we are a privileged guest OS then we should request IO privs. */
-       if (xen_start_info.flags & SIF_PRIVILEGED) {
-               dom0_op_t op;
-               op.cmd           = DOM0_IOPL;
-               op.u.iopl.domain = DOMID_SELF;
-               op.u.iopl.iopl   = 1;
-               if (HYPERVISOR_dom0_op(&op) != 0)
-                       panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
-               current->thread.io_pl = 1;
-       }
-
-       if (xen_start_info.flags & SIF_INITDOMAIN) {
-               if (!(xen_start_info.flags & SIF_PRIVILEGED))
-                       panic("Xen granted us console access "
-                             "but not privileged status");
-
-#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
-               if (!efi_enabled ||
-                   (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-                       conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-               conswitchp = &dummy_con;
-#endif
-#endif
-       } else {
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-               extern const struct consw xennull_con;
-               extern int console_use_vt;
-#if defined(CONFIG_VGA_CONSOLE)
-               /* disable VGA driver */
-               ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
-#endif
-               conswitchp = &xennull_con;
-               console_use_vt = 0;
-#endif
-       }
-}
-
-#include "setup_arch_post.h"
-/*
- * Local Variables:
- * mode:c
- * c-file-style:"k&r"
- * c-basic-offset:8
- * End:
- */
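setup_arch() above hands Xen the guest's phys-to-machine table by building pfn_to_mfn_frame_list: one entry per page-sized chunk of phys_to_machine_mapping, each holding the machine frame that backs that chunk. A userspace model of the indexing, with a hypothetical identity-mapped virt_to_mfn() standing in for virt_to_machine() >> PAGE_SHIFT:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))

/* Hypothetical stand-in: pretend virtual addresses map 1:1 to machine frames. */
static unsigned long virt_to_mfn(const void *va)
{
        return (unsigned long)va >> PAGE_SHIFT;
}

int main(void)
{
        unsigned long max_pfn = 10000;          /* hypothetical guest size */
        unsigned long *phys_to_machine_mapping;
        unsigned long *frame_list;
        unsigned long i, j, nframes;

        nframes = (max_pfn + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
        phys_to_machine_mapping = calloc(max_pfn, sizeof(unsigned long));
        frame_list = calloc(nframes, sizeof(unsigned long));

        /* One frame-list entry per page of the phys-to-machine array. */
        for (i = 0, j = 0; i < max_pfn; i += ENTRIES_PER_PAGE, j++)
                frame_list[j] = virt_to_mfn(&phys_to_machine_mapping[i]);

        printf("%lu p2m entries -> %lu frame-list entries\n", max_pfn, j);
        free(phys_to_machine_mapping);
        free(frame_list);
        return 0;
}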
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/signal.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/signal.c
deleted file mode 100644 (file)
index a8debd4..0000000
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- *  linux/arch/i386/kernel/signal.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
- *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/elf.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/i387.h>
-#include "sigframe.h"
-
-#define DEBUG_SIG 0
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
-{
-       struct pt_regs * regs = (struct pt_regs *) &history0;
-       sigset_t saveset;
-
-       mask &= _BLOCKABLE;
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       siginitset(&current->blocked, mask);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->eax = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(regs, &saveset))
-                       return -EINTR;
-       }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(struct pt_regs regs)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (regs.ecx != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs.eax = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&regs, &saveset))
-                       return -EINTR;
-       }
-}
-
-asmlinkage int 
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-             struct old_sigaction __user *oact)
-{
-       struct k_sigaction new_ka, old_ka;
-       int ret;
-
-       if (act) {
-               old_sigset_t mask;
-               if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-                       return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
-               siginitset(&new_ka.sa.sa_mask, mask);
-       }
-
-       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
-       if (!ret && oact) {
-               if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-                       return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-       }
-
-       return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(unsigned long ebx)
-{
-       /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
-       struct pt_regs *regs = (struct pt_regs *)&ebx;
-       const stack_t __user *uss = (const stack_t __user *)ebx;
-       stack_t __user *uoss = (stack_t __user *)regs->ecx;
-
-       return do_sigaltstack(uss, uoss, regs->esp);
-}
-
-
-/*
- * Do a signal return; undo the signal stack.
- */
-
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
-{
-       unsigned int err = 0;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-#define COPY(x)                err |= __get_user(regs->x, &sc->x)
-
-#define COPY_SEG(seg)                                                  \
-       { unsigned short tmp;                                           \
-         err |= __get_user(tmp, &sc->seg);                             \
-         regs->x##seg = tmp; }
-
-#define COPY_SEG_STRICT(seg)                                           \
-       { unsigned short tmp;                                           \
-         err |= __get_user(tmp, &sc->seg);                             \
-         regs->x##seg = tmp|3; }
-
-#define GET_SEG(seg)                                                   \
-       { unsigned short tmp;                                           \
-         err |= __get_user(tmp, &sc->seg);                             \
-         loadsegment(seg,tmp); }
-
-#define        FIX_EFLAGS      (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
-                        X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
-                        X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
-
-       GET_SEG(gs);
-       GET_SEG(fs);
-       COPY_SEG(es);
-       COPY_SEG(ds);
-       COPY(edi);
-       COPY(esi);
-       COPY(ebp);
-       COPY(esp);
-       COPY(ebx);
-       COPY(edx);
-       COPY(ecx);
-       COPY(eip);
-       COPY_SEG_STRICT(cs);
-       COPY_SEG_STRICT(ss);
-       
-       {
-               unsigned int tmpflags;
-               err |= __get_user(tmpflags, &sc->eflags);
-               regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-               regs->orig_eax = -1;            /* disable syscall checks */
-       }
-
-       {
-               struct _fpstate __user * buf;
-               err |= __get_user(buf, &sc->fpstate);
-               if (buf) {
-                       if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
-                               goto badframe;
-                       err |= restore_i387(buf);
-               }
-       }
-
-       err |= __get_user(*peax, &sc->eax);
-       return err;
-
-badframe:
-       return 1;
-}
-
-asmlinkage int sys_sigreturn(unsigned long __unused)
-{
-       struct pt_regs *regs = (struct pt_regs *) &__unused;
-       struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
-       sigset_t set;
-       int eax;
-
-       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__get_user(set.sig[0], &frame->sc.oldmask)
-           || (_NSIG_WORDS > 1
-               && __copy_from_user(&set.sig[1], &frame->extramask,
-                                   sizeof(frame->extramask))))
-               goto badframe;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       if (restore_sigcontext(regs, &frame->sc, &eax))
-               goto badframe;
-       return eax;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}      
-
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
-       struct pt_regs *regs = (struct pt_regs *) &__unused;
-       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
-       sigset_t set;
-       int eax;
-
-       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-               goto badframe;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
-               goto badframe;
-
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
-               goto badframe;
-
-       return eax;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}      
-
-/*
- * Set up a signal frame.
- */
-
-static int
-setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
-                struct pt_regs *regs, unsigned long mask)
-{
-       int tmp, err = 0;
-
-       tmp = 0;
-       __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
-       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
-       err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-
-       err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
-       err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
-       err |= __put_user(regs->edi, &sc->edi);
-       err |= __put_user(regs->esi, &sc->esi);
-       err |= __put_user(regs->ebp, &sc->ebp);
-       err |= __put_user(regs->esp, &sc->esp);
-       err |= __put_user(regs->ebx, &sc->ebx);
-       err |= __put_user(regs->edx, &sc->edx);
-       err |= __put_user(regs->ecx, &sc->ecx);
-       err |= __put_user(regs->eax, &sc->eax);
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->eip, &sc->eip);
-       err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->eflags, &sc->eflags);
-       err |= __put_user(regs->esp, &sc->esp_at_signal);
-       err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
-
-       tmp = save_i387(fpstate);
-       if (tmp < 0)
-         err = 1;
-       else
-         err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
-
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
-
-       return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-{
-       unsigned long esp;
-
-       /* Default to using normal stack */
-       esp = regs->esp;
-
-       /* This is the X/Open sanctioned signal stack switching.  */
-       if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (sas_ss_flags(esp) == 0)
-                       esp = current->sas_ss_sp + current->sas_ss_size;
-       }
-
-       /* This is the legacy signal stack switching. */
-       else if ((regs->xss & 0xffff) != __USER_DS &&
-                !(ka->sa.sa_flags & SA_RESTORER) &&
-                ka->sa.sa_restorer) {
-               esp = (unsigned long) ka->sa.sa_restorer;
-       }
-
-       return (void __user *)((esp - frame_size) & -8ul);
-}
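get_sigframe() above picks a stack (the current one, the sigaltstack, or a legacy restorer-derived address) and then rounds the frame down to an 8-byte boundary with (esp - frame_size) & -8ul. A standalone sketch of that address arithmetic, with made-up example values:

#include <stdio.h>

/* Round a candidate stack pointer down to an 8-byte-aligned frame address. */
static unsigned long frame_addr(unsigned long esp, unsigned long frame_size)
{
        return (esp - frame_size) & -8ul;
}

int main(void)
{
        /* Hypothetical user stack pointer and a 216-byte struct sigframe. */
        unsigned long esp  = 0xbffff123ul;
        unsigned long size = 216;

        printf("esp=%#lx size=%lu -> frame at %#lx\n",
               esp, size, frame_addr(esp, size));
        return 0;
}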
-
-/* These symbols are defined with the addresses in the vsyscall page.
-   See vsyscall-sigreturn.S.  */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-static void setup_frame(int sig, struct k_sigaction *ka,
-                       sigset_t *set, struct pt_regs * regs)
-{
-       void __user *restorer;
-       struct sigframe __user *frame;
-       int err = 0;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       err |= __put_user((current_thread_info()->exec_domain
-                          && current_thread_info()->exec_domain->signal_invmap
-                          && sig < 32
-                          ? current_thread_info()->exec_domain->signal_invmap[sig]
-                          : sig),
-                         &frame->sig);
-       if (err)
-               goto give_sigsegv;
-
-       err |= setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
-       if (err)
-               goto give_sigsegv;
-
-       if (_NSIG_WORDS > 1) {
-               err |= __copy_to_user(&frame->extramask, &set->sig[1],
-                                     sizeof(frame->extramask));
-       }
-       if (err)
-               goto give_sigsegv;
-
-       restorer = &__kernel_sigreturn;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-
-       /* Set up to return from userspace.  */
-       err |= __put_user(restorer, &frame->pretcode);
-        
-       /*
-        * This is popl %eax ; movl $,%eax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
-       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       regs->esp = (unsigned long) frame;
-       regs->eip = (unsigned long) ka->sa.sa_handler;
-
-       set_fs(USER_DS);
-       regs->xds = __USER_DS;
-       regs->xes = __USER_DS;
-       regs->xss = __USER_DS;
-       regs->xcs = __USER_CS;
-       regs->eflags &= ~TF_MASK;
-
-#if DEBUG_SIG
-       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-               current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
-       return;
-
-give_sigsegv:
-       if (sig == SIGSEGV)
-               ka->sa.sa_handler = SIG_DFL;
-       force_sig(SIGSEGV, current);
-}
-
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-                          sigset_t *set, struct pt_regs * regs)
-{
-       void __user *restorer;
-       struct rt_sigframe __user *frame;
-       int err = 0;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       err |= __put_user((current_thread_info()->exec_domain
-                          && current_thread_info()->exec_domain->signal_invmap
-                          && sig < 32
-                          ? current_thread_info()->exec_domain->signal_invmap[sig]
-                          : sig),
-                         &frame->sig);
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               goto give_sigsegv;
-
-       /* Create the ucontext.  */
-       err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->esp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
-                               regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up to return from userspace.  */
-       restorer = &__kernel_rt_sigreturn;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       err |= __put_user(restorer, &frame->pretcode);
-        
-       /*
-        * This is movl $,%eax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
-       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       regs->esp = (unsigned long) frame;
-       regs->eip = (unsigned long) ka->sa.sa_handler;
-
-       set_fs(USER_DS);
-       regs->xds = __USER_DS;
-       regs->xes = __USER_DS;
-       regs->xss = __USER_DS;
-       regs->xcs = __USER_CS;
-       regs->eflags &= ~TF_MASK;
-
-#if DEBUG_SIG
-       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-               current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
-       return;
-
-give_sigsegv:
-       if (sig == SIGSEGV)
-               ka->sa.sa_handler = SIG_DFL;
-       force_sig(SIGSEGV, current);
-}
-
-/*
- * OK, we're invoking a handler
- */    
-
-static void
-handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
-       struct pt_regs * regs)
-{
-       struct k_sigaction *ka = &current->sighand->action[sig-1];
-
-       /* Are we from a system call? */
-       if (regs->orig_eax >= 0) {
-               /* If so, check system call restarting.. */
-               switch (regs->eax) {
-                       case -ERESTART_RESTARTBLOCK:
-                       case -ERESTARTNOHAND:
-                               regs->eax = -EINTR;
-                               break;
-
-                       case -ERESTARTSYS:
-                               if (!(ka->sa.sa_flags & SA_RESTART)) {
-                                       regs->eax = -EINTR;
-                                       break;
-                               }
-                       /* fallthrough */
-                       case -ERESTARTNOINTR:
-                               regs->eax = regs->orig_eax;
-                               regs->eip -= 2;
-               }
-       }
-
-       /* Set up the stack frame */
-       if (ka->sa.sa_flags & SA_SIGINFO)
-               setup_rt_frame(sig, ka, info, oldset, regs);
-       else
-               setup_frame(sig, ka, oldset, regs);
-
-       if (ka->sa.sa_flags & SA_ONESHOT)
-               ka->sa.sa_handler = SIG_DFL;
-
-       if (!(ka->sa.sa_flags & SA_NODEFER)) {
-               spin_lock_irq(&current->sighand->siglock);
-               sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-               sigaddset(&current->blocked,sig);
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-       }
-}
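handle_signal() above decides, for a signal that interrupted a system call, whether the call fails with -EINTR or is restarted by rewinding EIP over the two-byte int $0x80. A userspace sketch of just that decision; the error numbers and SA_RESTART value are taken from the Linux headers rather than from this diff, and the register values in main() are made up:

#include <stdio.h>

/* Values as defined in the 2.6 kernel headers. */
#define EINTR                    4
#define ERESTARTSYS            512
#define ERESTARTNOINTR         513
#define ERESTARTNOHAND         514
#define ERESTART_RESTARTBLOCK  516

#define SA_RESTART     0x10000000

struct regs { long eax; long orig_eax; unsigned long eip; };

/* Mirror handle_signal(): fix up a syscall return before delivering a signal. */
static void restart_fixup(struct regs *r, unsigned long sa_flags)
{
        if (r->orig_eax < 0)
                return;                 /* not coming from a system call */

        switch (r->eax) {
        case -ERESTART_RESTARTBLOCK:
        case -ERESTARTNOHAND:
                r->eax = -EINTR;
                break;
        case -ERESTARTSYS:
                if (!(sa_flags & SA_RESTART)) {
                        r->eax = -EINTR;
                        break;
                }
                /* fall through */
        case -ERESTARTNOINTR:
                r->eax = r->orig_eax;
                r->eip -= 2;            /* re-execute the int $0x80 */
        }
}

int main(void)
{
        struct regs r = { -ERESTARTSYS, 4 /* __NR_write */, 0x08048123 };

        restart_fixup(&r, SA_RESTART);
        printf("eax=%ld eip=%#lx\n", r.eax, r.eip);
        return 0;
}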
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
-{
-       siginfo_t info;
-       int signr;
-
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if ((regs->xcs & 2) != 2)
-               return 1;
-
-       if (current->flags & PF_FREEZE) {
-               refrigerator(0);
-               goto no_signal;
-       }
-
-       if (!oldset)
-               oldset = &current->blocked;
-
-       signr = get_signal_to_deliver(&info, regs, NULL);
-       if (signr > 0) {
-               /* Reenable any watchpoints before delivering the
-                * signal to user space. The processor register will
-                * have been cleared if the watchpoint triggered
-                * inside the kernel.
-                */
-               if (current->thread.debugreg[7])
-                       HYPERVISOR_set_debugreg(7,
-                                               current->thread.debugreg[7]);
-
-               /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &info, oldset, regs);
-               return 1;
-       }
-
- no_signal:
-       /* Did we come from a system call? */
-       if (regs->orig_eax >= 0) {
-               /* Restart the system call - no handlers present */
-               if (regs->eax == -ERESTARTNOHAND ||
-                   regs->eax == -ERESTARTSYS ||
-                   regs->eax == -ERESTARTNOINTR) {
-                       regs->eax = regs->orig_eax;
-                       regs->eip -= 2;
-               }
-               if (regs->eax == -ERESTART_RESTARTBLOCK){
-                       regs->eax = __NR_restart_syscall;
-                       regs->eip -= 2;
-               }
-       }
-       return 0;
-}
-
-/*
- * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
- */
-__attribute__((regparm(3)))
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-                     __u32 thread_info_flags)
-{
-       /* Pending single-step? */
-       if (thread_info_flags & _TIF_SINGLESTEP) {
-               regs->eflags |= TF_MASK;
-               clear_thread_flag(TIF_SINGLESTEP);
-       }
-       /* deal with pending signal delivery */
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
-       
-       clear_thread_flag(TIF_IRET);
-}
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/sysenter.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/sysenter.c
deleted file mode 100644 (file)
index f8b056f..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/arch/i386/kernel/sysenter.c
- *
- * (C) Copyright 2002 Linus Torvalds
- *
- * This file contains the needed initializations to support sysenter.
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/thread_info.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <linux/string.h>
-#include <linux/elf.h>
-
-#include <asm/cpufeature.h>
-#include <asm/msr.h>
-#include <asm/pgtable.h>
-#include <asm/unistd.h>
-
-extern asmlinkage void sysenter_entry(void);
-
-void enable_sep_cpu(void *info)
-{
-       int cpu = get_cpu();
-       struct tss_struct *tss = init_tss + cpu;
-
-       tss->ss1 = __KERNEL_CS;
-       tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
-       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-       wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
-       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
-       put_cpu();      
-}
-
-/*
- * These symbols are defined by vsyscall.o to mark the bounds
- * of the ELF DSO images included therein.
- */
-extern const char vsyscall_int80_start, vsyscall_int80_end;
-extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-
-static int __init sysenter_setup(void)
-{
-       unsigned long page = get_zeroed_page(GFP_ATOMIC);
-
-       __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
-
-       if (1 /* XXXcl not yet */ || !boot_cpu_has(X86_FEATURE_SEP)) {
-               memcpy((void *) page,
-                      &vsyscall_int80_start,
-                      &vsyscall_int80_end - &vsyscall_int80_start);
-               return 0;
-       }
-
-       memcpy((void *) page,
-              &vsyscall_sysenter_start,
-              &vsyscall_sysenter_end - &vsyscall_sysenter_start);
-
-       on_each_cpu(enable_sep_cpu, NULL, 1, 1);
-       return 0;
-}
-
-__initcall(sysenter_setup);
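sysenter_setup() above always installs the int $0x80 vsyscall DSO, because the Xen port does not yet allow the sysenter path (the 1 /* XXXcl not yet */ short-circuit). A userspace sketch of the same select-and-copy pattern, with placeholder byte arrays standing in for the linker-provided DSO images:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Placeholder images; in the kernel these are linker-provided symbol ranges. */
static const char vsyscall_int80[]    = "int80 vsyscall image";
static const char vsyscall_sysenter[] = "sysenter vsyscall image";

static char vsyscall_page[PAGE_SIZE];

static void sysenter_setup_sketch(int cpu_has_sep, int hypervisor_supports_sep)
{
        const char *img;
        size_t len;

        /* Prefer sysenter only when both the CPU and the hypervisor support it;
         * otherwise fall back to the int $0x80 image, as the kernel code does. */
        if (!cpu_has_sep || !hypervisor_supports_sep) {
                img = vsyscall_int80;
                len = sizeof(vsyscall_int80);
        } else {
                img = vsyscall_sysenter;
                len = sizeof(vsyscall_sysenter);
        }
        memcpy(vsyscall_page, img, len);
}

int main(void)
{
        sysenter_setup_sketch(1, 0);    /* SEP-capable CPU, but no Xen support yet */
        printf("vsyscall page holds: %s\n", vsyscall_page);
        return 0;
}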
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/time.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/time.c
deleted file mode 100644 (file)
index 15b68ba..0000000
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- *  linux/arch/i386/kernel/time.c
- *
- *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *
- * This file contains the PC-specific time handling details:
- * reading the RTC at bootup, etc..
- * 1994-07-02    Alan Modra
- *     fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26    Markus Kuhn
- *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- *      precision CMOS clock update
- * 1996-05-03    Ingo Molnar
- *      fixed time warps in do_[slow|fast]_gettimeoffset()
- * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
- *             "A Kernel Model for Precision Timekeeping" by Dave Mills
- * 1998-09-05    (Various)
- *     More robust do_fast_gettimeoffset() algorithm implemented
- *     (works with APM, Cyrix 6x86MX and Centaur C6),
- *     monotonic gettimeofday() with fast_get_timeoffset(),
- *     drift-proof precision TSC calibration on boot
- *     (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
- *     Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
- *     ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
- * 1998-12-16    Andrea Arcangeli
- *     Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
- *     because was not accounting lost_ticks.
- * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
- *     Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- *     serialize accesses to xtime/lost_ticks).
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/bcd.h>
-#include <linux/efi.h>
-#include <linux/sysctl.h>
-
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/irq.h>
-#include <asm/msr.h>
-#include <asm/delay.h>
-#include <asm/mpspec.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/timer.h>
-
-#include "mach_time.h"
-
-#include <linux/timex.h>
-#include <linux/config.h>
-
-#include <asm/hpet.h>
-
-#include <asm/arch_hooks.h>
-
-#include "io_ports.h"
-
-extern spinlock_t i8259A_lock;
-int pit_latch_buggy;              /* extern */
-
-#include "do_timer.h"
-
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
-unsigned long cpu_khz; /* Detected as we calibrate the TSC */
-
-extern unsigned long wall_jiffies;
-
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
-
-spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
-EXPORT_SYMBOL(i8253_lock);
-
-struct timer_opts *cur_timer = &timer_none;
-
-/* These are periodically updated in shared_info, and then copied here. */
-u32 shadow_tsc_stamp;
-u64 shadow_system_time;
-static u32 shadow_time_version;
-static struct timeval shadow_tv;
-extern u64 processed_system_time;
-
-/*
- * We use this to ensure that gettimeofday() is monotonically increasing. We
- * only break this guarantee if the wall clock jumps backwards "a long way".
- */
-static struct timeval last_seen_tv = {0,0};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-/* Periodically propagate synchronised time base to the RTC and to Xen. */
-static long last_rtc_update, last_update_to_xen;
-#endif
-
-/* Periodically take synchronised time base from Xen, if we need it. */
-static long last_update_from_xen;   /* UTC seconds when last read Xen clock. */
-
-/* Keep track of last time we did processing/updating of jiffies and xtime. */
-u64 processed_system_time;   /* System time (ns) at last processing. */
-
-#define NS_PER_TICK (1000000000ULL/HZ)
-
-#define HANDLE_USEC_UNDERFLOW(_tv) do {                \
-       while ((_tv).tv_usec < 0) {             \
-               (_tv).tv_usec += USEC_PER_SEC;  \
-               (_tv).tv_sec--;                 \
-       }                                       \
-} while (0)
-#define HANDLE_USEC_OVERFLOW(_tv) do {         \
-       while ((_tv).tv_usec >= USEC_PER_SEC) { \
-               (_tv).tv_usec -= USEC_PER_SEC;  \
-               (_tv).tv_sec++;                 \
-       }                                       \
-} while (0)
-static inline void __normalize_time(time_t *sec, s64 *nsec)
-{
-       while (*nsec >= NSEC_PER_SEC) {
-               (*nsec) -= NSEC_PER_SEC;
-               (*sec)++;
-       }
-       while (*nsec < 0) {
-               (*nsec) += NSEC_PER_SEC;
-               (*sec)--;
-       }
-}
-
-/* Does this guest OS track Xen time, or set its wall clock independently? */
-static int independent_wallclock = 0;
-static int __init __independent_wallclock(char *str)
-{
-       independent_wallclock = 1;
-       return 1;
-}
-__setup("independent_wallclock", __independent_wallclock);
-#define INDEPENDENT_WALLCLOCK() \
-    (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
-
-/*
- * Reads a consistent set of time-base values from Xen, into a shadow data
- * area. Must be called with the xtime_lock held for writing.
- */
-static void __get_time_values_from_xen(void)
-{
-       shared_info_t *s = HYPERVISOR_shared_info;
-
-       do {
-               shadow_time_version = s->time_version2;
-               rmb();
-               shadow_tv.tv_sec    = s->wc_sec;
-               shadow_tv.tv_usec   = s->wc_usec;
-               shadow_tsc_stamp    = (u32)s->tsc_timestamp;
-               shadow_system_time  = s->system_time;
-               rmb();
-       }
-       while (shadow_time_version != s->time_version1);
-
-       cur_timer->mark_offset();
-}
-
-#define TIME_VALUES_UP_TO_DATE \
- ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
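__get_time_values_from_xen() above is the reader half of a lock-free versioning protocol: it snapshots the shared-info fields and retries until time_version2 (read before the copy) matches time_version1 (read after). For that check to be sound, the writer presumably bumps time_version1 before updating the fields and time_version2 afterwards; the writer side below is modelled on that assumption, and memory barriers are omitted since the sketch is single-threaded:

#include <stdio.h>

/* Model of the relevant shared_info fields. */
struct shared_time {
        unsigned int time_version1;
        unsigned int time_version2;
        long         wc_sec;
        long         wc_usec;
};

struct shadow_time {
        long wc_sec;
        long wc_usec;
};

/* Writer side (assumed ordering): versions differ while an update is in flight. */
static void publish(struct shared_time *s, long sec, long usec)
{
        s->time_version1++;      /* versions now unequal: readers will retry */
        s->wc_sec  = sec;
        s->wc_usec = usec;
        s->time_version2++;      /* versions equal again: snapshot is stable */
}

/* Reader side, mirroring the deleted code: retry until a consistent snapshot. */
static void get_time_values(const struct shared_time *s, struct shadow_time *out)
{
        unsigned int version;

        do {
                version = s->time_version2;
                out->wc_sec  = s->wc_sec;
                out->wc_usec = s->wc_usec;
        } while (version != s->time_version1);
}

int main(void)
{
        struct shared_time s = { 0, 0, 0, 0 };
        struct shadow_time shadow;

        publish(&s, 1098723130, 250000);
        get_time_values(&s, &shadow);
        printf("shadow time: %ld.%06ld\n", shadow.wc_sec, shadow.wc_usec);
        return 0;
}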
-
-/*
- * This version of gettimeofday has microsecond resolution
- * and better than microsecond precision on fast x86 machines with TSC.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-       unsigned long seq;
-       unsigned long usec, sec;
-       unsigned long max_ntp_tick;
-       unsigned long flags;
-       s64 nsec;
-
-       do {
-               unsigned long lost;
-
-               seq = read_seqbegin(&xtime_lock);
-
-               usec = cur_timer->get_offset();
-               lost = jiffies - wall_jiffies;
-
-               /*
-                * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into the next possible interval.
-                * Better to lose some accuracy than have time go backwards..
-                */
-               if (unlikely(time_adjust < 0)) {
-                       max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-                       usec = min(usec, max_ntp_tick);
-
-                       if (lost)
-                               usec += lost * max_ntp_tick;
-               }
-               else if (unlikely(lost))
-                       usec += lost * (USEC_PER_SEC / HZ);
-
-               sec = xtime.tv_sec;
-               usec += (xtime.tv_nsec / NSEC_PER_USEC);
-
-               nsec = shadow_system_time - processed_system_time;
-               __normalize_time(&sec, &nsec);
-               usec += (long)nsec / NSEC_PER_USEC;
-
-               if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
-                       /*
-                        * We may have blocked for a long time,
-                        * rendering our calculations invalid
-                        * (e.g. the time delta may have
-                        * overflowed). Detect that and recalculate
-                        * with fresh values.
-                        */
-                       write_seqlock_irqsave(&xtime_lock, flags);
-                       __get_time_values_from_xen();
-                       write_sequnlock_irqrestore(&xtime_lock, flags);
-                       continue;
-               }
-       } while (read_seqretry(&xtime_lock, seq));
-
-       while (usec >= USEC_PER_SEC) {
-               usec -= USEC_PER_SEC;
-               sec++;
-       }
-
-       /* Ensure that time-of-day is monotonically increasing. */
-       if ((sec < last_seen_tv.tv_sec) ||
-           ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
-               sec = last_seen_tv.tv_sec;
-               usec = last_seen_tv.tv_usec;
-       } else {
-               last_seen_tv.tv_sec = sec;
-               last_seen_tv.tv_usec = usec;
-       }
-
-       tv->tv_sec = sec;
-       tv->tv_usec = usec;
-}
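do_gettimeofday() above ends by clamping the computed time against last_seen_tv so that wall-clock reads never move backwards even if the underlying calculation warps. A standalone sketch of that clamp, with made-up timestamps:

#include <stdio.h>

struct tv { long sec; long usec; };

/* Return t, but never earlier than *last; remember the value actually reported. */
static struct tv clamp_monotonic(struct tv t, struct tv *last)
{
        if (t.sec < last->sec ||
            (t.sec == last->sec && t.usec < last->usec))
                t = *last;              /* time went backwards: report the old value */
        else
                *last = t;              /* normal case: remember the newest value */
        return t;
}

int main(void)
{
        struct tv last = { 100, 500000 };
        struct tv backwards = { 100, 400000 };  /* hypothetical backwards jump */
        struct tv r = clamp_monotonic(backwards, &last);

        printf("reported %ld.%06ld (last stays %ld.%06ld)\n",
               r.sec, r.usec, last.sec, last.usec);
        return 0;
}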
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-       time_t wtm_sec, sec = tv->tv_sec;
-       long wtm_nsec;
-       s64 nsec;
-       struct timespec xentime;
-
-       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-               return -EINVAL;
-
-       if (!INDEPENDENT_WALLCLOCK())
-               return 0; /* Silent failure? */
-
-       write_seqlock_irq(&xtime_lock);
-
-       /*
-	 * Ensure we don't get blocked so long that our time delta overflows.
-	 * If that were to happen then our shadow time values would be stale,
-	 * so we retry with fresh ones.
-        */
- again:
-       nsec = tv->tv_nsec - cur_timer->get_offset() * NSEC_PER_USEC;
-       if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
-               __get_time_values_from_xen();
-               goto again;
-       }
-
-       __normalize_time(&sec, &nsec);
-       set_normalized_timespec(&xentime, sec, nsec);
-
-       /*
-        * This is revolting. We need to set "xtime" correctly. However, the
-        * value in this location is the value at the most recent update of
-        * wall time.  Discover what correction gettimeofday() would have
-        * made, and then undo it!
-        */
-       nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
-       nsec -= (shadow_system_time - processed_system_time);
-
-       __normalize_time(&sec, &nsec);
-       wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-       wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-       set_normalized_timespec(&xtime, sec, nsec);
-       set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-       time_adjust = 0;                /* stop active adjtime() */
-       time_status |= STA_UNSYNC;
-       time_maxerror = NTP_PHASE_LIMIT;
-       time_esterror = NTP_PHASE_LIMIT;
-
-       /* Reset all our running time counts. They make no sense now. */
-       last_seen_tv.tv_sec = 0;
-       last_update_from_xen = 0;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if (xen_start_info.flags & SIF_INITDOMAIN) {
-               dom0_op_t op;
-               last_rtc_update = last_update_to_xen = 0;
-               op.cmd = DOM0_SETTIME;
-               op.u.settime.secs        = xentime.tv_sec;
-               op.u.settime.usecs       = xentime.tv_nsec / NSEC_PER_USEC;
-               op.u.settime.system_time = shadow_system_time;
-               write_sequnlock_irq(&xtime_lock);
-               HYPERVISOR_dom0_op(&op);
-       } else
-#endif
-               write_sequnlock_irq(&xtime_lock);
-
-       clock_was_set();
-       return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static int set_rtc_mmss(unsigned long nowtime)
-{
-       int retval;
-
-       /* gets recalled with irq locally disabled */
-       spin_lock(&rtc_lock);
-       if (efi_enabled)
-               retval = efi_set_rtc_mmss(nowtime);
-       else
-               retval = mach_set_rtc_mmss(nowtime);
-       spin_unlock(&rtc_lock);
-
-       return retval;
-}
-#endif
-
-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
- *             Note: This function is required to return accurate
- *             time even in the absence of multiple timer ticks.
- */
-unsigned long long monotonic_clock(void)
-{
-       return cur_timer->monotonic_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- */
-static inline void do_timer_interrupt(int irq, void *dev_id,
-                                       struct pt_regs *regs)
-{
-       time_t wtm_sec, sec;
-       s64 delta, nsec;
-       long sec_diff, wtm_nsec;
-
-       do {
-               __get_time_values_from_xen();
-
-               delta = (s64)(shadow_system_time +
-                             (cur_timer->get_offset() * NSEC_PER_USEC) -
-                             processed_system_time);
-       }
-       while (!TIME_VALUES_UP_TO_DATE);
-
-       if (unlikely(delta < 0)) {
-               printk("Timer ISR: Time went backwards: %lld %lld %ld %lld\n",
-                      delta, shadow_system_time,
-                      (cur_timer->get_offset() * NSEC_PER_USEC), 
-                      processed_system_time);
-               return;
-       }
-
-       /* Process elapsed jiffies since last call. */
-       while (delta >= NS_PER_TICK) {
-               delta -= NS_PER_TICK;
-               processed_system_time += NS_PER_TICK;
-               do_timer_interrupt_hook(regs);
-       }
-
-       /*
-        * Take synchronised time from Xen once a minute if we're not
-        * synchronised ourselves, and we haven't chosen to keep an independent
-        * time base.
-        */
-       if (!INDEPENDENT_WALLCLOCK() &&
-           ((time_status & STA_UNSYNC) != 0) &&
-           (xtime.tv_sec > (last_update_from_xen + 60))) {
-               /* Adjust shadow for jiffies that haven't updated xtime yet. */
-               shadow_tv.tv_usec -= 
-                       (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
-               HANDLE_USEC_UNDERFLOW(shadow_tv);
-
-               /*
-                * Reset our running time counts if they are invalidated by
-                * a warp backwards of more than 500ms.
-                */
-               sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
-               if (unlikely(abs(sec_diff) > 1) ||
-                   unlikely(((sec_diff * USEC_PER_SEC) +
-                             (xtime.tv_nsec / NSEC_PER_USEC) -
-                             shadow_tv.tv_usec) > 500000)) {
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-                       last_rtc_update = last_update_to_xen = 0;
-#endif
-                       last_seen_tv.tv_sec = 0;
-               }
-
-               /* Update our unsynchronised xtime appropriately. */
-               sec = shadow_tv.tv_sec;
-               nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
-
-               __normalize_time(&sec, &nsec);
-               wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-               wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-               set_normalized_timespec(&xtime, sec, nsec);
-               set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-               last_update_from_xen = sec;
-       }
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if (!(xen_start_info.flags & SIF_INITDOMAIN))
-               return;
-
-       /* Send synchronised time to Xen approximately every minute. */
-       if (((time_status & STA_UNSYNC) == 0) &&
-           (xtime.tv_sec > (last_update_to_xen + 60))) {
-               dom0_op_t op;
-               struct timeval tv;
-
-               tv.tv_sec   = xtime.tv_sec;
-               tv.tv_usec  = xtime.tv_nsec / NSEC_PER_USEC;
-               tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
-               HANDLE_USEC_OVERFLOW(tv);
-
-               op.cmd = DOM0_SETTIME;
-               op.u.settime.secs        = tv.tv_sec;
-               op.u.settime.usecs       = tv.tv_usec;
-               op.u.settime.system_time = shadow_system_time;
-               HYPERVISOR_dom0_op(&op);
-
-               last_update_to_xen = xtime.tv_sec;
-       }
-
-       /*
-        * If we have an externally synchronized Linux clock, then update
-        * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-        * called as close as possible to 500 ms before the new second starts.
-        */
-       if ((time_status & STA_UNSYNC) == 0 &&
-           xtime.tv_sec > last_rtc_update + 660 &&
-           (xtime.tv_nsec / 1000)
-                       >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-           (xtime.tv_nsec / 1000)
-                       <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
-               /* horrible...FIXME */
-               if (efi_enabled) {
-                       if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
-                               last_rtc_update = xtime.tv_sec;
-                       else
-                               last_rtc_update = xtime.tv_sec - 600;
-               } else if (set_rtc_mmss(xtime.tv_sec) == 0)
-                       last_rtc_update = xtime.tv_sec;
-               else
-                       last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
-       }
-#endif
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-       /*
-        * Here we are in the timer irq handler. We just have irqs locally
-        * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
-        * the irq version of write_lock because as just said we have irq
-        * locally disabled. -arca
-        */
-       write_seqlock(&xtime_lock);
-       do_timer_interrupt(irq, NULL, regs);
-       write_sequnlock(&xtime_lock);
-       return IRQ_HANDLED;
-}
-
-/* not static: needed by APM */
-unsigned long get_cmos_time(void)
-{
-       unsigned long retval;
-
-       spin_lock(&rtc_lock);
-
-       if (efi_enabled)
-               retval = efi_get_time();
-       else
-               retval = mach_get_cmos_time();
-
-       spin_unlock(&rtc_lock);
-
-       return retval;
-}
-
-static long clock_cmos_diff;
-
-static int __time_suspend(struct sys_device *dev, u32 state)
-{
-	/*
-	 * Estimate the offset between the CMOS clock and system time so that
-	 * the resume path can restore the clock across a suspend.
-	 */
-       clock_cmos_diff = -get_cmos_time();
-       clock_cmos_diff += get_seconds();
-       return 0;
-}
-
-static int __time_resume(struct sys_device *dev)
-{
-       unsigned long sec = get_cmos_time() + clock_cmos_diff;
-       write_seqlock_irq(&xtime_lock);
-       xtime.tv_sec = sec;
-       xtime.tv_nsec = 0;
-       write_sequnlock_irq(&xtime_lock);
-       return 0;
-}
-
-static struct sysdev_class pit_sysclass = {
-       .resume = __time_resume,
-       .suspend = __time_suspend,
-       set_kset_name("pit"),
-};
-
-
-/* XXX this driverfs stuff should probably go elsewhere later -john */
-static struct sys_device device_i8253 = {
-       .id     = 0,
-       .cls    = &pit_sysclass,
-};
-
-static int time_init_device(void)
-{
-       int error = sysdev_class_register(&pit_sysclass);
-       if (!error)
-               error = sysdev_register(&device_i8253);
-       return error;
-}
-
-device_initcall(time_init_device);
-
-#ifdef CONFIG_HPET_TIMER
-extern void (*late_time_init)(void);
-/* Duplicate of time_init() below, with hpet_enable part added */
-void __init hpet_time_init(void)
-{
-       xtime.tv_sec = get_cmos_time();
-       wall_to_monotonic.tv_sec = -xtime.tv_sec;
-       xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
-
-       if (hpet_enable() >= 0) {
-               printk("Using HPET for base-timer\n");
-       }
-
-       cur_timer = select_timer();
-       printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
-       time_init_hook();
-}
-#endif
-
-/* Dynamically-mapped IRQ. */
-static int time_irq;
-
-static struct irqaction irq_timer = {
-       timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
-       NULL, NULL
-};
-
-void __init time_init(void)
-{
-#ifdef CONFIG_HPET_TIMER
-       if (is_hpet_capable()) {
-               /*
-                * HPET initialization needs to do memory-mapped io. So, let
-                * us do a late initialization after mem_init().
-                */
-               late_time_init = hpet_time_init;
-               return;
-       }
-#endif
-       __get_time_values_from_xen();
-       xtime.tv_sec = shadow_tv.tv_sec;
-       wall_to_monotonic.tv_sec = -xtime.tv_sec;
-       xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
-       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
-       processed_system_time = shadow_system_time;
-
-       cur_timer = select_timer();
-       printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
-       time_irq = bind_virq_to_irq(VIRQ_TIMER);
-
-       (void)setup_irq(time_irq, &irq_timer);
-}
-
-/* Convert jiffies to system time. Call with xtime_lock held for reading. */
-static inline u64 __jiffies_to_st(unsigned long j) 
-{
-       return processed_system_time + ((j - jiffies) * NS_PER_TICK);
-}
-
-/*
- * This function works out when the next timer function has to be
- * executed (by looking at the timer list) and sets the Xen one-shot
- * domain timer to the appropriate value. This is typically called in
- * cpu_idle() before the domain blocks.
- * 
- * The function returns a non-0 value on error conditions.
- * 
- * It must be called with interrupts disabled.
- */
-int set_timeout_timer(void)
-{
-       u64 alarm = 0;
-       int ret = 0;
-
-       /*
-        * This is safe against long blocking (since calculations are
-        * not based on TSC deltas). It is also safe against warped
-        * system time since suspend-resume is cooperative and we
-        * would first get locked out. It is safe against normal
-        * updates of jiffies since interrupts are off.
-        */
-       alarm = __jiffies_to_st(next_timer_interrupt());
-
-       /* Failure is pretty bad, but we'd best soldier on. */
-       if ( HYPERVISOR_set_timer_op(alarm) != 0 )
-               ret = -1;
-
-       return ret;
-}
-
-void time_suspend(void)
-{
-}
-
-void time_resume(void)
-{
-    unsigned long flags;
-    write_lock_irqsave(&xtime_lock, flags);
-    /* Get timebases for new environment. */ 
-    __get_time_values_from_xen();
-    /* Reset our own concept of passage of system time. */
-    processed_system_time = shadow_system_time;
-    /* Accept a warp in UTC (wall-clock) time. */
-    last_seen_tv.tv_sec = 0;
-    /* Make sure we resync UTC time with Xen on next timer interrupt. */
-    last_update_from_xen = 0;
-    write_unlock_irqrestore(&xtime_lock, flags);
-}
-
-/*
- * /proc/sys/xen: This really belongs in another file. It can stay here for
- * now however.
- */
-static ctl_table xen_subtable[] = {
-       {1, "independent_wallclock", &independent_wallclock,
-        sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
-       {0}
-};
-static ctl_table xen_table[] = {
-       {123, "xen", NULL, 0, 0555, xen_subtable},
-       {0}
-};
-static int __init xen_sysctl_init(void)
-{
-       (void)register_sysctl_table(xen_table, 0);
-       return 0;
-}
-__initcall(xen_sysctl_init);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/Makefile
deleted file mode 100644 (file)
index f85f7de..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for x86 timers
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-obj-y :=       timer_tsc.o
-c-obj-y :=     timer.o timer_none.o timer_pit.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-       @ln -fsn $(srctree)/arch/i386/kernel/timers/$(notdir $@) $@
-
-obj-y  += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
deleted file mode 100644 (file)
index 7348d2a..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- */
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/cpufreq.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-/* processor.h for the tsc_disable flag */
-#include <asm/processor.h>
-
-#include "io_ports.h"
-#include "mach_timer.h"
-
-#include <asm/hpet.h>
-
-#ifdef CONFIG_HPET_TIMER
-static unsigned long hpet_usec_quotient;
-static unsigned long hpet_last;
-struct timer_opts timer_tsc;
-#endif
-
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __initdata = 0;
-
-extern spinlock_t i8253_lock;
-
-static int use_tsc;
-
-static unsigned long long monotonic_base;
-static u32 monotonic_offset;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* convert from cycles(64bits) => nanoseconds (64bits)
- *  basic equation:
- *             ns = cycles / (freq / ns_per_sec)
- *             ns = cycles * (ns_per_sec / freq)
- *             ns = cycles * (10^9 / (cpu_mhz * 10^6))
- *             ns = cycles * (10^3 / cpu_mhz)
- *
- *     Then we use scaling math (suggested by george@mvista.com) to get:
- *             ns = cycles * (10^3 * SC / cpu_mhz) / SC
- *             ns = cycles * cyc2ns_scale / SC
- *
- *     And since SC is a constant power of two, we can convert the div
- *  into a shift.   
- *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-static unsigned long cyc2ns_scale; 
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
-{
-       cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-       return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
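The comment above turns ns = cycles * 10^3 / cpu_mhz into a multiply and a shift by pre-scaling with SC = 2^CYC2NS_SCALE_FACTOR. A small userspace check of that arithmetic, assuming a hypothetical 2400 MHz CPU:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10          /* SC = 2^10, as in the code above */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_mhz)
{
        cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
        set_cyc2ns_scale(2400);         /* hypothetical 2400 MHz CPU */

        /* 2.4e9 cycles is one second; expect roughly 1e9 ns (the shortfall
         * comes from truncating 1000*1024/2400 = 426.67 down to 426). */
        printf("scale=%lu, 2.4e9 cycles -> %llu ns\n",
               cyc2ns_scale, cycles_2_ns(2400000000ULL));
        return 0;
}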
-
-#if 0
-static int count2; /* counter for mark_offset_tsc() */
-#endif
-
-/* Cached *multiplier* to convert TSC counts to microseconds.
- * (see the equation below).
- * Equal to 2^32 * (1 / (clocks per usec) ).
- * Initialized in time_init.
- */
-static unsigned long fast_gettimeoffset_quotient;
-
-extern u32 shadow_tsc_stamp;
-extern u64 shadow_system_time;
-
-static unsigned long get_offset_tsc(void)
-{
-       register unsigned long eax, edx;
-
-       /* Read the Time Stamp Counter */
-
-       rdtsc(eax,edx);
-
-       /* .. relative to previous jiffy (32 bits is enough) */
-       eax -= shadow_tsc_stamp;
-
-       /*
-        * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
-        *             = (tsc_low delta) * (usecs_per_clock)
-        *             = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
-        *
-        * Using a mull instead of a divl saves up to 31 clock cycles
-        * in the critical path.
-        */
-
-       __asm__("mull %2"
-               :"=a" (eax), "=d" (edx)
-               :"rm" (fast_gettimeoffset_quotient),
-                "0" (eax));
-
-       /* our adjusted time offset in microseconds */
-       return edx;
-}
-
-static unsigned long long monotonic_clock_tsc(void)
-{
-       unsigned long long last_offset, this_offset, base;
-       unsigned seq;
-       
-       /* atomically read monotonic base & last_offset */
-       do {
-               seq = read_seqbegin(&monotonic_lock);
-               last_offset = monotonic_offset;
-               base = monotonic_base;
-       } while (read_seqretry(&monotonic_lock, seq));
-
-       /* Read the Time Stamp Counter */
-       rdtscll(this_offset);
-
-       /* return the value in ns */
-       return base + cycles_2_ns(this_offset - last_offset);
-}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
-       unsigned long long this_offset;
-
-       /*
-	 * In the NUMA case we don't use the TSC, as it is not
-        * synchronized across all CPUs.
-        */
-#ifndef CONFIG_NUMA
-       if (!use_tsc)
-#endif
-               /* no locking but a rare wrong value is not a big deal */
-               return jiffies_64 * (1000000000 / HZ);
-
-       /* Read the Time Stamp Counter */
-       rdtscll(this_offset);
-
-       /* return the value in ns */
-       return cycles_2_ns(this_offset);
-}
-
-
-static void mark_offset_tsc(void)
-{
-
-       /* update the monotonic base value */
-       write_seqlock(&monotonic_lock);
-       monotonic_base = shadow_system_time;
-       monotonic_offset = shadow_tsc_stamp;
-       write_sequnlock(&monotonic_lock);
-}
-
-static void delay_tsc(unsigned long loops)
-{
-       unsigned long bclock, now;
-       
-       rdtscl(bclock);
-       do
-       {
-               rep_nop();
-               rdtscl(now);
-       } while ((now-bclock) < loops);
-}
-
-#ifdef CONFIG_HPET_TIMER
-static void mark_offset_tsc_hpet(void)
-{
-       unsigned long long this_offset, last_offset;
-       unsigned long offset, temp, hpet_current;
-
-       write_seqlock(&monotonic_lock);
-       last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
-       /*
-        * It is important that these two operations happen almost at
-        * the same time. We do the RDTSC stuff first, since it's
-        * faster. To avoid any inconsistencies, we need interrupts
-        * disabled locally.
-        */
-       /*
-        * Interrupts are just disabled locally since the timer irq
-        * has the SA_INTERRUPT flag set. -arca
-        */
-       /* read Pentium cycle counter */
-
-       hpet_current = hpet_readl(HPET_COUNTER);
-       rdtsc(last_tsc_low, last_tsc_high);
-
-       /* lost tick compensation */
-       offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
-       if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
-               int lost_ticks = (offset - hpet_last) / hpet_tick;
-               jiffies_64 += lost_ticks;
-       }
-       hpet_last = hpet_current;
-
-       /* update the monotonic base value */
-       this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
-       monotonic_base += cycles_2_ns(this_offset - last_offset);
-       write_sequnlock(&monotonic_lock);
-
-       /* calculate delay_at_last_interrupt */
-       /*
-        * Time offset = (hpet delta) * ( usecs per HPET clock )
-        *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
-        *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
-        * Where,
-        * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
-        */
-       delay_at_last_interrupt = hpet_current - offset;
-       ASM_MUL64_REG(temp, delay_at_last_interrupt,
-                       hpet_usec_quotient, delay_at_last_interrupt);
-}
-#endif
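
/*
 * Sketch (not from the original file): ASM_MUL64_REG above keeps the top
 * 32 bits of a 32x32->64 multiply, so the HPET-to-usec conversion amounts to
 *
 *      delay_at_last_interrupt =
 *              (u32)(((u64)(hpet_current - offset) * hpet_usec_quotient) >> 32);
 *
 * with hpet_usec_quotient = (2^32 * usecs per tick) / HPET clocks per tick,
 * exactly as the comment in mark_offset_tsc_hpet() states.
 */
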
-
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
-       unsigned int cpu;
-       for_each_online_cpu(cpu) {
-               cpufreq_get(cpu);
-       }
-       cpufreq_delayed_issched = 0;
-}
-
-/* If we notice lost ticks, schedule a call to cpufreq_get(); it verifies
- * that the CPU frequency the timing core thinks the CPU is running at
- * is still correct.
- */
-static inline void cpufreq_delayed_get(void) 
-{
-       if (cpufreq_init && !cpufreq_delayed_issched) {
-               cpufreq_delayed_issched = 1;
-               printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
-               schedule_work(&cpufreq_delayed_get_work);
-       }
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int  ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned long cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                     void *data)
-{
-       struct cpufreq_freqs *freq = data;
-
-       write_seqlock_irq(&xtime_lock);
-       if (!ref_freq) {
-               ref_freq = freq->old;
-               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
-               fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
-               cpu_khz_ref = cpu_khz;
-#endif
-       }
-
-       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE)) {
-               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
-               if (cpu_khz)
-                       cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
-               if (use_tsc) {
-                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-                               fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
-                               set_cyc2ns_scale(cpu_khz/1000);
-                       }
-               }
-#endif
-       }
-       write_sequnlock_irq(&xtime_lock);
-
-       return 0;
-}
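
/*
 * Sketch (hypothetical helper; cpufreq_scale() itself lives in
 * <linux/cpufreq.h>): the rescaling above is proportional to the frequency
 * change, roughly ref * new / old.  Values that grow with the clock
 * (loops_per_jiffy, cpu_khz) pass (ref_freq, freq->new); the usec quotient
 * shrinks as the clock speeds up, hence the swapped arguments
 * (freq->new, ref_freq) in the notifier above.
 */
static inline unsigned long scale_by_freq(unsigned long ref,
                                          unsigned int old_khz,
                                          unsigned int new_khz)
{
        return (unsigned long)(((unsigned long long)ref * new_khz) / old_khz);
}
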
-
-static struct notifier_block time_cpufreq_notifier_block = {
-       .notifier_call  = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
-       int ret;
-       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
-       ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                       CPUFREQ_TRANSITION_NOTIFIER);
-       if (!ret)
-               cpufreq_init = 1;
-       return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif 
-
-
-static int __init init_tsc(char* override)
-{
-       unsigned long long alarm;
-       u64 __cpu_khz;
-
-       __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
-       do_div(__cpu_khz, 1000);
-       cpu_khz = (u32)__cpu_khz;
-       printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n", 
-              cpu_khz / 1000, cpu_khz % 1000);
-
-       /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
-          (2^32 * 1 / (clocks/us)) */
-       {       
-               unsigned long eax=0, edx=1000;
-               __asm__("divl %2"
-                   :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
-                   :"r" (cpu_khz),
-                   "0" (eax), "1" (edx));
-       }
-
-       set_cyc2ns_scale(cpu_khz/1000);
-
-       rdtscll(alarm);
-
-       use_tsc=1;
-
-       return 0;
-}
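
/*
 * Worked example (sketch): with cpu_khz = 2,000,000 (a 2 GHz CPU), the divl
 * above yields fast_gettimeoffset_quotient = (1000 * 2^32) / 2,000,000
 * ~= 2,147,483.  A TSC delta of 2,000,000 cycles (one millisecond) then
 * converts in get_offset_tsc() to (2,000,000 * 2,147,483) >> 32 ~= 1000 usecs.
 */
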
-
-static int __init tsc_setup(char *str)
-{
-       printk(KERN_WARNING "notsc: cannot disable TSC in Xen/Linux.\n");
-       return 1;
-}
-__setup("notsc", tsc_setup);
-
-
-
-/************************************************************/
-
-/* tsc timer_opts struct */
-struct timer_opts timer_tsc = {
-       .name =         "tsc",
-       .init =         init_tsc,
-       .mark_offset =  mark_offset_tsc, 
-       .get_offset =   get_offset_tsc,
-       .monotonic_clock =      monotonic_clock_tsc,
-       .delay = delay_tsc,
-};
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/traps.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/traps.c
deleted file mode 100644 (file)
index 2f3529b..0000000
+++ /dev/null
@@ -1,1031 +0,0 @@
-/*
- *  linux/arch/i386/traps.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Pentium III FXSR, SSE support
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-/*
- * 'Traps.c' handles hardware traps and faults after we have saved some
- * state in 'asm.s'.
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/highmem.h>
-#include <linux/kallsyms.h>
-#include <linux/ptrace.h>
-#include <linux/version.h>
-
-#ifdef CONFIG_EISA
-#include <linux/ioport.h>
-#include <linux/eisa.h>
-#endif
-
-#ifdef CONFIG_MCA
-#include <linux/mca.h>
-#endif
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/debugreg.h>
-#include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/nmi.h>
-
-#include <asm/smp.h>
-#include <asm/arch_hooks.h>
-
-#include <linux/irq.h>
-#include <linux/module.h>
-
-#include "mach_traps.h"
-
-asmlinkage int system_call(void);
-asmlinkage void lcall7(void);
-asmlinkage void lcall27(void);
-
-asmlinkage void safe_page_fault(void);
-
-/* Do we ignore FPU interrupts ? */
-char ignore_fpu_irq = 0;
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
- */
-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-
-asmlinkage void divide_error(void);
-asmlinkage void debug(void);
-asmlinkage void nmi(void);
-asmlinkage void int3(void);
-asmlinkage void overflow(void);
-asmlinkage void bounds(void);
-asmlinkage void invalid_op(void);
-asmlinkage void device_not_available(void);
-asmlinkage void double_fault(void);
-asmlinkage void coprocessor_segment_overrun(void);
-asmlinkage void invalid_TSS(void);
-asmlinkage void segment_not_present(void);
-asmlinkage void stack_segment(void);
-asmlinkage void general_protection(void);
-asmlinkage void page_fault(void);
-asmlinkage void coprocessor_error(void);
-asmlinkage void simd_coprocessor_error(void);
-asmlinkage void alignment_check(void);
-asmlinkage void fixup_4gb_segment(void);
-asmlinkage void machine_check(void);
-
-static int kstack_depth_to_print = 24;
-
-static int valid_stack_ptr(struct task_struct *task, void *p)
-{
-       if (p <= (void *)task->thread_info)
-               return 0;
-       if (kstack_end(p))
-               return 0;
-       return 1;
-}
-
-#ifdef CONFIG_FRAME_POINTER
-static void print_context_stack(struct task_struct *task, unsigned long *stack,
-                        unsigned long ebp)
-{
-       unsigned long addr;
-
-       while (valid_stack_ptr(task, (void *)ebp)) {
-               addr = *(unsigned long *)(ebp + 4);
-               printk(" [<%08lx>] ", addr);
-               print_symbol("%s", addr);
-               printk("\n");
-               ebp = *(unsigned long *)ebp;
-       }
-}
-#else
-static void print_context_stack(struct task_struct *task, unsigned long *stack,
-                        unsigned long ebp)
-{
-       unsigned long addr;
-
-       while (!kstack_end(stack)) {
-               addr = *stack++;
-               if (__kernel_text_address(addr)) {
-                       printk(" [<%08lx>]", addr);
-                       print_symbol(" %s\n", addr);
-                       printk("\n");
-               }
-       }
-}
-#endif
-
-void show_trace(struct task_struct *task, unsigned long * stack)
-{
-       unsigned long ebp;
-
-       if (!task)
-               task = current;
-
-       if (!valid_stack_ptr(task, stack)) {
-               printk("Stack pointer is garbage, not printing trace\n");
-               return;
-       }
-
-       if (task == current) {
-               /* Grab ebp right from our regs */
-               asm ("movl %%ebp, %0" : "=r" (ebp) : );
-       } else {
-               /* ebp is the last reg pushed by switch_to */
-               ebp = *(unsigned long *) task->thread.esp;
-       }
-
-       while (1) {
-               struct thread_info *context;
-               context = (struct thread_info *)
-                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-               print_context_stack(task, stack, ebp);
-               stack = (unsigned long*)context->previous_esp;
-               if (!stack)
-                       break;
-               printk(" =======================\n");
-       }
-}
-
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
-       unsigned long *stack;
-       int i;
-
-       if (esp == NULL) {
-               if (task)
-                       esp = (unsigned long*)task->thread.esp;
-               else
-                       esp = (unsigned long *)&esp;
-       }
-
-       stack = esp;
-       for(i = 0; i < kstack_depth_to_print; i++) {
-               if (kstack_end(stack))
-                       break;
-               if (i && ((i % 8) == 0))
-                       printk("\n       ");
-               printk("%08lx ", *stack++);
-       }
-       printk("\nCall Trace:\n");
-       show_trace(task, esp);
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
-       unsigned long stack;
-
-       show_trace(current, &stack);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void show_registers(struct pt_regs *regs)
-{
-       int i;
-       int in_kernel = 1;
-       unsigned long esp;
-       unsigned short ss;
-
-       esp = (unsigned long) (&regs->esp);
-       ss = __KERNEL_DS;
-       if (regs->xcs & 2) {
-               in_kernel = 0;
-               esp = regs->esp;
-               ss = regs->xss & 0xffff;
-       }
-       print_modules();
-       printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s\nEFLAGS: %08lx"
-                       "   (%s) \n",
-               smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-               print_tainted(), regs->eflags, UTS_RELEASE);
-       print_symbol("EIP is at %s\n", regs->eip);
-       printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
-               regs->eax, regs->ebx, regs->ecx, regs->edx);
-       printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
-               regs->esi, regs->edi, regs->ebp, esp);
-       printk("ds: %04x   es: %04x   ss: %04x\n",
-               regs->xds & 0xffff, regs->xes & 0xffff, ss);
-       printk("Process %s (pid: %d, threadinfo=%p task=%p)",
-               current->comm, current->pid, current_thread_info(), current);
-       /*
-        * When in-kernel, we also print out the stack and code at the
-        * time of the fault..
-        */
-       if (in_kernel) {
-
-               printk("\nStack: ");
-               show_stack(NULL, (unsigned long*)esp);
-
-               printk("Code: ");
-               if(regs->eip < PAGE_OFFSET)
-                       goto bad;
-
-               for(i=0;i<20;i++)
-               {
-                       unsigned char c;
-                       if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
-bad:
-                               printk(" Bad EIP value.");
-                               break;
-                       }
-                       printk("%02x ", c);
-               }
-       }
-       printk("\n");
-}      
-
-static void handle_BUG(struct pt_regs *regs)
-{
-       unsigned short ud2;
-       unsigned short line;
-       char *file;
-       char c;
-       unsigned long eip;
-
-       if (regs->xcs & 2)
-               goto no_bug;            /* Not in kernel */
-
-       eip = regs->eip;
-
-       if (eip < PAGE_OFFSET)
-               goto no_bug;
-       if (__get_user(ud2, (unsigned short *)eip))
-               goto no_bug;
-       if (ud2 != 0x0b0f)
-               goto no_bug;
-       if (__get_user(line, (unsigned short *)(eip + 2)))
-               goto bug;
-       if (__get_user(file, (char **)(eip + 4)) ||
-               (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-               file = "<bad filename>";
-
-       printk("------------[ cut here ]------------\n");
-       printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
-
-no_bug:
-       return;
-
-       /* Here we know it was a BUG but file-n-line is unavailable */
-bug:
-       printk("Kernel BUG\n");
-}
-
-spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
-       static int die_counter;
-       int nl = 0;
-
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       bust_spinlocks(1);
-       handle_BUG(regs);
-       printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-       nl = 1;
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP ");
-       nl = 1;
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC");
-       nl = 1;
-#endif
-       if (nl)
-               printk("\n");
-       show_registers(regs);
-       bust_spinlocks(0);
-       spin_unlock_irq(&die_lock);
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-
-       if (panic_on_oops) {
-               printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(5 * HZ);
-               panic("Fatal exception");
-       }
-       do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
-       if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
-               die(str, regs, err);
-}
-
-static inline unsigned long get_cr2(void)
-{
-       unsigned long address;
-
-       /* get the address */
-       __asm__("movl %%cr2,%0":"=r" (address));
-       return address;
-}
-
-static inline void do_trap(int trapnr, int signr, char *str, int vm86,
-                          struct pt_regs * regs, long error_code, siginfo_t *info)
-{
-       if (regs->eflags & VM_MASK) {
-               if (vm86)
-                       goto vm86_trap;
-               goto trap_signal;
-       }
-
-       if (!(regs->xcs & 2))
-               goto kernel_trap;
-
-       trap_signal: {
-               struct task_struct *tsk = current;
-               tsk->thread.error_code = error_code;
-               tsk->thread.trap_no = trapnr;
-               if (info)
-                       force_sig_info(signr, info, tsk);
-               else
-                       force_sig(signr, tsk);
-               return;
-       }
-
-       kernel_trap: {
-               if (!fixup_exception(regs))
-                       die(str, regs, error_code);
-               return;
-       }
-
-       vm86_trap: {
-               int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-               if (ret) goto trap_signal;
-               return;
-       }
-}
-
-#define DO_ERROR(trapnr, signr, str, name) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-       do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-}
-
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-       siginfo_t info; \
-       info.si_signo = signr; \
-       info.si_errno = 0; \
-       info.si_code = sicode; \
-       info.si_addr = (void __user *)siaddr; \
-       do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-}
-
-#define DO_VM86_ERROR(trapnr, signr, str, name) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-       do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-}
-
-#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
-       siginfo_t info; \
-       info.si_signo = signr; \
-       info.si_errno = 0; \
-       info.si_code = sicode; \
-       info.si_addr = (void __user *)siaddr; \
-       do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-}
-
-DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
-DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
-DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
-DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-#ifdef CONFIG_X86_MCE
-DO_ERROR(18, SIGBUS, "machine check", machine_check)
-#endif
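
/*
 * For illustration (sketch of the macro expansion above),
 * DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) expands to roughly
 *
 *      asmlinkage void do_invalid_TSS(struct pt_regs *regs, long error_code)
 *      {
 *              do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *      }
 *
 * The _INFO and _VM86 variants differ only in the siginfo they build and in
 * the vm86 flag passed to do_trap().
 */
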
-
-asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
-{
-       /*
-        * If we trapped on an LDT access then ensure that the default_ldt is
-        * loaded, if nothing else. We load default_ldt lazily because LDT
-        * switching costs time and many applications don't need it.
-        */
-       if (unlikely((error_code & 6) == 4)) {
-               unsigned long ldt;
-               __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
-               if (ldt == 0) {
-                       mmu_update_t u;
-                       u.ptr = MMU_EXTENDED_COMMAND;
-                       u.ptr |= (unsigned long)&default_ldt[0];
-                       u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
-                       if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
-                               show_trace(NULL, (unsigned long *)&u);
-                               panic("Failed to install default LDT");
-                       }
-                       return;
-               }
-       }
-
-#if 0
-       if (regs->eflags & X86_EFLAGS_IF)
-               local_irq_enable();
-#endif
-       if (regs->eflags & VM_MASK)
-               goto gp_in_vm86;
-
-       if (!(regs->xcs & 2))
-               goto gp_in_kernel;
-
-       current->thread.error_code = error_code;
-       current->thread.trap_no = 13;
-       force_sig(SIGSEGV, current);
-       return;
-
-gp_in_vm86:
-       local_irq_enable();
-       handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-       return;
-
-gp_in_kernel:
-       if (!fixup_exception(regs))
-               die("general protection fault", regs, error_code);
-}
-
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
-{
-       printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-       printk("You probably have a hardware problem with your RAM chips\n");
-
-       /* Clear and disable the memory parity error line. */
-       clear_mem_error(reason);
-}
-
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
-{
-       unsigned long i;
-
-       printk("NMI: IOCK error (debug interrupt?)\n");
-       show_registers(regs);
-
-       /* Re-enable the IOCK line, wait for a few seconds */
-       reason = (reason & 0xf) | 8;
-       outb(reason, 0x61);
-       i = 2000;
-       while (--i) udelay(1000);
-       reason &= ~8;
-       outb(reason, 0x61);
-}
-
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-{
-#ifdef CONFIG_MCA
-       /* Might actually be able to figure out what the guilty party
-       * is. */
-       if( MCA_bus ) {
-               mca_handle_nmi();
-               return;
-       }
-#endif
-       printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-               reason, smp_processor_id());
-       printk("Dazed and confused, but trying to continue\n");
-       printk("Do you have a strange power saving mode enabled?\n");
-}
-
-static void default_do_nmi(struct pt_regs * regs)
-{
-       unsigned char reason = get_nmi_reason();
-       if (!(reason & 0xc0)) {
-#ifdef CONFIG_X86_LOCAL_APIC
-               /*
-                * Ok, so this is none of the documented NMI sources,
-                * so it must be the NMI watchdog.
-                */
-               if (nmi_watchdog) {
-                       nmi_watchdog_tick(regs);
-                       return;
-               }
-#endif
-               unknown_nmi_error(reason, regs);
-               return;
-       }
-       if (reason & 0x80)
-               mem_parity_error(reason, regs);
-       if (reason & 0x40)
-               io_check_error(reason, regs);
-       /*
-        * Reassert NMI in case it became active meanwhile
-        * as it's edge-triggered.
-        */
-       reassert_nmi();
-}
-
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
-{
-       return 0;
-}
-static nmi_callback_t nmi_callback = dummy_nmi_callback;
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
-{
-       int cpu;
-
-       nmi_enter();
-
-       cpu = smp_processor_id();
-       ++nmi_count(cpu);
-
-       if (!nmi_callback(regs, cpu))
-               default_do_nmi(regs);
-
-       nmi_exit();
-}
-
-void set_nmi_callback(nmi_callback_t callback)
-{
-       nmi_callback = callback;
-}
-
-void unset_nmi_callback(void)
-{
-       nmi_callback = dummy_nmi_callback;
-}
-
-/*
- * Our handling of the processor debug registers is non-trivial.
- * We do not clear them on entry and exit from the kernel. Therefore
- * it is possible to get a watchpoint trap here from inside the kernel.
- * However, the code in ./ptrace.c has ensured that the user can
- * only set watchpoints on userspace addresses. Therefore the in-kernel
- * watchpoint trap can only occur in code which is reading/writing
- * from user space. Such code must not hold kernel locks (since it
- * can equally take a page fault), therefore it is safe to call
- * force_sig_info even though that claims and releases locks.
- * 
- * Code in ./signal.c ensures that the debug control register
- * is restored before we deliver any signal, and therefore that
- * user code runs with the correct debug control register even though
- * we clear it here.
- *
- * Being careful here means that we don't have to be as careful in a
- * lot of more complicated places (task switching can be a bit lazy
- * about restoring all the debug state, and ptrace doesn't have to
- * find every occurrence of the TF bit that could be saved away even
- * by user code)
- */
-asmlinkage void do_debug(struct pt_regs * regs, long error_code)
-{
-       unsigned int condition;
-       struct task_struct *tsk = current;
-       siginfo_t info;
-
-       condition = HYPERVISOR_get_debugreg(6);
-
-#if 0
-       /* It's safe to allow irq's after DR6 has been saved */
-       if (regs->eflags & X86_EFLAGS_IF)
-               local_irq_enable();
-#endif
-
-       /* Mask out spurious debug traps due to lazy DR7 setting */
-       if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-               if (!tsk->thread.debugreg[7])
-                       goto clear_dr7;
-       }
-
-       if (regs->eflags & VM_MASK)
-               goto debug_vm86;
-
-       /* Save debug status register where ptrace can see it */
-       tsk->thread.debugreg[6] = condition;
-
-       /* Mask out spurious TF errors due to lazy TF clearing */
-       if (condition & DR_STEP) {
-               /*
-                * The TF error should be masked out only if the current
-                * process is not traced and if the TRAP flag has been set
-                * previously by a tracing process (condition detected by
-                * the PT_DTRACE flag); remember that the i386 TRAP flag
-                * can be modified by the process itself in user mode,
-                * allowing programs to debug themselves without the ptrace()
-                * interface.
-                */
-               if ((regs->xcs & 2) == 0)
-                       goto clear_TF_reenable;
-               if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
-                       goto clear_TF;
-       }
-
-       /* Ok, finally something we can handle */
-       tsk->thread.trap_no = 1;
-       tsk->thread.error_code = error_code;
-       info.si_signo = SIGTRAP;
-       info.si_errno = 0;
-       info.si_code = TRAP_BRKPT;
-       
-       /* If this is a kernel mode trap, save the user PC on entry to
-        * the kernel; that's what the debugger can make sense of.
-        */
-       info.si_addr = ((regs->xcs & 2) == 0) ? (void __user *)tsk->thread.eip : 
-                                               (void __user *)regs->eip;
-       force_sig_info(SIGTRAP, &info, tsk);
-
-       /* Disable additional traps. They'll be re-enabled when
-        * the signal is delivered.
-        */
-clear_dr7:
-       HYPERVISOR_set_debugreg(7, 0);
-       return;
-
-debug_vm86:
-       handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-       return;
-
-clear_TF_reenable:
-       set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-clear_TF:
-       regs->eflags &= ~TF_MASK;
-       return;
-}
-
-/*
- * Note that we play around with the 'TS' bit in an attempt to get
- * the correct behaviour even in the presence of the asynchronous
- * IRQ13 behaviour
- */
-void math_error(void __user *eip)
-{
-       struct task_struct * task;
-       siginfo_t info;
-       unsigned short cwd, swd;
-
-       /*
-        * Save the info for the exception handler and clear the error.
-        */
-       task = current;
-       save_init_fpu(task);
-       task->thread.trap_no = 16;
-       task->thread.error_code = 0;
-       info.si_signo = SIGFPE;
-       info.si_errno = 0;
-       info.si_code = __SI_FAULT;
-       info.si_addr = eip;
-       /*
-        * (~cwd & swd) will mask out exceptions that are not set to unmasked
-        * status.  0x3f is the exception bits in these regs, 0x200 is the
-        * C1 reg you need in case of a stack fault, 0x040 is the stack
-        * fault bit.  We should only be taking one exception at a time,
-        * so if this combination doesn't produce any single exception,
-        * then we have a bad program that isn't synchronizing its FPU usage
-        * and it will suffer the consequences since we won't be able to
-        * fully reproduce the context of the exception
-        */
-       cwd = get_fpu_cwd(task);
-       swd = get_fpu_swd(task);
-       switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
-               case 0x000:
-               default:
-                       break;
-               case 0x001: /* Invalid Op */
-               case 0x041: /* Stack Fault */
-               case 0x241: /* Stack Fault | Direction */
-                       info.si_code = FPE_FLTINV;
-                       /* Should we clear the SF or let user space do it ???? */
-                       break;
-               case 0x002: /* Denormalize */
-               case 0x010: /* Underflow */
-                       info.si_code = FPE_FLTUND;
-                       break;
-               case 0x004: /* Zero Divide */
-                       info.si_code = FPE_FLTDIV;
-                       break;
-               case 0x008: /* Overflow */
-                       info.si_code = FPE_FLTOVF;
-                       break;
-               case 0x020: /* Precision */
-                       info.si_code = FPE_FLTRES;
-                       break;
-       }
-       force_sig_info(SIGFPE, &info, task);
-}
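
/*
 * Worked example (sketch) of the cwd/swd decode above: with the default
 * control word 0x037f every exception is masked, so (~cwd & swd & 0x3f) is 0
 * and the switch falls through to the default case.  If the zero-divide
 * exception is unmasked (cwd bit 2 clear) and the status word reports it
 * (swd bit 2 set), the expression yields 0x004 and the signal is delivered
 * with si_code = FPE_FLTDIV.
 */
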
-
-asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
-{
-       ignore_fpu_irq = 1;
-       math_error((void __user *)regs->eip);
-}
-
-void simd_math_error(void __user *eip)
-{
-       struct task_struct * task;
-       siginfo_t info;
-       unsigned short mxcsr;
-
-       /*
-        * Save the info for the exception handler and clear the error.
-        */
-       task = current;
-       save_init_fpu(task);
-       task->thread.trap_no = 19;
-       task->thread.error_code = 0;
-       info.si_signo = SIGFPE;
-       info.si_errno = 0;
-       info.si_code = __SI_FAULT;
-       info.si_addr = eip;
-       /*
-        * The SIMD FPU exceptions are handled a little differently, as there
-        * is only a single status/control register.  Thus, to determine which
-        * unmasked exception was caught we must mask the exception mask bits
-        * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-        */
-       mxcsr = get_fpu_mxcsr(task);
-       switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-               case 0x000:
-               default:
-                       break;
-               case 0x001: /* Invalid Op */
-                       info.si_code = FPE_FLTINV;
-                       break;
-               case 0x002: /* Denormalize */
-               case 0x010: /* Underflow */
-                       info.si_code = FPE_FLTUND;
-                       break;
-               case 0x004: /* Zero Divide */
-                       info.si_code = FPE_FLTDIV;
-                       break;
-               case 0x008: /* Overflow */
-                       info.si_code = FPE_FLTOVF;
-                       break;
-               case 0x020: /* Precision */
-                       info.si_code = FPE_FLTRES;
-                       break;
-       }
-       force_sig_info(SIGFPE, &info, task);
-}
-
-asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
-                                         long error_code)
-{
-       if (cpu_has_xmm) {
-               /* Handle SIMD FPU exceptions on PIII+ processors. */
-               ignore_fpu_irq = 1;
-               simd_math_error((void __user *)regs->eip);
-       } else {
-               /*
-                * Handle strange cache flush from user space exception
-                * in all other cases.  This is undocumented behaviour.
-                */
-               if (regs->eflags & VM_MASK) {
-                       handle_vm86_fault((struct kernel_vm86_regs *)regs,
-                                         error_code);
-                       return;
-               }
-               die_if_kernel("cache flush denied", regs, error_code);
-               current->thread.trap_no = 19;
-               current->thread.error_code = error_code;
-               force_sig(SIGSEGV, current);
-       }
-}
-
-/*
- *  'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
- */
-asmlinkage void math_state_restore(struct pt_regs regs)
-{
-       struct thread_info *thread = current_thread_info();
-       struct task_struct *tsk = thread->task;
-
-       /*
-        * A trap in kernel mode can be ignored. It'll be the fast XOR or
-        * copying libraries, which will correctly save/restore state and
-        * reset the TS bit in CR0.
-        */
-       if ((regs.xcs & 2) == 0)
-               return;
-
-       clts();         /* Allow maths ops (or we recurse) */
-       if (!tsk->used_math)
-               init_fpu(tsk);
-       restore_fpu(tsk);
-       thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
-}
-
-#ifndef CONFIG_MATH_EMULATION
-
-asmlinkage void math_emulate(long arg)
-{
-       printk("math-emulation not enabled and no coprocessor found.\n");
-       printk("killing %s.\n",current->comm);
-       force_sig(SIGFPE,current);
-       schedule();
-}
-
-#endif /* CONFIG_MATH_EMULATION */
-
-#ifdef CONFIG_X86_F00F_BUG
-void __init trap_init_f00f_bug(void)
-{
-       __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-       /*
-        * Update the IDT descriptor and reload the IDT so that
-        * it uses the read-only mapped virtual address.
-        */
-       idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-       __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
-}
-#endif
-
-#define _set_gate(gate_addr,type,dpl,addr,seg) \
-do { \
-  int __d0, __d1; \
-  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
-       "movw %4,%%dx\n\t" \
-       "movl %%eax,%0\n\t" \
-       "movl %%edx,%1" \
-       :"=m" (*((long *) (gate_addr))), \
-        "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
-       :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
-        "3" ((char *) (addr)),"2" ((seg) << 16)); \
-} while (0)
-
-
-/*
- * This needs to use 'idt_table' rather than 'idt', and
- * thus use the _nonmapped_ version of the IDT, as the
- * Pentium F0 0F bugfix can have resulted in the mapped
- * IDT being write-protected.
- */
-void set_intr_gate(unsigned int n, void *addr)
-{
-       _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
-}
-
-#if 0
-static void __init set_trap_gate(unsigned int n, void *addr)
-{
-       _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
-}
-
-static void __init set_system_gate(unsigned int n, void *addr)
-{
-       _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
-}
-#endif
-
-static void __init set_call_gate(void *a, void *addr)
-{
-       _set_gate(a,12,3,addr,__KERNEL_CS);
-}
-
-#if 0
-static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
-{
-       _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
-}
-#endif
-
-
-/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
-static trap_info_t trap_table[] = {
-       {  0, 0, __KERNEL_CS, (unsigned long)divide_error               },
-       {  1, 0, __KERNEL_CS, (unsigned long)debug                      },
-       {  3, 3, __KERNEL_CS, (unsigned long)int3                       },
-       {  4, 3, __KERNEL_CS, (unsigned long)overflow                   },
-       {  5, 3, __KERNEL_CS, (unsigned long)bounds                     },
-       {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                 },
-       {  7, 0, __KERNEL_CS, (unsigned long)device_not_available       },
-       {  8, 0, __KERNEL_CS, (unsigned long)double_fault               },
-       {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-       { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                },
-       { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present        },
-       { 12, 0, __KERNEL_CS, (unsigned long)stack_segment              },
-       { 13, 0, __KERNEL_CS, (unsigned long)general_protection         },
-       { 14, 0, __KERNEL_CS, (unsigned long)page_fault                 },
-       { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment          },
-       { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error          },
-       { 17, 0, __KERNEL_CS, (unsigned long)alignment_check            },
-#ifdef CONFIG_X86_MCE
-       { 18, 0, __KERNEL_CS, (unsigned long)machine_check              },
-#endif
-       { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error     },
-       { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call    },
-       {  0, 0,           0, 0                                         }
-};
-
-void __init trap_init(void)
-{
-       HYPERVISOR_set_trap_table(trap_table);    
-        HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
-
-       /*
-        * default LDT is a single-entry callgate to lcall7 for iBCS
-        * and a callgate to lcall27 for Solaris/x86 binaries
-        */
-       clear_page(&default_ldt[0]);
-       set_call_gate(&default_ldt[0],lcall7);
-       set_call_gate(&default_ldt[4],lcall27);
-       __make_page_readonly(&default_ldt[0]);
-       xen_flush_page_update_queue();
-
-       /*
-        * Should be a barrier for any external CPU state.
-        */
-       cpu_init();
-}
-
-
-/*
- * install_safe_pf_handler / install_normal_pf_handler:
- * 
- * These are used within the failsafe_callback handler in entry.S to avoid
- * taking a full page fault when reloading FS and GS. This is because FS and 
- * GS could be invalid at pretty much any point while Xen Linux executes (we 
- * don't set them to safe values on entry to the kernel). At *any* point Xen 
- * may be entered due to a hardware interrupt --- on exit from Xen an invalid 
- * FS/GS will cause our failsafe_callback to be executed. This could occur, 
- * for example, while the mmu_update_queue is in an inconsistent state. This
- * is disastrous because the normal page-fault handler touches the update
- * queue!
- * 
- * Fortunately, within the failsafe handler it is safe to force DS/ES/FS/GS
- * to zero if they cannot be reloaded -- at this point executing a normal
- * page fault would not change this effect. The safe page-fault handler
- * ensures this end result (blow away the selector value) without the dangers
- * of the normal page-fault handler.
- * 
- * NB. Perhaps this can all go away after we have implemented writable
- * page tables. :-)
- */
-
-asmlinkage void do_safe_page_fault(struct pt_regs *regs, 
-                                   unsigned long error_code,
-                                   unsigned long address)
-{
-       if (!fixup_exception(regs))
-               die("Unhandleable 'safe' page fault!", regs, error_code);
-}
-
-unsigned long install_safe_pf_handler(void)
-{
-       static trap_info_t safe_pf[] = { 
-               { 14, 0, __KERNEL_CS, (unsigned long)safe_page_fault },
-               {  0, 0,           0, 0                              }
-       };
-       unsigned long flags;
-       local_irq_save(flags);
-       HYPERVISOR_set_trap_table(safe_pf);
-       return flags; /* This is returned in %%eax */
-}
-
-__attribute__((regparm(3))) /* This function take its arg in %%eax */
-void install_normal_pf_handler(unsigned long flags)
-{
-       static trap_info_t normal_pf[] = { 
-               { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
-               {  0, 0,           0, 0                         }
-       };
-       HYPERVISOR_set_trap_table(normal_pf);
-       local_irq_restore(flags);
-}
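
/*
 * Sketch (assumed; the deleted entry.S is not shown in this hunk): the
 * failsafe_callback presumably brackets the segment reloads roughly as
 *
 *      flags = install_safe_pf_handler();      (irqs off, safe #PF installed)
 *      reload DS/ES/FS/GS - a faulting selector is simply zeroed
 *      install_normal_pf_handler(flags);       (normal #PF restored, irqs on)
 *
 * so that a bad selector never reaches the normal page-fault handler while
 * the mmu_update_queue may be inconsistent, as the comment above explains.
 */
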
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
deleted file mode 100644 (file)
index 829f1c9..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-/* ld script to make i386 Linux kernel
- * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
- */
-
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/thread_info.h>
-
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(startup_32)
-jiffies = jiffies_64;
-SECTIONS
-{
-  . = 0xC0000000 + 0x100000;
-  /* read-only */
-  _text = .;                   /* Text and read-only data */
-  .text : {
-       *(.text)
-       SCHED_TEXT
-       *(.fixup)
-       *(.gnu.warning)
-       } = 0x9090
-
-  _etext = .;                  /* End of text section */
-
-  . = ALIGN(16);               /* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
-  RODATA
-
-  /* writeable */
-  .data : {                    /* Data */
-       *(.data)
-       CONSTRUCTORS
-       }
-
-  . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
-  . = ALIGN(4096);
-  .data.page_aligned : { *(.data.idt) }
-
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
-  _edata = .;                  /* End of data section */
-
-  . = ALIGN(THREAD_SIZE);      /* init_task */
-  .data.init_task : { *(.data.init_task) }
-
-  /* will be freed after init */
-  . = ALIGN(4096);             /* Init code and data */
-  __init_begin = .;
-  .init.text : { 
-       _sinittext = .;
-       *(.init.text)
-       _einittext = .;
-  }
-  .init.data : { *(.init.data) }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __start___param = .;
-  __param : { *(__param) }
-  __stop___param = .;
-  __initcall_start = .;
-  .initcall.init : {
-       *(.initcall1.init) 
-       *(.initcall2.init) 
-       *(.initcall3.init) 
-       *(.initcall4.init) 
-       *(.initcall5.init) 
-       *(.initcall6.init) 
-       *(.initcall7.init)
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) } 
-  __alt_instructions_end = .; 
- .altinstr_replacement : { *(.altinstr_replacement) } 
-  /* .exit.text is discarded at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
-  .exit.data : { *(.exit.data) }
-  . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
-  . = ALIGN(32);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
-  . = ALIGN(4096);
-  __init_end = .;
-  /* freed after init ends here */
-       
-  __bss_start = .;             /* BSS */
-  .bss : {
-       *(.bss.page_aligned)
-       *(.bss)
-  }
-  . = ALIGN(4);
-  __bss_stop = .; 
-
-  _end = . ;
-
-  /* This is where the kernel creates the early boot page tables */
-  . = ALIGN(4096);
-  pg0 = .;
-
-  /* Sections to be discarded */
-  /DISCARD/ : {
-       *(.exitcall.exit)
-       }
-
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
-}
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.S b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.S
deleted file mode 100644 (file)
index f2bcb7f..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#include <linux/init.h>
-
-__INITDATA
-
-       .globl vsyscall_int80_start, vsyscall_int80_end
-vsyscall_int80_start:
-       .incbin "arch/xen/i386/kernel/vsyscall-int80.so"
-vsyscall_int80_end:
-
-       .globl vsyscall_sysenter_start, vsyscall_sysenter_end
-vsyscall_sysenter_start:
-       .incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
-vsyscall_sysenter_end:
-
-__FINIT
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.lds b/linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
deleted file mode 100644 (file)
index befbdb9..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
- * object prelinked to its virtual address, and with only one read-only
- * segment (that fits in one page).  This script controls its layout.
- */
-
-/* This must match <asm/fixmap.h>.  */
-/* = FIXADDR_TOP - PAGE_SIZE
-   = HYPERVISOR_VIRT_START - 2 * PAGE_SIZE - PAGE_SIZE */
-VSYSCALL_BASE = 0xfbffd000;
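
/* Sanity check (sketch, assuming HYPERVISOR_VIRT_START = 0xFC000000 and
   PAGE_SIZE = 0x1000 as on Xen/x86 of this era): 0xFC000000 - 3 * 0x1000
   = 0xFBFFD000, matching VSYSCALL_BASE above. */
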
-
-SECTIONS
-{
-  . = VSYSCALL_BASE + SIZEOF_HEADERS;
-
-  .hash           : { *(.hash) }               :text
-  .dynsym         : { *(.dynsym) }
-  .dynstr         : { *(.dynstr) }
-  .gnu.version    : { *(.gnu.version) }
-  .gnu.version_d  : { *(.gnu.version_d) }
-  .gnu.version_r  : { *(.gnu.version_r) }
-
-  /* This linker script is used both with -r and with -shared.
-     For the layouts to match, we need to skip more than enough
-     space for the dynamic symbol table et al.  If this amount
-     is insufficient, ld -shared will barf.  Just increase it here.  */
-  . = VSYSCALL_BASE + 0x400;
-
-  .text           : { *(.text) }               :text =0x90909090
-
-  .eh_frame_hdr   : { *(.eh_frame_hdr) }       :text :eh_frame_hdr
-  .eh_frame       : { KEEP (*(.eh_frame)) }    :text
-  .dynamic        : { *(.dynamic) }            :text :dynamic
-  .useless        : {
-       *(.got.plt) *(.got)
-       *(.data .data.* .gnu.linkonce.d.*)
-       *(.dynbss)
-       *(.bss .bss.* .gnu.linkonce.b.*)
-  }                                            :text
-}
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
-  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
-  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
-  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
-}
-
-/*
- * This controls what symbols we export from the DSO.
- */
-VERSION
-{
-  LINUX_2.5 {
-    global:
-       __kernel_vsyscall;
-       __kernel_sigreturn;
-       __kernel_rt_sigreturn;
-
-    local: *;
-  };
-}
-
-/* The ELF entry point can be used to set the AT_SYSINFO value.  */
-ENTRY(__kernel_vsyscall);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile
deleted file mode 100644 (file)
index 50c9750..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Makefile for the linux i386-specific parts of the memory manager.
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/mm
-
-obj-y  := init.o fault.o ioremap.o pgtable.o hypervisor.o pageattr.o
-c-obj-y        := extable.o
-
-c-obj-$(CONFIG_DISCONTIGMEM)   += discontig.o
-c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
-c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-       @ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
-
-obj-y  += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/fault.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/fault.c
deleted file mode 100644 (file)
index 2c6a9ba..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- *  linux/arch/i386/mm/fault.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>             /* For unblank_screen() */
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/hardirq.h>
-#include <asm/desc.h>
-
-extern void die(const char *,struct pt_regs *,long);
-
-pgd_t *cur_pgd;                        /* XXXsmp */
-
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out 
- */
-void bust_spinlocks(int yes)
-{
-       int loglevel_save = console_loglevel;
-
-       if (yes) {
-               oops_in_progress = 1;
-               return;
-       }
-#ifdef CONFIG_VT
-       unblank_screen();
-#endif
-       oops_in_progress = 0;
-       /*
-        * OK, the message is on the console.  Now we call printk()
-        * without oops_in_progress set so that printk will give klogd
-        * a poke.  Hold onto your hats...
-        */
-       console_loglevel = 15;          /* NMI oopser may have shut the console up */
-       printk(" ");
-       console_loglevel = loglevel_save;
-}
-
-/*
- * Return EIP plus the CS segment base.  The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
- *
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
- *
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- * 
- * This is slow, but is very rarely executed.
- */
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
-                                           unsigned long *eip_limit)
-{
-       unsigned long eip = regs->eip;
-       unsigned seg = regs->xcs & 0xffff;
-       u32 seg_ar, seg_limit, base, *desc;
-
-       /* The standard kernel/user address space limit. */
-       *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
-
-       /* Unlikely, but must come before segment checks. */
-       if (unlikely((regs->eflags & VM_MASK) != 0))
-               return eip + (seg << 4);
-       
-       /* By far the most common cases. */
-       if (likely(seg == __USER_CS || seg == __KERNEL_CS))
-               return eip;
-
-       /* Check the segment exists, is within the current LDT/GDT size,
-          that kernel/user (ring 0..3) has the appropriate privilege,
-          that it's a code segment, and get the limit. */
-       __asm__ ("larl %3,%0; lsll %3,%1"
-                : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-       if ((~seg_ar & 0x9800) || eip > seg_limit) {
-               *eip_limit = 0;
-               return 1;        /* So that returned eip > *eip_limit. */
-       }
-
-       /* Get the GDT/LDT descriptor base. 
-          When you look for races in this code remember that
-          LDT and other horrors are only used in user space. */
-       if (seg & (1<<2)) {
-               /* Must lock the LDT while reading it. */
-               down(&current->mm->context.sem);
-               desc = current->mm->context.ldt;
-               desc = (void *)desc + (seg & ~7);
-       } else {
-               /* Must disable preemption while reading the GDT. */
-               desc = (u32 *)get_cpu_gdt_table(get_cpu());
-               desc = (void *)desc + (seg & ~7);
-       }
-
-       /* Decode the code segment base from the descriptor */
-       base =   (desc[0] >> 16) |
-               ((desc[1] & 0xff) << 16) |
-                (desc[1] & 0xff000000);
-
-       if (seg & (1<<2)) { 
-               up(&current->mm->context.sem);
-       } else
-               put_cpu();
-
-       /* Adjust EIP and segment limit, and clamp at the kernel limit.
-          It's legitimate for segments to wrap at 0xffffffff. */
-       seg_limit += base;
-       if (seg_limit < *eip_limit && seg_limit >= base)
-               *eip_limit = seg_limit;
-       return eip + base;
-}
-
-/* 
- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
- */
-static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-{ 
-       unsigned long limit;
-       unsigned long instr = get_segment_eip (regs, &limit);
-       int scan_more = 1;
-       int prefetch = 0; 
-       int i;
-
-       for (i = 0; scan_more && i < 15; i++) { 
-               unsigned char opcode;
-               unsigned char instr_hi;
-               unsigned char instr_lo;
-
-               if (instr > limit)
-                       break;
-               if (__get_user(opcode, (unsigned char *) instr))
-                       break; 
-
-               instr_hi = opcode & 0xf0; 
-               instr_lo = opcode & 0x0f; 
-               instr++;
-
-               switch (instr_hi) { 
-               case 0x20:
-               case 0x30:
-                       /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-                       scan_more = ((instr_lo & 7) == 0x6);
-                       break;
-                       
-               case 0x60:
-                       /* 0x64 thru 0x67 are valid prefixes in all modes. */
-                       scan_more = (instr_lo & 0xC) == 0x4;
-                       break;          
-               case 0xF0:
-                       /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-                       scan_more = !instr_lo || (instr_lo>>1) == 1;
-                       break;                  
-               case 0x00:
-                       /* Prefetch instruction is 0x0F0D or 0x0F18 */
-                       scan_more = 0;
-                       if (instr > limit)
-                               break;
-                       if (__get_user(opcode, (unsigned char *) instr)) 
-                               break;
-                       prefetch = (instr_lo == 0xF) &&
-                               (opcode == 0x0D || opcode == 0x18);
-                       break;                  
-               default:
-                       scan_more = 0;
-                       break;
-               } 
-       }
-       return prefetch;
-}
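
/*
 * Worked example (sketch) of the opcode scan above: for the two-byte
 * sequence 0x0F 0x18 (a prefetch instruction), the first iteration sees
 * instr_hi == 0x00 and instr_lo == 0x0F, reads the second byte 0x18, and
 * sets prefetch = 1.  A leading segment prefix such as 0x2E merely keeps
 * scan_more true and the loop moves on to the next byte.
 */
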
-
-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-                             unsigned long error_code)
-{
-       if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-                    boot_cpu_data.x86 >= 6)) {
-               /* Catch an obscure case of prefetch inside an NX page. */
-               if (nx_enabled && (error_code & 16))
-                       return 0;
-               return __is_prefetch(regs, addr);
-       }
-       return 0;
-} 
-
-asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
-
-/*
- * This routine handles page faults.  It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * error_code:
- *     bit 0 == 0 means no page found, 1 means protection fault
- *     bit 1 == 0 means read, 1 means write
- *     bit 2 == 0 means kernel, 1 means user-mode
- */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
-                             unsigned long address)
-{
-       struct task_struct *tsk;
-       struct mm_struct *mm;
-       struct vm_area_struct * vma;
-       unsigned long page;
-       int write;
-       siginfo_t info;
-
-       /* Set the "privileged fault" bit to something sane. */
-       error_code &= 3;
-       error_code |= (regs->xcs & 2) << 1;
-
-#if 0
-       /* It's safe to allow irq's after cr2 has been saved */
-       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-               local_irq_enable();
-#endif
-
-       tsk = current;
-
-       info.si_code = SEGV_MAPERR;
-
-       /*
-        * We fault-in kernel-space virtual memory on-demand. The
-        * 'reference' page table is init_mm.pgd.
-        *
-        * NOTE! We MUST NOT take any locks for this case. We may
-        * be in an interrupt or a critical region, and should
-        * only copy the information from the master page table,
-        * nothing more.
-        *
-        * This verifies that the fault happens in kernel space
-        * (error_code & 4) == 0, and that the fault was not a
-        * protection error (error_code & 1) == 0.
-        */
-       if (unlikely(address >= TASK_SIZE)) { 
-               if (!(error_code & 5))
-                       goto vmalloc_fault;
-               /* 
-                * Don't take the mm semaphore here. If we fixup a prefetch
-                * fault we could otherwise deadlock.
-                */
-               goto bad_area_nosemaphore;
-       } 
-
-       mm = tsk->mm;
-
-       /*
-        * If we're in an interrupt, have no user context or are running in an
-        * atomic region then we must not take the fault..
-        */
-       if (in_atomic() || !mm)
-               goto bad_area_nosemaphore;
-
-       /* When running in the kernel we expect faults to occur only to
-        * addresses in user space.  All other faults represent errors in the
-        * kernel and should generate an OOPS.  Unfortunately, in the case of an
-        * erroneous fault occurring in a code path which already holds mmap_sem
-        * we will deadlock attempting to validate the fault against the
-        * address space.  Luckily the kernel only validly references user
-        * space from well defined areas of code, which are listed in the
-        * exceptions table.
-        *
-        * As the vast majority of faults will be valid we will only perform
-        * the source reference check when there is a possibility of a deadlock.
-        * Attempt to lock the address space, if we cannot we then validate the
-        * source.  If this is invalid we can skip the address space check,
-        * thus avoiding the deadlock.
-        */
-       if (!down_read_trylock(&mm->mmap_sem)) {
-               if ((error_code & 4) == 0 &&
-                   !search_exception_tables(regs->eip))
-                       goto bad_area_nosemaphore;
-               down_read(&mm->mmap_sem);
-       }
-
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (error_code & 4) {
-               /*
-                * accessing the stack below %esp is always a bug.
-                * The "+ 32" is there due to some instructions (like
-                * pusha) doing post-decrement on the stack and that
-                * doesn't show up until later..
-                */
-               if (address + 32 < regs->esp)
-                       goto bad_area;
-       }
-       if (expand_stack(vma, address))
-               goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-       info.si_code = SEGV_ACCERR;
-       write = 0;
-       switch (error_code & 3) {
-               default:        /* 3: write, present */
-#ifdef TEST_VERIFY_AREA
-                       if (regs->cs == KERNEL_CS)
-                               printk("WP fault at %08lx\n", regs->eip);
-#endif
-                       /* fall through */
-               case 2:         /* write, not present */
-                       if (!(vma->vm_flags & VM_WRITE))
-                               goto bad_area;
-                       write++;
-                       break;
-               case 1:         /* read, present */
-                       goto bad_area;
-               case 0:         /* read, not present */
-                       if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-                               goto bad_area;
-       }
-
- survive:
-       /*
-        * If for any reason at all we couldn't handle the fault,
-        * make sure we exit gracefully rather than endlessly redo
-        * the fault.
-        */
-       switch (handle_mm_fault(mm, vma, address, write)) {
-               case VM_FAULT_MINOR:
-                       tsk->min_flt++;
-                       break;
-               case VM_FAULT_MAJOR:
-                       tsk->maj_flt++;
-                       break;
-               case VM_FAULT_SIGBUS:
-                       goto do_sigbus;
-               case VM_FAULT_OOM:
-                       goto out_of_memory;
-               default:
-                       BUG();
-       }
-
-       /*
-        * Did it hit the DOS screen memory VA from vm86 mode?
-        */
-       if (regs->eflags & VM_MASK) {
-               unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-               if (bit < 32)
-                       tsk->thread.screen_bitmap |= 1 << bit;
-       }
-       up_read(&mm->mmap_sem);
-       return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-       up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
-       /* User mode accesses just cause a SIGSEGV */
-       if (error_code & 4) {
-               /* 
-                * Valid to do another page fault here because this one came 
-                * from user space.
-                */
-               if (is_prefetch(regs, address, error_code))
-                       return;
-
-               tsk->thread.cr2 = address;
-               /* Kernel addresses are always protection faults */
-               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-               tsk->thread.trap_no = 14;
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               /* info.si_code has been set above */
-               info.si_addr = (void __user *)address;
-               force_sig_info(SIGSEGV, &info, tsk);
-               return;
-       }
-
-#ifdef CONFIG_X86_F00F_BUG
-       /*
-        * Pentium F0 0F C7 C8 bug workaround.
-        */
-       if (boot_cpu_data.f00f_bug) {
-               unsigned long nr;
-               
-               nr = (address - idt_descr.address) >> 3;
-
-               if (nr == 6) {
-                       do_invalid_op(regs, 0);
-                       return;
-               }
-       }
-#endif
-
-no_context:
-       /* Are we prepared to handle this kernel fault?  */
-       if (fixup_exception(regs))
-               return;
-
-       /* 
-        * Valid to do another page fault here, because if this fault
-        * had been triggered by is_prefetch fixup_exception would have 
-        * handled it.
-        */
-       if (is_prefetch(regs, address, error_code))
-               return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
-       bust_spinlocks(1);
-
-#ifdef CONFIG_X86_PAE
-       if (error_code & 16) {
-               pte_t *pte = lookup_address(address);
-
-               if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-                       printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
-       }
-#endif
-       if (address < PAGE_SIZE)
-               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-       else
-               printk(KERN_ALERT "Unable to handle kernel paging request");
-       printk(" at virtual address %08lx\n",address);
-       printk(KERN_ALERT " printing eip:\n");
-       printk("%08lx\n", regs->eip);
-       page = ((unsigned long *) cur_pgd)[address >> 22];
-       printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page, machine_to_phys(page));
-       /*
-        * We must not directly access the pte in the highpte
-        * case, the page table might be allocated in highmem.
-        * And let's rather not kmap-atomic the pte, just in case
-        * it's allocated already.
-        */
-#ifndef CONFIG_HIGHPTE
-       if (page & 1) {
-               page &= PAGE_MASK;
-               address &= 0x003ff000;
-               page = machine_to_phys(page);
-               page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-               printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page, machine_to_phys(page));
-       }
-#endif
-       show_trace(NULL, (unsigned long *)&regs[1]);
-       die("Oops", regs, error_code);
-       bust_spinlocks(0);
-       do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
-       up_read(&mm->mmap_sem);
-       if (tsk->pid == 1) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (error_code & 4)
-               do_exit(SIGKILL);
-       goto no_context;
-
-do_sigbus:
-       up_read(&mm->mmap_sem);
-
-       /* Kernel mode? Handle exceptions or die */
-       if (!(error_code & 4))
-               goto no_context;
-
-       /* User space => ok to do another page fault */
-       if (is_prefetch(regs, address, error_code))
-               return;
-
-       tsk->thread.cr2 = address;
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 14;
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = BUS_ADRERR;
-       info.si_addr = (void __user *)address;
-       force_sig_info(SIGBUS, &info, tsk);
-       return;
-
-vmalloc_fault:
-       {
-               /*
-                * Synchronize this task's top level page-table
-                * with the 'reference' page table.
-                *
-                * Do _not_ use "tsk" here. We might be inside
-                * an interrupt in the middle of a task switch..
-                */
-               int index = pgd_index(address);
-               pgd_t *pgd, *pgd_k;
-               pmd_t *pmd, *pmd_k;
-               pte_t *pte_k;
-
-               pgd = index + cur_pgd;
-               pgd_k = init_mm.pgd + index;
-
-               if (!pgd_present(*pgd_k))
-                       goto no_context;
-
-               /*
-                * set_pgd(pgd, *pgd_k); here would be useless on PAE
-                * and redundant with the set_pmd() on non-PAE.
-                */
-
-               pmd = pmd_offset(pgd, address);
-               pmd_k = pmd_offset(pgd_k, address);
-               if (!pmd_present(*pmd_k))
-                       goto no_context;
-               set_pmd(pmd, *pmd_k);
-               xen_flush_page_update_queue(); /* flush PMD update */
-
-               pte_k = pte_offset_kernel(pmd_k, address);
-               if (!pte_present(*pte_k))
-                       goto no_context;
-               return;
-       }
-}
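
Everything in do_page_fault() above keys off the three low bits of error_code described in the comment at its top, with the Xen-specific wrinkle that the user/kernel bit is rebuilt from the saved code segment (regs->xcs & 2) rather than taken directly from the hardware. A standalone decoding sketch, purely illustrative (the strings mirror that comment; nothing here is kernel code):

    #include <stdio.h>

    static void decode_fault(unsigned long error_code, unsigned long xcs)
    {
        /* Mirror the handler's fix-up: keep bits 0-1, rebuild the
         * "user mode" bit 2 from bit 1 of the saved code segment. */
        error_code &= 3;
        error_code |= (xcs & 2) << 1;

        printf("%s, %s, %s mode\n",
               (error_code & 1) ? "protection fault" : "page not present",
               (error_code & 2) ? "write access"     : "read access",
               (error_code & 4) ? "user"             : "kernel");
    }

    int main(void)
    {
        decode_fault(3, 2);  /* e.g. a write to a present, read-only user page */
        return 0;
    }
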
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/highmem.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/highmem.c
deleted file mode 100644 (file)
index 6b6fd16..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-#include <linux/highmem.h>
-
-void *kmap(struct page *page)
-{
-       might_sleep();
-       if (page < highmem_start_page)
-               return page_address(page);
-       return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
-       if (in_interrupt())
-               BUG();
-       if (page < highmem_start_page)
-               return;
-       kunmap_high(page);
-}
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However, when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic(struct page *page, enum km_type type)
-{
-       enum fixed_addresses idx;
-       unsigned long vaddr;
-
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
-       if (page < highmem_start_page)
-               return page_address(page);
-
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-       if (!pte_none(*(kmap_pte-idx)))
-               BUG();
-#endif
-       set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-       __flush_tlb_one(vaddr);
-
-       return (void*) vaddr;
-}
-
-/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection */
-void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
-       enum fixed_addresses idx;
-       unsigned long vaddr;
-
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
-       if (page < highmem_start_page)
-               return page_address(page);
-
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-       if (!pte_none(*(kmap_pte-idx)))
-               BUG();
-#endif
-       set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL_RO));
-       __flush_tlb_one(vaddr);
-
-       return (void*) vaddr;
-}
-
-void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       if (vaddr < FIXADDR_START) { // FIXME
-               dec_preempt_count();
-               preempt_check_resched();
-               return;
-       }
-
-       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-               BUG();
-
-       /*
-        * force other mappings to Oops if they try to access
-        * this pte without first remapping it
-        */
-       pte_clear(kmap_pte-idx);
-       __flush_tlb_one(vaddr);
-#endif
-
-       dec_preempt_count();
-       preempt_check_resched();
-}
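
The slot arithmetic shared by kmap_atomic() and kmap_atomic_pte() above is simply idx = type + KM_TYPE_NR * cpu, and the mapping address is the idx-th fixmap slot counting down from the top of the fixmap area. A standalone sketch of that arithmetic with made-up constants (the *_GUESS values, and the omission of the FIX_KMAP_BEGIN offset, are simplifying assumptions, not the kernel's real layout):

    #include <stdio.h>

    #define KM_TYPE_NR_GUESS  16            /* assumed kmap slots per CPU     */
    #define PAGE_SIZE_GUESS   4096UL
    #define FIXMAP_TOP_GUESS  0xfffff000UL  /* assumed top of the fixmap area */

    static unsigned long kmap_slot_vaddr(int type, int cpu)
    {
        unsigned long idx = type + KM_TYPE_NR_GUESS * cpu;
        /* fixmap addresses grow downwards from the top of the address space */
        return FIXMAP_TOP_GUESS - idx * PAGE_SIZE_GUESS;
    }

    int main(void)
    {
        printf("cpu 1, type 2 -> %#lx\n", kmap_slot_vaddr(2, 1));
        return 0;
    }
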
-
-void kunmap_atomic_force(void *kvaddr, enum km_type type)
-{
-       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       if (vaddr < FIXADDR_START) { // FIXME
-               dec_preempt_count();
-               preempt_check_resched();
-               return;
-       }
-
-       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-               BUG();
-
-       /*
-        * force other mappings to Oops if they try to access
-        * this pte without first remapping it
-        */
-       pte_clear(kmap_pte-idx);
-       __flush_tlb_one(vaddr);
-
-       dec_preempt_count();
-       preempt_check_resched();
-}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
-       unsigned long idx, vaddr = (unsigned long)ptr;
-       pte_t *pte;
-
-       if (vaddr < FIXADDR_START)
-               return virt_to_page(ptr);
-
-       idx = virt_to_fix(vaddr);
-       pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-       return pte_page(*pte);
-}
-
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c
deleted file mode 100644 (file)
index c6078aa..0000000
+++ /dev/null
@@ -1,510 +0,0 @@
-/******************************************************************************
- * mm/hypervisor.c
- * 
- * Update page tables via the hypervisor.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
-
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-/* Linux 2.6 isn't using the traditional batched interface. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define QUEUE_SIZE 2048
-#define pte_offset_kernel pte_offset
-#else
-#define QUEUE_SIZE 128
-#endif
-
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
-
-#if MMU_UPDATE_DEBUG > 0
-page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
-#undef queue_l1_entry_update
-#undef queue_l2_entry_update
-#endif
-#if MMU_UPDATE_DEBUG > 3
-static void DEBUG_allow_pt_reads(void)
-{
-    pte_t *pte;
-    mmu_update_t update;
-    int i;
-    for ( i = idx-1; i >= 0; i-- )
-    {
-        pte = update_debug_queue[i].ptep;
-        if ( pte == NULL ) continue;
-        update_debug_queue[i].ptep = NULL;
-        update.ptr = virt_to_machine(pte);
-        update.val = update_debug_queue[i].pteval;
-        HYPERVISOR_mmu_update(&update, 1, NULL);
-    }
-}
-static void DEBUG_disallow_pt_read(unsigned long va)
-{
-    pte_t *pte;
-    pmd_t *pmd;
-    pgd_t *pgd;
-    unsigned long pteval;
-    /*
-     * We may fault because of an already outstanding update.
-     * That's okay -- it'll get fixed up in the fault handler.
-     */
-    mmu_update_t update;
-    pgd = pgd_offset_k(va);
-    pmd = pmd_offset(pgd, va);
-    pte = pte_offset_kernel(pmd, va); /* XXXcl */
-    update.ptr = virt_to_machine(pte);
-    pteval = *(unsigned long *)pte;
-    update.val = pteval & ~_PAGE_PRESENT;
-    HYPERVISOR_mmu_update(&update, 1, NULL);
-    update_debug_queue[idx].ptep = pte;
-    update_debug_queue[idx].pteval = pteval;
-}
-#endif
-
-#if MMU_UPDATE_DEBUG > 1
-#undef queue_pt_switch
-#undef queue_tlb_flush
-#undef queue_invlpg
-#undef queue_pgd_pin
-#undef queue_pgd_unpin
-#undef queue_pte_pin
-#undef queue_pte_unpin
-#undef queue_set_ldt
-#endif
-
-
-/*
- * MULTICALL_flush_page_update_queue:
- *   This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
-    unsigned long flags;
-    unsigned int _idx;
-    spin_lock_irqsave(&update_lock, flags);
-    if ( (_idx = idx) != 0 ) 
-    {
-#if MMU_UPDATE_DEBUG > 1
-           if (idx > 1)
-        printk("Flushing %d entries from pt update queue\n", idx);
-#endif
-#if MMU_UPDATE_DEBUG > 3
-        DEBUG_allow_pt_reads();
-#endif
-        idx = 0;
-        wmb(); /* Make sure index is cleared first to avoid double updates. */
-        queue_multicall3(__HYPERVISOR_mmu_update, 
-                         (unsigned long)update_queue, 
-                         (unsigned long)_idx, 
-                         (unsigned long)NULL);
-    }
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
-    unsigned int _idx = idx;
-#if MMU_UPDATE_DEBUG > 1
-    if (idx > 1)
-    printk("Flushing %d entries from pt update queue\n", idx);
-#endif
-#if MMU_UPDATE_DEBUG > 3
-    DEBUG_allow_pt_reads();
-#endif
-    idx = 0;
-    wmb(); /* Make sure index is cleared first to avoid double updates. */
-    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
-    {
-        printk(KERN_ALERT "Failed to execute MMU updates.\n");
-        BUG();
-    }
-}
-
-void _flush_page_update_queue(void)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    if ( idx != 0 ) __flush_page_update_queue();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
-    idx++;
-    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
-    idx++;
-    __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-#if MMU_UPDATE_DEBUG > 3
-    DEBUG_disallow_pt_read((unsigned long)ptr);
-#endif
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_tlb_flush(void)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_invlpg(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
-    update_queue[idx].ptr |= ptr & PAGE_MASK;
-    update_queue[idx].val  = MMUEXT_INVLPG;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_set_ldt(unsigned long ptr, unsigned long len)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
-    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    update_queue[idx].val = pfn;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
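
Every queue_* helper above follows the same pattern: take update_lock, append one mmu_update_t to a fixed-size array, and flush the whole batch to the hypervisor either when the array fills or when a caller explicitly asks. A toy, single-threaded model of that batching, with stdout standing in for the hypercall (sizes and names are illustrative):

    #include <stdio.h>

    #define QSIZE 4

    struct update { unsigned long ptr, val; };
    static struct update q[QSIZE];
    static unsigned int qidx;

    static void flush(void)
    {
        if (qidx == 0)
            return;
        printf("flush %u update(s)\n", qidx);   /* stands in for the hypercall */
        qidx = 0;
    }

    static void queue_update(unsigned long ptr, unsigned long val)
    {
        q[qidx].ptr = ptr;
        q[qidx].val = val;
        if (++qidx == QSIZE)
            flush();                            /* auto-flush when full */
    }

    int main(void)
    {
        for (unsigned long i = 0; i < 6; i++)
            queue_update(0x1000 + i * 8, i);
        flush();                                /* final explicit flush */
        return 0;
    }
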
-
-/* queue and flush versions of the above */
-void xen_l1_entry_update(pte_t *ptr, unsigned long val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-#if MMU_UPDATE_DEBUG > 3
-    DEBUG_disallow_pt_read((unsigned long)ptr);
-#endif
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pt_switch(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_tlb_flush(void)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_invlpg(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
-    update_queue[idx].ptr |= ptr & PAGE_MASK;
-    update_queue[idx].val  = MMUEXT_INVLPG;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pgd_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pgd_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pte_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pte_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_set_ldt(unsigned long ptr, unsigned long len)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
-    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    update_queue[idx].val = pfn;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-
-unsigned long allocate_empty_lowmem_region(unsigned long pages)
-{
-    pgd_t         *pgd; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long *pfn_array;
-    unsigned long  vstart;
-    unsigned long  i;
-    int            ret;
-    unsigned int   order = get_order(pages*PAGE_SIZE);
-
-    vstart = __get_free_pages(GFP_KERNEL, order);
-    if ( vstart == 0 )
-        return 0UL;
-
-    scrub_pages(vstart, 1 << order);
-
-    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-    if ( pfn_array == NULL )
-        BUG();
-
-    for ( i = 0; i < (1<<order); i++ )
-    {
-        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
-        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
-        queue_l1_entry_update(pte, 0);
-        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
-    }
-
-    /* Flush updates through and flush the TLB. */
-    xen_tlb_flush();
-
-    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
-                                pfn_array, 1<<order, 0);
-    if ( unlikely(ret != (1<<order)) )
-    {
-        printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
-        BUG();
-    }
-
-    vfree(pfn_array);
-
-    return vstart;
-}
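
One detail worth noting in allocate_empty_lowmem_region() above, and in its inverse below: the request is rounded up to a power-of-two block by get_order(), so the number of frames handed to the hypervisor is 1 << order rather than the pages argument itself. A standalone sketch of that rounding (PAGE_SHIFT of 12 assumed; get_order_sketch is a stand-in for the kernel's get_order):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned int get_order_sketch(unsigned long size)
    {
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;
        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int order = get_order_sketch(13 * PAGE_SIZE);
        printf("13 pages -> order %u (%lu frames)\n", order, 1UL << order);
        return 0;
    }
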
-
-void deallocate_lowmem_region(unsigned long vstart, unsigned long pages)
-{
-    pgd_t         *pgd; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long *pfn_array;
-    unsigned long  i;
-    int            ret;
-    unsigned int   order = get_order(pages*PAGE_SIZE);
-
-    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-    if ( pfn_array == NULL )
-        BUG();
-
-    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
-                                pfn_array, 1<<order, 0);
-    if ( unlikely(ret != (1<<order)) )
-    {
-        printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
-               ret);
-        BUG();
-    }
-
-    for ( i = 0; i < (1<<order); i++ )
-    {
-        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        queue_l1_entry_update(pte, (pfn_array[i]<<PAGE_SHIFT)|__PAGE_KERNEL);
-        queue_machphys_update(pfn_array[i], __pa(vstart)>>PAGE_SHIFT);
-        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = pfn_array[i];
-    }
-
-    flush_page_update_queue();
-
-    vfree(pfn_array);
-
-    free_pages(vstart, order);
-}
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c
deleted file mode 100644 (file)
index 41b5d28..0000000
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- *  linux/arch/i386/mm/init.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- *
- *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/efi.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/dma.h>
-#include <asm/fixmap.h>
-#include <asm/e820.h>
-#include <asm/apic.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm-xen/hypervisor.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-unsigned long highstart_pfn, highend_pfn;
-
-static int do_test_wp_bit(void);
-
-/*
- * Creates a middle page table and puts a pointer to it in the
- * given global directory entry. This only returns the gd entry
- * in non-PAE compilation mode, since the middle layer is folded.
- */
-static pmd_t * __init one_md_table_init(pgd_t *pgd)
-{
-       pmd_t *pmd_table;
-               
-#ifdef CONFIG_X86_PAE
-       pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-       set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-       if (pmd_table != pmd_offset(pgd, 0)) 
-               BUG();
-#else
-       pmd_table = pmd_offset(pgd, 0);
-#endif
-
-       return pmd_table;
-}
-
-/*
- * Create a page table and place a pointer to it in a middle page
- * directory entry.
- */
-static pte_t * __init one_page_table_init(pmd_t *pmd)
-{
-       if (pmd_none(*pmd)) {
-               pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-               if (page_table != pte_offset_kernel(pmd, 0))
-                       BUG();  
-
-               return page_table;
-       }
-       
-       return pte_offset_kernel(pmd, 0);
-}
-
-/*
- * This function initializes a certain range of kernel virtual memory 
- * with new bootmem page tables, everywhere page tables are missing in
- * the given range.
- */
-
-/*
- * NOTE: The pagetables are allocated contiguous on the physical space 
- * so we can cache the place of the first one and move around without 
- * checking the pgd every time.
- */
-static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       int pgd_idx, pmd_idx;
-       unsigned long vaddr;
-
-       vaddr = start;
-       pgd_idx = pgd_index(vaddr);
-       pmd_idx = pmd_index(vaddr);
-       pgd = pgd_base + pgd_idx;
-
-       for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
-               if (pgd_none(*pgd)) 
-                       one_md_table_init(pgd);
-
-               pmd = pmd_offset(pgd, vaddr);
-               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-                       if (pmd_none(*pmd)) 
-                               one_page_table_init(pmd);
-
-                       vaddr += PMD_SIZE;
-               }
-               pmd_idx = 0;
-       }
-}
-
-void __init protect_page(pgd_t *pgd, void *page, int mode)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       unsigned long addr;
-
-       addr = (unsigned long)page;
-       pgd += pgd_index(addr);
-       pmd = pmd_offset(pgd, addr);
-       pte = pte_offset_kernel(pmd, addr);
-       if (!pte_present(*pte))
-               return;
-       queue_l1_entry_update(pte, mode ? pte_val_ma(*pte) & ~_PAGE_RW :
-                                       pte_val_ma(*pte) | _PAGE_RW);
-}
-
-void __init protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       int pgd_idx, pmd_idx;
-
-       protect_page(dpgd, spgd, mode);
-
-       for (pgd_idx = 0; pgd_idx < PTRS_PER_PGD_NO_HV; spgd++, pgd_idx++) {
-               pmd = pmd_offset(spgd, 0);
-               if (pmd_none(*pmd))
-                       continue;
-               for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
-                       pte = pte_offset_kernel(pmd, 0);
-                       protect_page(dpgd, pte, mode);
-               }
-       }
-}
-
-static inline int is_kernel_text(unsigned long addr)
-{
-       if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
-               return 1;
-       return 0;
-}
-
-/*
- * This maps the physical memory to kernel virtual address space, a total 
- * of max_low_pfn pages, by creating page tables starting from address 
- * PAGE_OFFSET.
- */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-{
-       unsigned long pfn;
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       int pgd_idx, pmd_idx, pte_ofs;
-
-       pgd_idx = pgd_index(PAGE_OFFSET);
-       pgd = pgd_base + pgd_idx;
-       pfn = 0;
-       pmd_idx = pmd_index(PAGE_OFFSET);
-       pte_ofs = pte_index(PAGE_OFFSET);
-
-       for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
-               pmd = one_md_table_init(pgd);
-               if (pfn >= max_low_pfn)
-                       continue;
-               pmd += pmd_idx;
-               for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-                       unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-
-                       /* Map with big pages if possible, otherwise create normal page tables. */
-                       if (cpu_has_pse) {
-                               unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-
-                               if (is_kernel_text(address) || is_kernel_text(address2))
-                                       set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-                               else
-                                       set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-                               pfn += PTRS_PER_PTE;
-                       } else {
-                               pte = one_page_table_init(pmd);
-
-                               pte += pte_ofs;
-                               for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
-                                               if (is_kernel_text(address))
-                                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-                                               else
-                                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-                               }
-                               pte_ofs = 0;
-                       }
-                       flush_page_update_queue();
-               }
-               pmd_idx = 0;
-       }       
-}
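
For scale, each iteration of the pmd loop above maps one pmd entry's worth of memory: with PSE that is a single large page covering PTRS_PER_PTE small pages (1024 on non-PAE i386, i.e. 4 MiB), otherwise a full page table of 4 KiB entries. A rough standalone count of how many such entries cover low memory (the 896 MiB figure is only an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned long max_low_pfn  = 896UL * 1024 * 1024 / 4096; /* example lowmem */
        unsigned long ptes_per_pmd = 1024;                       /* non-PAE i386   */

        printf("%lu pfns -> ~%lu pmd entries\n",
               max_low_pfn, (max_low_pfn + ptes_per_pmd - 1) / ptes_per_pmd);
        return 0;
    }
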
-
-static inline int page_kills_ppro(unsigned long pagenr)
-{
-       if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-               return 1;
-       return 0;
-}
-
-extern int is_available_memory(efi_memory_desc_t *);
-
-static inline int page_is_ram(unsigned long pagenr)
-{
-       int i;
-       unsigned long addr, end;
-
-       if (efi_enabled) {
-               efi_memory_desc_t *md;
-
-               for (i = 0; i < memmap.nr_map; i++) {
-                       md = &memmap.map[i];
-                       if (!is_available_memory(md))
-                               continue;
-                       addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-                       end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-
-                       if ((pagenr >= addr) && (pagenr < end))
-                               return 1;
-               }
-               return 0;
-       }
-
-       for (i = 0; i < e820.nr_map; i++) {
-
-               if (e820.map[i].type != E820_RAM)       /* not usable memory */
-                       continue;
-               /*
-                *      !!!FIXME!!! Some BIOSen report areas as RAM that
-                *      are not. Notably the 640->1Mb area. We need a sanity
-                *      check here.
-                */
-               addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-               end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-               if  ((pagenr >= addr) && (pagenr < end))
-                       return 1;
-       }
-       return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
-
-#define kmap_get_fixmap_pte(vaddr)                                     \
-       pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
-       unsigned long kmap_vstart;
-
-       /* cache the first kmap pte */
-       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
-       kmap_prot = PAGE_KERNEL;
-}
-
-void __init permanent_kmaps_init(pgd_t *pgd_base)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       unsigned long vaddr;
-
-       vaddr = PKMAP_BASE;
-       page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-
-       pgd = swapper_pg_dir + pgd_index(vaddr);
-       pmd = pmd_offset(pgd, vaddr);
-       pte = pte_offset_kernel(pmd, vaddr);
-       pkmap_page_table = pte; 
-}
-
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
-{
-       if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-               ClearPageReserved(page);
-               set_bit(PG_highmem, &page->flags);
-               set_page_count(page, 1);
-               __free_page(page);
-               totalhigh_pages++;
-       } else
-               SetPageReserved(page);
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-void __init set_highmem_pages_init(int bad_ppro) 
-{
-       int pfn;
-       for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-               one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-       totalram_pages += totalhigh_pages;
-}
-#else
-extern void set_highmem_pages_init(int);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-#else
-#define kmap_init() do { } while (0)
-#define permanent_kmaps_init(pgd_base) do { } while (0)
-#define set_highmem_pages_init(bad_ppro) do { } while (0)
-#endif /* CONFIG_HIGHMEM */
-
-unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-
-#ifndef CONFIG_DISCONTIGMEM
-#define remap_numa_kva() do {} while (0)
-#else
-extern void __init remap_numa_kva(void);
-#endif
-
-static void __init pagetable_init (void)
-{
-       unsigned long vaddr;
-       pgd_t *pgd_base = swapper_pg_dir;
-
-#ifdef CONFIG_X86_PAE
-       int i;
-       /* Init entries of the first-level page table to the zero page */
-       for (i = 0; i < PTRS_PER_PGD; i++)
-               set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
-#endif
-
-       /* Enable PSE if available */
-       if (cpu_has_pse) {
-               set_in_cr4(X86_CR4_PSE);
-       }
-
-       /* Enable PGE if available */
-       if (cpu_has_pge) {
-               set_in_cr4(X86_CR4_PGE);
-               __PAGE_KERNEL |= _PAGE_GLOBAL;
-               __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-       }
-
-       kernel_physical_mapping_init(pgd_base);
-       remap_numa_kva();
-
-       /*
-        * Fixed mappings, only the page table structure has to be
-        * created - mappings will be set by set_fixmap():
-        */
-       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-       page_table_range_init(vaddr, 0, pgd_base);
-
-       permanent_kmaps_init(pgd_base);
-
-#ifdef CONFIG_X86_PAE
-       /*
-        * Add low memory identity-mappings - SMP needs it when
-        * starting up on an AP from real-mode. In the non-PAE
-        * case we already have these mappings through head.S.
-        * All user-space mappings are explicitly cleared after
-        * SMP startup.
-        */
-       pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
-#endif
-}
-
-#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
-/*
- * Swap suspend & friends need this for resume because things like the intel-agp
- * driver might have split up a kernel 4MB mapping.
- */
-char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-       __attribute__ ((aligned (PAGE_SIZE)));
-
-static inline void save_pg_dir(void)
-{
-       memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-}
-#else
-static inline void save_pg_dir(void)
-{
-}
-#endif
-
-void zap_low_mappings (void)
-{
-       int i;
-
-       save_pg_dir();
-
-       /*
-        * Zap initial low-memory mappings.
-        *
-        * Note that "pgd_clear()" doesn't do it for
-        * us, because pgd_clear() is a no-op on i386.
-        */
-       for (i = 0; i < USER_PTRS_PER_PGD; i++)
-#ifdef CONFIG_X86_PAE
-               set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-#else
-               set_pgd(swapper_pg_dir+i, __pgd(0));
-#endif
-       flush_tlb_all();
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-void __init zone_sizes_init(void)
-{
-       unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-       unsigned int max_dma, high, low;
-       
-       max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-       low = max_low_pfn;
-       high = highend_pfn;
-       
-       if (low < max_dma)
-               zones_size[ZONE_DMA] = low;
-       else {
-               zones_size[ZONE_DMA] = max_dma;
-               zones_size[ZONE_NORMAL] = low - max_dma;
-#ifdef CONFIG_HIGHMEM
-               zones_size[ZONE_HIGHMEM] = high - low;
-#endif
-       }
-       free_area_init(zones_size);     
-}
-#else
-extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-static int disable_nx __initdata = 0;
-u64 __supported_pte_mask = ~_PAGE_NX;
-
-/*
- * noexec = on|off
- *
- * Control non executable mappings.
- *
- * on      Enable
- * off     Disable
- */
-static int __init noexec_setup(char *str)
-{
-       if (!strncmp(str, "on",2) && cpu_has_nx) {
-               __supported_pte_mask |= _PAGE_NX;
-               disable_nx = 0;
-       } else if (!strncmp(str,"off",3)) {
-               disable_nx = 1;
-               __supported_pte_mask &= ~_PAGE_NX;
-       }
-       return 1;
-}
-
-__setup("noexec=", noexec_setup);
-
-#ifdef CONFIG_X86_PAE
-int nx_enabled = 0;
-
-static void __init set_nx(void)
-{
-       unsigned int v[4], l, h;
-
-       if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-               cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-               if ((v[3] & (1 << 20)) && !disable_nx) {
-                       rdmsr(MSR_EFER, l, h);
-                       l |= EFER_NX;
-                       wrmsr(MSR_EFER, l, h);
-                       nx_enabled = 1;
-                       __supported_pte_mask |= _PAGE_NX;
-               }
-       }
-}
-
-/*
- * Enables/disables executability of a given kernel page and
- * returns the previous setting.
- */
-int __init set_kernel_exec(unsigned long vaddr, int enable)
-{
-       pte_t *pte;
-       int ret = 1;
-
-       if (!nx_enabled)
-               goto out;
-
-       pte = lookup_address(vaddr);
-       BUG_ON(!pte);
-
-       if (!pte_exec_kernel(*pte))
-               ret = 0;
-
-       if (enable)
-               pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-       else
-               pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-       __flush_tlb_all();
-out:
-       return ret;
-}
-
-#endif
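
The set_nx()/noexec= machinery above reduces to one feature test: extended CPUID leaf 0x80000001 must advertise Execute Disable in EDX bit 20 before EFER.NX is enabled and _PAGE_NX is allowed into __supported_pte_mask. A minimal sketch of just that bit test (the CPUID value is faked here; this is illustration, not detection code):

    #include <stdio.h>

    #define NX_FEATURE_BIT (1u << 20)   /* CPUID 0x80000001, EDX bit 20 */

    static int nx_supported(unsigned int edx)
    {
        return (edx & NX_FEATURE_BIT) != 0;
    }

    int main(void)
    {
        unsigned int fake_edx = NX_FEATURE_BIT;   /* pretend the CPU has NX */
        printf("NX %s\n", nx_supported(fake_edx) ? "supported" : "not supported");
        return 0;
    }
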
-
-/*
- * paging_init() sets up the page tables - note that the first 8MB are
- * already mapped by head.S.
- *
- * This routine also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
- */
-void __init paging_init(void)
-{
-       pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
-       pgd_t *new_pgd = swapper_pg_dir;
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-       int i;
-#endif
-
-#ifdef CONFIG_X86_PAE
-       set_nx();
-       if (nx_enabled)
-               printk("NX (Execute Disable) protection: active\n");
-#endif
-
-       pagetable_init();
-
-       /*
-        * Write-protect both page tables within both page tables.
-        * That's three ops, as the old p.t. is already protected
-        * within the old p.t. Then pin the new table, switch tables,
-        * and unprotect the old table.
-        */
-       protect_pagetable(new_pgd, old_pgd, PROT_ON);
-       protect_pagetable(new_pgd, new_pgd, PROT_ON);
-       protect_pagetable(old_pgd, new_pgd, PROT_ON);
-       queue_pgd_pin(__pa(new_pgd));
-       load_cr3(new_pgd);
-       queue_pgd_unpin(__pa(old_pgd));
-       __flush_tlb_all(); /* implicit flush */
-       protect_pagetable(new_pgd, old_pgd, PROT_OFF);
-       flush_page_update_queue();
-
-       /* Completely detached from old tables, so free them. */
-       free_bootmem(__pa(old_pgd), xen_start_info.nr_pt_frames << PAGE_SHIFT);
-
-#ifdef CONFIG_X86_PAE
-       /*
-        * We will bail out later - printk doesn't work right now so
-        * the user would just see a hanging kernel.
-        */
-       if (cpu_has_pae)
-               set_in_cr4(X86_CR4_PAE);
-#endif
-       __flush_tlb_all();
-
-       kmap_init();
-       zone_sizes_init();
-
-       /* Switch to the real shared_info page, and clear the dummy page. */
-       flush_page_update_queue();
-       set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-       memset(empty_zero_page, 0, sizeof(empty_zero_page));
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       /* Setup mapping of lower 1st MB */
-       for (i = 0; i < NR_FIX_ISAMAPS; i++)
-               if (xen_start_info.flags & SIF_PRIVILEGED)
-                       set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-               else
-                       set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
-                                        virt_to_machine(empty_zero_page));
-#endif
-}
-
-/*
- * Test if the WP bit works in supervisor mode. It isn't supported on 386's
- * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
- * used to involve black magic jumps to work around some nasty CPU bugs,
- * but fortunately the switch to using exceptions got rid of all that.
- */
-
-void __init test_wp_bit(void)
-{
-       printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-
-       /* Any page-aligned address will do, the test is non-destructive */
-       __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-       boot_cpu_data.wp_works_ok = do_test_wp_bit();
-       clear_fixmap(FIX_WP_TEST);
-
-       if (!boot_cpu_data.wp_works_ok) {
-               printk("No.\n");
-#ifdef CONFIG_X86_WP_WORKS_OK
-               panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-#endif
-       } else {
-               printk("Ok.\n");
-       }
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-static void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-       highmem_start_page = pfn_to_page(highstart_pfn);
-       max_mapnr = num_physpages = highend_pfn;
-#else
-       max_mapnr = num_physpages = max_low_pfn;
-#endif
-}
-#define __free_all_bootmem() free_all_bootmem()
-#else
-#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
-extern void set_max_mapnr_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-static struct kcore_list kcore_mem, kcore_vmalloc; 
-
-void __init mem_init(void)
-{
-       extern int ppro_with_ram_bug(void);
-       int codesize, reservedpages, datasize, initsize;
-       int tmp;
-       int bad_ppro;
-
-#ifndef CONFIG_DISCONTIGMEM
-       if (!mem_map)
-               BUG();
-#endif
-       
-       bad_ppro = ppro_with_ram_bug();
-
-#ifdef CONFIG_HIGHMEM
-       /* check that fixmap and pkmap do not overlap */
-       if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-               printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-               printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-                               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-               BUG();
-       }
-#endif
-       set_max_mapnr_init();
-
-#ifdef CONFIG_HIGHMEM
-       high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
-#else
-       high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-#endif
-
-       /* this will put all low memory onto the freelists */
-       totalram_pages += __free_all_bootmem();
-
-       reservedpages = 0;
-       for (tmp = 0; tmp < max_low_pfn; tmp++)
-               /*
-                * Only count reserved RAM pages
-                */
-               if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-                       reservedpages++;
-
-       set_highmem_pages_init(bad_ppro);
-
-       codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-       datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-                  VMALLOC_END-VMALLOC_START);
-
-       printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-               num_physpages << (PAGE_SHIFT-10),
-               codesize >> 10,
-               reservedpages << (PAGE_SHIFT-10),
-               datasize >> 10,
-               initsize >> 10,
-               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-              );
-
-#ifdef CONFIG_X86_PAE
-       if (!cpu_has_pae)
-               panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-#endif
-       if (boot_cpu_data.wp_works_ok < 0)
-               test_wp_bit();
-
-       /*
-        * Subtle. SMP is doing its boot stuff late (because it has to
-        * fork idle threads) - but it also needs low mappings for the
-        * protected-mode entry to work. We zap these entries only after
-        * the WP-bit has been tested.
-        */
-#ifndef CONFIG_SMP
-       zap_low_mappings();
-#endif
-}
-
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
-kmem_cache_t *pte_cache;
-
-void __init pgtable_cache_init(void)
-{
-       pte_cache = kmem_cache_create("pte",
-                               PTRS_PER_PTE*sizeof(pte_t),
-                               PTRS_PER_PTE*sizeof(pte_t),
-                               0,
-                               pte_ctor,
-                               pte_dtor);
-       if (!pte_cache)
-               panic("pgtable_cache_init(): Cannot create pte cache");
-       if (PTRS_PER_PMD > 1) {
-               pmd_cache = kmem_cache_create("pmd",
-                                       PTRS_PER_PMD*sizeof(pmd_t),
-                                       PTRS_PER_PMD*sizeof(pmd_t),
-                                       0,
-                                       pmd_ctor,
-                                       NULL);
-               if (!pmd_cache)
-                       panic("pgtable_cache_init(): cannot create pmd cache");
-       }
-       pgd_cache = kmem_cache_create("pgd",
-                               PTRS_PER_PGD*sizeof(pgd_t),
-                               PTRS_PER_PGD*sizeof(pgd_t),
-                               0,
-                               pgd_ctor,
-                               pgd_dtor);
-       if (!pgd_cache)
-               panic("pgtable_cache_init(): Cannot create pgd cache");
-}
-
-/*
- * This function cannot be __init, since exceptions don't work in that
- * section.  Put this after the callers, so that it cannot be inlined.
- */
-static int do_test_wp_bit(void)
-{
-       char tmp_reg;
-       int flag;
-
-       __asm__ __volatile__(
-               "       movb %0,%1      \n"
-               "1:     movb %1,%0      \n"
-               "       xorl %2,%2      \n"
-               "2:                     \n"
-               ".section __ex_table,\"a\"\n"
-               "       .align 4        \n"
-               "       .long 1b,2b     \n"
-               ".previous              \n"
-               :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-                "=q" (tmp_reg),
-                "=r" (flag)
-               :"2" (1)
-               :"memory");
-       
-       return flag;
-}
-
-void free_initmem(void)
-{
-       unsigned long addr;
-
-       addr = (unsigned long)(&__init_begin);
-       for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               set_page_count(virt_to_page(addr), 1);
-               free_page(addr);
-               totalram_pages++;
-       }
-       printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       if (start < end)
-               printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-       for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(start));
-               set_page_count(virt_to_page(start), 1);
-               free_page(start);
-               totalram_pages++;
-       }
-}
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/ioremap.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/ioremap.c
deleted file mode 100644 (file)
index ddf2ebb..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * arch/i386/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{ return NULL; }
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{ return NULL; }
-
-void iounmap(void *addr)
-{ }
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{ return NULL; }
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{ }
-
-#else
-
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-       unsigned long phys_addr, unsigned long flags)
-{
-       unsigned long end;
-       unsigned long pfn;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       if (address >= end)
-               BUG();
-       pfn = phys_addr >> PAGE_SHIFT;
-       do {
-               if (!pte_none(*pte)) {
-                       printk("remap_area_pte: page already exists\n");
-                       BUG();
-               }
-               set_pte(pte, pfn_pte_ma(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW | 
-                                       _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
-               address += PAGE_SIZE;
-               pfn++;
-               pte++;
-       } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long phys_addr, unsigned long flags)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       phys_addr -= address;
-       if (address >= end)
-               BUG();
-       do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-                                unsigned long size, unsigned long flags)
-{
-       int error;
-       pgd_t * dir;
-       unsigned long end = address + size;
-
-       phys_addr -= address;
-       dir = pgd_offset(&init_mm, address);
-       flush_cache_all();
-       if (address >= end)
-               BUG();
-       spin_lock(&init_mm.page_table_lock);
-       do {
-               pmd_t *pmd;
-               pmd = pmd_alloc(&init_mm, dir, address);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               if (remap_area_pmd(pmd, address, end - address,
-                                        phys_addr + address, flags))
-                       break;
-               error = 0;
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
-       flush_tlb_all();
-       return error;
-}
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
-       void * addr;
-       struct vm_struct * area;
-       unsigned long offset, last_addr;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-        if (phys_addr >= 0x0 && last_addr < 0x100000)
-                return isa_bus_to_virt(phys_addr);
-
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-               return phys_to_virt(phys_addr);
-
-       /*
-        * Don't allow anybody to remap normal RAM that we're using..
-        */
-       if (machine_to_phys(phys_addr) < virt_to_phys(high_memory)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = bus_to_virt(phys_addr);
-               t_end = t_addr + (size - 1);
-          
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
-       }
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-       /*
-        * Ok, go for it..
-        */
-       area = get_vm_area(size, VM_IOREMAP);
-       if (!area)
-               return NULL;
-       area->phys_addr = phys_addr;
-       addr = area->addr;
-       if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-               vunmap(addr);
-               return NULL;
-       }
-       return (void *) (offset + (char *)addr);
-}
-
-
-/**
- * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address. 
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many 
- * busses. In particular, driver authors should read up on PCI writes.
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- * 
- * Must be freed with iounmap.
- */
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       unsigned long last_addr;
-       void *p = __ioremap(phys_addr, size, _PAGE_PCD);
-       if (!p) 
-               return p; 
-
-       /* Guaranteed to be > phys_addr, as per __ioremap() */
-       last_addr = phys_addr + size - 1;
-
-       if (machine_to_phys(last_addr) < virt_to_phys(high_memory)) { 
-               struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-               unsigned long npages;
-
-               phys_addr &= PAGE_MASK;
-
-               /* This might overflow and become zero.. */
-               last_addr = PAGE_ALIGN(last_addr);
-
-               /* .. but that's ok, because modulo-2**n arithmetic will make
-               * the page-aligned "last - first" come out right.
-               */
-               npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-               if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
-                       iounmap(p); 
-                       p = NULL;
-               }
-               global_flush_tlb();
-       }
-
-       return p;                                       
-}
-
-void iounmap(void *addr)
-{
-       struct vm_struct *p;
-        if ((unsigned long)addr <= 0x100000)
-                return;
-       if (addr <= high_memory) 
-               return; 
-       p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
-       if (!p) { 
-               printk("__iounmap: bad address %p\n", addr);
-               return;
-       } 
-
-       if (p->flags && machine_to_phys(p->phys_addr) < virt_to_phys(high_memory)) { 
-               change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-                                p->size >> PAGE_SHIFT,
-                                PAGE_KERNEL);                           
-               global_flush_tlb();
-       } 
-       kfree(p); 
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       unsigned long offset, last_addr;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-        if (phys_addr >= 0x0 && last_addr < 0x100000)
-                return isa_bus_to_virt(phys_addr);
-
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-               return phys_to_virt(phys_addr);
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr) - phys_addr;
-
-       /*
-        * Mappings have to fit in the FIX_BTMAP area.
-        */
-       nrpages = size >> PAGE_SHIFT;
-       if (nrpages > NR_FIX_BTMAPS)
-               return NULL;
-
-       /*
-        * Ok, go for it..
-        */
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               set_fixmap_ma(idx, phys_addr);
-               phys_addr += PAGE_SIZE;
-               --idx;
-               --nrpages;
-       }
-       return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-       unsigned long virt_addr;
-       unsigned long offset;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       virt_addr = (unsigned long)addr;
-        if (virt_addr < 0x100000)
-                return;
-       if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-               return;
-       offset = virt_addr & ~PAGE_MASK;
-       nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               clear_fixmap(idx);
-               --idx;
-               --nrpages;
-       }
-}
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte, 
-                                        unsigned long address, 
-                                        unsigned long size,
-                                       mmu_update_t **v)
-{
-    unsigned long end;
-
-    address &= ~PMD_MASK;
-    end = address + size;
-    if (end > PMD_SIZE)
-        end = PMD_SIZE;
-    if (address >= end)
-        BUG();
-
-    do {
-        (*v)->ptr = virt_to_machine(pte);
-        (*v)++;
-        address += PAGE_SIZE;
-        pte++;
-    } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
-                                        pmd_t *pmd, 
-                                        unsigned long address, 
-                                        unsigned long size,
-                                       mmu_update_t **v)
-{
-    unsigned long end;
-
-    address &= ~PGDIR_MASK;
-    end = address + size;
-    if (end > PGDIR_SIZE)
-        end = PGDIR_SIZE;
-    if (address >= end)
-        BUG();
-    do {
-        pte_t *pte = pte_alloc_map(mm, pmd, address);
-        if (!pte)
-            return -ENOMEM;
-        direct_remap_area_pte(pte, address, end - address, v);
-       pte_unmap(pte);
-        address = (address + PMD_SIZE) & PMD_MASK;
-        pmd++;
-    } while (address && (address < end));
-    return 0;
-}
-int __direct_remap_area_pages(struct mm_struct *mm,
-                             unsigned long address, 
-                             unsigned long size, 
-                             mmu_update_t *v)
-{
-    pgd_t * dir;
-    unsigned long end = address + size;
-
-    dir = pgd_offset(mm, address);
-    flush_cache_all();
-    if (address >= end)
-        BUG();
-    spin_lock(&mm->page_table_lock);
-    do {
-        pmd_t *pmd = pmd_alloc(mm, dir, address);
-        if (!pmd)
-           return -ENOMEM;
-        direct_remap_area_pmd(mm, pmd, address, end - address, &v);
-        address = (address + PGDIR_SIZE) & PGDIR_MASK;
-        dir++;
-
-    } while (address && (address < end));
-    spin_unlock(&mm->page_table_lock);
-    flush_tlb_all();
-    return 0;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                            unsigned long address, 
-                            unsigned long machine_addr,
-                            unsigned long size, 
-                            pgprot_t prot,
-                            domid_t  domid)
-{
-    int i;
-    unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
-
-    u[0].ptr  = MMU_EXTENDED_COMMAND;
-    u[0].val  = MMUEXT_SET_FOREIGNDOM;
-    u[0].val |= (unsigned long)domid << 16;
-    v = w = &u[1];
-
-    start_address = address;
-
-    for( i = 0; i < size; i += PAGE_SIZE )
-    {
-       if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
-       {
-           /* Fill in the PTE pointers. */
-           __direct_remap_area_pages( mm,
-                                      start_address, 
-                                      address-start_address, 
-                                      w);
-           
-           if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
-               return -EFAULT;     
-           v = w;
-           start_address = address;
-       }
-
-       /*
-         * Fill in the machine address: PTE ptr is done later by
-         * __direct_remap_area_pages(). 
-         */
-        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
-        machine_addr += PAGE_SIZE;
-        address += PAGE_SIZE; 
-        v++;
-    }
-
-    if ( v != w )
-    {
-       /* get the ptep's filled in */
-       __direct_remap_area_pages(mm,
-                                  start_address, 
-                                  address-start_address, 
-                                  w);   
-       if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
-           return -EFAULT;         
-    }
-    
-    return 0;
-}
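As a rough orientation for the interface above: direct_remap_area_pages() batches up to MAX_DIRECTMAP_MMU_QUEUE mmu_update_t entries, lets __direct_remap_area_pages() fill in the PTE pointers, and then issues a single HYPERVISOR_mmu_update() call. A minimal hypothetical caller is sketched below; example_map_foreign_page, its vma argument, and the chosen includes are illustrative only and not part of this tree.

#include <linux/mm.h>
#include <asm-xen/hypervisor.h>

/* Hypothetical sketch: map one machine frame owned by 'domid' at the start
 * of an existing vma, using the vma's own protection bits. */
static int example_map_foreign_page(struct vm_area_struct *vma,
                                    unsigned long machine_addr,
                                    domid_t domid)
{
        return direct_remap_area_pages(vma->vm_mm,
                                       vma->vm_start,
                                       machine_addr & PAGE_MASK,
                                       PAGE_SIZE,
                                       vma->vm_page_prot,
                                       domid);
}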
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c
deleted file mode 100644 (file)
index 5734145..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-/* 
- * Copyright 2002 Andi Kleen, SuSE Labs. 
- * Thanks to Ben LaHaise for precious feedback.
- */ 
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/tlbflush.h>
-
-static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
-static struct list_head df_list = LIST_HEAD_INIT(df_list);
-
-
-pte_t *lookup_address(unsigned long address) 
-{ 
-       pgd_t *pgd = pgd_offset_k(address); 
-       pmd_t *pmd;
-       if (pgd_none(*pgd))
-               return NULL;
-       pmd = pmd_offset(pgd, address);                
-       if (pmd_none(*pmd))
-               return NULL;
-       if (pmd_large(*pmd))
-               return (pte_t *)pmd;
-        return pte_offset_kernel(pmd, address);
-} 
-
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
-{ 
-       int i; 
-       unsigned long addr;
-       struct page *base;
-       pte_t *pbase;
-
-       spin_unlock_irq(&cpa_lock);
-       base = alloc_pages(GFP_KERNEL, 0);
-       spin_lock_irq(&cpa_lock);
-       if (!base) 
-               return NULL;
-
-       address = __pa(address);
-       addr = address & LARGE_PAGE_MASK; 
-       pbase = (pte_t *)page_address(base);
-       for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-               pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
-                                  addr == address ? prot : PAGE_KERNEL);
-       }
-       return base;
-} 
-
-static void flush_kernel_map(void *dummy) 
-{ 
-       /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
-       if (boot_cpu_data.x86_model >= 4) 
-               wbinvd();
-       /* Flush all to work around Errata in early athlons regarding 
-        * large page flushing. 
-        */
-       __flush_tlb_all();      
-}
-
-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
-{ 
-       struct page *page;
-       unsigned long flags;
-
-       set_pte_atomic(kpte, pte);      /* change init_mm */
-       if (PTRS_PER_PMD > 1)
-               return;
-
-       spin_lock_irqsave(&pgd_lock, flags);
-       for (page = pgd_list; page; page = (struct page *)page->index) {
-               pgd_t *pgd;
-               pmd_t *pmd;
-               pgd = (pgd_t *)page_address(page) + pgd_index(address);
-               pmd = pmd_offset(pgd, address);
-               set_pte_atomic((pte_t *)pmd, pte);
-       }
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-/* 
- * No more special protections in this 2/4MB area - revert to a
- * large page again. 
- */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
-{
-       pte_t *linear = (pte_t *) 
-               pmd_offset(pgd_offset(&init_mm, address), address);
-       set_pmd_pte(linear,  address,
-                   pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
-                           PAGE_KERNEL_LARGE));
-}
-
-static int
-__change_page_attr(struct page *page, pgprot_t prot)
-{ 
-       pte_t *kpte; 
-       unsigned long address;
-       struct page *kpte_page;
-
-#ifdef CONFIG_HIGHMEM
-       if (page >= highmem_start_page) 
-               BUG(); 
-#endif
-       address = (unsigned long)page_address(page);
-
-       kpte = lookup_address(address);
-       if (!kpte)
-               return -EINVAL;
-       kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
-               if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
-                       pte_t old = *kpte;
-                       pte_t standard = mk_pte(page, PAGE_KERNEL); 
-                       set_pte_atomic(kpte, mk_pte(page, prot)); 
-                       if (pte_same(old,standard))
-                               get_page(kpte_page);
-               } else {
-                       struct page *split = split_large_page(address, prot); 
-                       if (!split)
-                               return -ENOMEM;
-                       get_page(kpte_page);
-                       set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
-               }       
-       } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
-               set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
-               __put_page(kpte_page);
-       }
-
-       if (cpu_has_pse && (page_count(kpte_page) == 1)) {
-               list_add(&kpte_page->lru, &df_list);
-               revert_page(kpte_page, address);
-       } 
-       return 0;
-} 
-
-static inline void flush_map(void)
-{
-       on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-
-/*
- * Change the page attributes of a page in the linear mapping.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- * 
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
- * 
- * Caller must call global_flush_tlb() after this.
- */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
-       int err = 0; 
-       int i; 
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpa_lock, flags);
-       for (i = 0; i < numpages; i++, page++) { 
-               err = __change_page_attr(page, prot);
-               if (err) 
-                       break; 
-       }       
-       spin_unlock_irqrestore(&cpa_lock, flags);
-       return err;
-}
-
-void global_flush_tlb(void)
-{ 
-       LIST_HEAD(l);
-       struct list_head* n;
-
-       BUG_ON(irqs_disabled());
-
-       spin_lock_irq(&cpa_lock);
-       list_splice_init(&df_list, &l);
-       spin_unlock_irq(&cpa_lock);
-       flush_map();
-       n = l.next;
-       while (n != &l) {
-               struct page *pg = list_entry(n, struct page, lru);
-               n = n->next;
-               __free_page(pg);
-       }
-} 
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
-       if (PageHighMem(page))
-               return;
-       /* the return value is ignored - the calls cannot fail,
-        * large pages are disabled at boot time.
-        */
-       change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
-       /* we should perform an IPI and flush all tlbs,
-        * but that can deadlock, so flush only the current cpu.
-        */
-       __flush_tlb_all();
-}
-EXPORT_SYMBOL(kernel_map_pages);
-#endif
-
-EXPORT_SYMBOL(change_page_attr);
-EXPORT_SYMBOL(global_flush_tlb);
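The comment above change_page_attr() requires the caller to invoke global_flush_tlb() afterwards, exactly as ioremap_nocache() does earlier in this changeset. A minimal hypothetical sketch of that pairing follows; example_set_uncached is an invented name, not part of the original file.

/* Hypothetical: remap one kernel page uncached, then flush as required. */
static int example_set_uncached(struct page *page)
{
        int rc = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        global_flush_tlb();     /* mandatory after change_page_attr() */
        return rc;
}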
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c
deleted file mode 100644 (file)
index 4516df1..0000000
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- *  linux/arch/i386/mm/pgtable.c
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/spinlock.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/fixmap.h>
-#include <asm/e820.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/io.h>
-
-void show_mem(void)
-{
-       int total = 0, reserved = 0;
-       int shared = 0, cached = 0;
-       int highmem = 0;
-       struct page *page;
-       pg_data_t *pgdat;
-       unsigned long i;
-
-       printk("Mem-info:\n");
-       show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       for_each_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       page = pgdat->node_mem_map + i;
-                       total++;
-                       if (PageHighMem(page))
-                               highmem++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-       }
-       printk("%d pages of RAM\n", total);
-       printk("%d pages of HIGHMEM\n",highmem);
-       printk("%d reserved pages\n",reserved);
-       printk("%d pages shared\n",shared);
-       printk("%d pages swap cached\n",cached);
-}
-
-/*
- * Associate a virtual page frame with a given physical page frame 
- * and protection flags for that frame.
- */ 
-static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       pgd = swapper_pg_dir + pgd_index(vaddr);
-       if (pgd_none(*pgd)) {
-               BUG();
-               return;
-       }
-       pmd = pmd_offset(pgd, vaddr);
-       if (pmd_none(*pmd)) {
-               BUG();
-               return;
-       }
-       pte = pte_offset_kernel(pmd, vaddr);
-       /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte(pfn, flags));
-
-       /*
-        * It's enough to flush this one mapping.
-        * (PGE mappings get flushed as well)
-        */
-       __flush_tlb_one(vaddr);
-}
-
-/*
- * Associate a virtual page frame with a given physical page frame 
- * and protection flags for that frame.
- */ 
-static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-                          pgprot_t flags)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       pgd = swapper_pg_dir + pgd_index(vaddr);
-       if (pgd_none(*pgd)) {
-               BUG();
-               return;
-       }
-       pmd = pmd_offset(pgd, vaddr);
-       if (pmd_none(*pmd)) {
-               BUG();
-               return;
-       }
-       pte = pte_offset_kernel(pmd, vaddr);
-       /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte_ma(pfn, flags));
-
-       /*
-        * It's enough to flush this one mapping.
-        * (PGE mappings get flushed as well)
-        */
-       __flush_tlb_one(vaddr);
-}
-
-/*
- * Associate a large virtual page frame with a given physical page frame 
- * and protection flags for that frame. pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned. 
- * The pmd must already be instantiated. Assumes PAE mode.
- */ 
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-
-       if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
-               printk ("set_pmd_pfn: vaddr misaligned\n");
-               return; /* BUG(); */
-       }
-       if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
-               printk ("set_pmd_pfn: pfn misaligned\n");
-               return; /* BUG(); */
-       }
-       pgd = swapper_pg_dir + pgd_index(vaddr);
-       if (pgd_none(*pgd)) {
-               printk ("set_pmd_pfn: pgd_none\n");
-               return; /* BUG(); */
-       }
-       pmd = pmd_offset(pgd, vaddr);
-       set_pmd(pmd, pfn_pmd(pfn, flags));
-       /*
-        * It's enough to flush this one mapping.
-        * (PGE mappings get flushed as well)
-        */
-       __flush_tlb_one(vaddr);
-}
-
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
-       unsigned long address = __fix_to_virt(idx);
-
-       if (idx >= __end_of_fixed_addresses) {
-               BUG();
-               return;
-       }
-       set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-}
-
-void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
-       unsigned long address = __fix_to_virt(idx);
-
-       if (idx >= __end_of_fixed_addresses) {
-               BUG();
-               return;
-       }
-       set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-}
-
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-       if (pte) {
-               clear_page(pte);
-               __make_page_readonly(pte);
-               xen_flush_page_update_queue();
-       }
-       return pte;
-}
-
-void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
-       struct page *page = virt_to_page(pte);
-       SetPageForeign(page, pte_free);
-       set_page_count(page, 1);
-
-       clear_page(pte);
-       __make_page_readonly(pte);
-       queue_pte_pin(virt_to_phys(pte));
-       flush_page_update_queue();
-}
-
-void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
-       struct page *page = virt_to_page(pte);
-       ClearPageForeign(page);
-
-       queue_pte_unpin(virt_to_phys(pte));
-       __make_page_writable(pte);
-       flush_page_update_queue();
-}
-
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-       pte_t *ptep;
-
-#ifdef CONFIG_HIGHPTE
-       struct page *pte;
-
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
-       if (pte == NULL)
-               return pte;
-       if (pte >= highmem_start_page) {
-               clear_highpage(pte);
-               return pte;
-       }
-       /* not a highmem page -- free page and grab one from the cache */
-       __free_page(pte);
-#endif
-       ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
-       if (ptep)
-               return virt_to_page(ptep);
-       return NULL;
-}
-
-void pte_free(struct page *pte)
-{
-       set_page_count(pte, 1);
-#ifdef CONFIG_HIGHPTE
-       if (pte < highmem_start_page)
-#endif
-               kmem_cache_free(pte_cache,
-                               phys_to_virt(page_to_pseudophys(pte)));
-#ifdef CONFIG_HIGHPTE
-       else
-               __free_page(pte);
-#endif
-}
-
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
-{
-       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * If the locking proves to be non-performant, a ticketing scheme with
- * checks at dup_mmap(), exec(), and other mmlist addition points
- * could be used. The locking scheme was chosen on the basis of
- * manfred's recommendations and having no core impact whatsoever.
- * -- wli
- */
-spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
-struct page *pgd_list;
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-       page->index = (unsigned long)pgd_list;
-       if (pgd_list)
-               pgd_list->private = (unsigned long)&page->index;
-       pgd_list = page;
-       page->private = (unsigned long)&pgd_list;
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-       struct page *next, **pprev, *page = virt_to_page(pgd);
-       next = (struct page *)page->index;
-       pprev = (struct page **)page->private;
-       *pprev = next;
-       if (next)
-               next->private = (unsigned long)pprev;
-}
-
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-{
-       unsigned long flags;
-
-       if (PTRS_PER_PMD == 1)
-               spin_lock_irqsave(&pgd_lock, flags);
-
-       memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-                       swapper_pg_dir + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
-       if (PTRS_PER_PMD > 1)
-               goto out;
-
-       pgd_list_add(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
- out:
-       __make_page_readonly(pgd);
-       queue_pgd_pin(__pa(pgd));
-       flush_page_update_queue();
-}
-
-/* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-{
-       unsigned long flags; /* can be called from interrupt context */
-
-       queue_pgd_unpin(__pa(pgd));
-       __make_page_writable(pgd);
-       flush_page_update_queue();
-
-       if (PTRS_PER_PMD > 1)
-               return;
-
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-       int i;
-       pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-
-       if (PTRS_PER_PMD == 1 || !pgd)
-               return pgd;
-
-       for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-               if (!pmd)
-                       goto out_oom;
-               set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
-       }
-       return pgd;
-
-out_oom:
-       for (i--; i >= 0; i--)
-               kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-       kmem_cache_free(pgd_cache, pgd);
-       return NULL;
-}
-
-void pgd_free(pgd_t *pgd)
-{
-       int i;
-
-       /* in the PAE case user pgd entries are overwritten before usage */
-       if (PTRS_PER_PMD > 1)
-               for (i = 0; i < USER_PTRS_PER_PGD; ++i)
-                       kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-       /* in the non-PAE case, clear_page_tables() clears user pgd entries */
-       kmem_cache_free(pgd_cache, pgd);
-}
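The Xen-specific point of the fixmap helpers above is the pseudo-physical versus machine split: __set_fixmap() expects a pseudo-physical address, while __set_fixmap_ma() expects a raw machine address. A hypothetical illustration follows; FIX_WP_TEST is used purely as an example slot and the frame numbers are caller-supplied placeholders.

/* Hypothetical: map a fixmap slot first by pseudo-physical frame,
 * then by machine frame, to show which helper takes which address. */
static void example_fixmap_usage(unsigned long pfn, unsigned long mfn)
{
        __set_fixmap(FIX_WP_TEST, pfn << PAGE_SHIFT, PAGE_KERNEL);
        __set_fixmap_ma(FIX_WP_TEST, mfn << PAGE_SHIFT, PAGE_KERNEL);
}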
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/Makefile
deleted file mode 100644 (file)
index 175f5f4..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/pci
-
-c-obj-y                                := i386.o
-
-c-obj-$(CONFIG_PCI_BIOS)               += pcbios.o
-c-obj-$(CONFIG_PCI_MMCONFIG)   += mmconfig.o
-obj-$(CONFIG_PCI_DIRECT)       += direct.o
-
-c-pci-y                                := fixup.o
-c-pci-$(CONFIG_ACPI_PCI)       += acpi.o
-c-pci-y                                += legacy.o
-pci-y                          += irq.o
-
-c-pci-$(CONFIG_X86_VISWS)      := visws.o fixup.o
-pci-$(CONFIG_X86_VISWS)                :=
-c-pci-$(CONFIG_X86_NUMAQ)      := numa.o
-pci-$(CONFIG_X86_NUMAQ)                := irq.o
-
-obj-y                          += $(pci-y)
-c-obj-y                                += $(c-pci-y) common.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-       @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
-
-obj-y  += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/direct.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/direct.c
deleted file mode 100644 (file)
index 6612213..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * direct.c - Low-level direct PCI config space access
- */
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include "pci.h"
-
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-#include <asm/hypervisor-ifs/physdev.h>
-
-/*
- * Functions for accessing PCI configuration space with type xen accesses
- */
-
-static int pci_conf_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
-{
-       unsigned long flags;
-       physdev_op_t op;
-       int ret;
-
-       if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
-               return -EINVAL;
-
-       spin_lock_irqsave(&pci_config_lock, flags);
-
-       op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
-       op.u.pci_cfgreg_read.bus  = bus;
-       op.u.pci_cfgreg_read.dev  = (devfn & ~0x7) >> 3;
-       op.u.pci_cfgreg_read.func = devfn & 0x7;
-       op.u.pci_cfgreg_read.reg  = reg;
-       op.u.pci_cfgreg_read.len  = len;
-
-       ret = HYPERVISOR_physdev_op(&op);
-       if (ret == 0)
-               *value = op.u.pci_cfgreg_read.value;
-
-       spin_unlock_irqrestore(&pci_config_lock, flags);
-
-       return ret;
-}
-
-static int pci_conf_write (int seg, int bus, int devfn, int reg, int len, u32 value)
-{
-       unsigned long flags;
-       physdev_op_t op;
-       int ret;
-
-       if ((bus > 255) || (devfn > 255) || (reg > 255)) 
-               return -EINVAL;
-
-       spin_lock_irqsave(&pci_config_lock, flags);
-
-       op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
-       op.u.pci_cfgreg_write.bus   = bus;
-       op.u.pci_cfgreg_write.dev   = (devfn & ~0x7) >> 3;
-       op.u.pci_cfgreg_write.func  = devfn & 0x7;
-       op.u.pci_cfgreg_write.reg   = reg;
-       op.u.pci_cfgreg_write.len   = len;
-       op.u.pci_cfgreg_write.value = value;
-
-       ret = HYPERVISOR_physdev_op(&op);
-
-       spin_unlock_irqrestore(&pci_config_lock, flags);
-
-       return ret;
-}
-
-struct pci_raw_ops pci_direct_xen = {
-       .read =         pci_conf_read,
-       .write =        pci_conf_write,
-};
-
-
-static int __init pci_direct_init(void)
-{
-
-        printk(KERN_INFO "PCI: Using configuration type Xen\n");
-        raw_pci_ops = &pci_direct_xen;
-        return 0;
-}
-
-arch_initcall(pci_direct_init);
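For context, the raw_pci_ops pointer installed above is the same hook the generic i386 PCI code uses for config-space access, so a read through it looks like the hypothetical sketch below; the function name and the choice of register are only an example.

#include <linux/pci.h>

/* Hypothetical: read the 16-bit vendor ID of seg 0, bus 0, devfn 0 through
 * the Xen-backed accessors registered above. */
static int example_read_vendor_id(u32 *vendor)
{
        return raw_pci_ops->read(0, 0, 0, PCI_VENDOR_ID, 2, vendor);
}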
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/irq.c b/linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/irq.c
deleted file mode 100644 (file)
index 7dc8dfb..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *     Low-Level PCI Support for PC -- Routing of Interrupts
- *
- *     (c) 1999--2000 Martin Mares <mj@ucw.cz>
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/io_apic.h>
-#include <asm/hw_irq.h>
-
-#include "pci.h"
-
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-#include <asm/hypervisor-ifs/physdev.h>
-
-/*
- * Never use: 0, 1, 2 (timer, keyboard, and cascade)
- * Avoid using: 13, 14 and 15 (FP error and IDE).
- * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
- */
-unsigned int pcibios_irq_mask = 0xfff8;
-
-static int pirq_penalty[16] = {
-       1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
-       0, 0, 0, 0, 1000, 100000, 100000, 100000
-};
-
-int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-
-
-static int __init pcibios_irq_init(void)
-{
-       int bus;
-       physdev_op_t op;
-
-       DBG("PCI: IRQ init\n");
-
-       if (pcibios_enable_irq || raw_pci_ops == NULL)
-               return 0;
-
-       op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
-       if (HYPERVISOR_physdev_op(&op) != 0) {
-               printk(KERN_WARNING "PCI: System does not support PCI\n");
-               return 0;
-       }
-
-       printk(KERN_INFO "PCI: Probing PCI hardware\n");
-       for (bus = 0; bus < 256; bus++)
-               if (test_bit(bus, (unsigned long *)
-                       &op.u.pci_probe_root_buses.busmask[0]))
-                       (void)pcibios_scan_root(bus);
-
-       pcibios_enable_irq = pirq_enable_irq;
-
-       return 0;
-}
-
-subsys_initcall(pcibios_irq_init);
-
-
-void pcibios_penalize_isa_irq(int irq)
-{
-       /*
-        *  If any ISAPnP device reports an IRQ in its list of possible
-        *  IRQ's, we try to avoid assigning it to PCI devices.
-        */
-       pirq_penalty[irq] += 100;
-}
-
-int pirq_enable_irq(struct pci_dev *dev)
-{
-       int err;
-       u8  pin;
-       physdev_op_t op;
-
-       /* Inform Xen that we are going to use this device. */
-       op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
-       op.u.pci_initialise_device.bus  = dev->bus->number;
-       op.u.pci_initialise_device.dev  = PCI_SLOT(dev->devfn);
-       op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
-       if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
-               return err;
-
-       /* Now we can bind to the very final IRQ line. */
-       pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
-       dev->irq = pin;
-
-       /* Sanity-check that an interrupt-producing device is routed
-        * to an IRQ. */
-       pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-       if (pin != 0) {
-               if (dev->irq != 0)
-                       printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
-                           dev->irq, dev->slot_name);
-               else
-                       printk(KERN_WARNING "PCI: No IRQ known for interrupt "
-                           "pin %c of device %s.\n", 'A' + pin - 1,
-                           dev->slot_name);
-       }
-
-       return 0;
-}
-
-int pci_vector_resources(int last, int nr_released)
-{
-       int count = nr_released;
-
-       int next = last;
-       int offset = (last % 8);
-
-       while (next < FIRST_SYSTEM_VECTOR) {
-               next += 8;
-#ifdef CONFIG_X86_64
-               if (next == IA32_SYSCALL_VECTOR)
-                       continue;
-#else
-               if (next == SYSCALL_VECTOR)
-                       continue;
-#endif
-               count++;
-               if (next >= FIRST_SYSTEM_VECTOR) {
-                       if (offset%8) {
-                               next = FIRST_DEVICE_VECTOR + offset;
-                               offset++;
-                               continue;
-                       }
-                       count--;
-               }
-       }
-
-       return count;
-}
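For orientation, pirq_enable_irq() above is not called by drivers directly; it is reached through the generic pci_enable_device() path once pcibios_enable_irq has been installed by pcibios_irq_init(). A hypothetical driver-side fragment (names are illustrative):

/* Hypothetical probe fragment: enabling the device ends up in
 * pirq_enable_irq(), which asks Xen to initialise the device and
 * fills in dev->irq. */
static int example_probe(struct pci_dev *dev)
{
        int rc = pci_enable_device(dev);
        if (rc)
                return rc;
        printk(KERN_INFO "example: device %s using IRQ %d\n",
               dev->slot_name, dev->irq);
        return 0;
}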
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/Makefile b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/Makefile
deleted file mode 100644 (file)
index 38fabf7..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-XENARCH        := $(subst ",,$(CONFIG_XENARCH))
-
-$(obj)/vmlinux.lds.s:
-       @ln -fsn $(srctree)/arch/$(ARCH)/$(XENARCH)/kernel/vmlinux.lds.s $@
-
-extra-y += vmlinux.lds.s
-
-obj-y  := ctrl_if.o evtchn.o fixup.o process.o reboot.o xen_proc.o empty.o \
-           gnttab.o skbuff.o
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/ctrl_if.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/ctrl_if.c
deleted file mode 100644 (file)
index 78195fa..0000000
+++ /dev/null
@@ -1,543 +0,0 @@
-/******************************************************************************
- * ctrl_if.c
- * 
- * Management functions for special interface to the domain controller.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/evtchn.h>
-
-#if 0
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
-                           __FILE__ , __LINE__ , ## _a )
-#else
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-/*
- * Only used by the initial domain, which must create its own control-interface
- * event channel. This value is picked up by the user-space domain controller
- * via an ioctl.
- */
-int initdom_ctrlif_domcontroller_port = -1;
-
-static int        ctrl_if_evtchn;
-static int        ctrl_if_irq;
-static spinlock_t ctrl_if_lock;
-
-static struct irqaction ctrl_if_irq_action;
-
-static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
-static CONTROL_RING_IDX ctrl_if_rx_req_cons;
-
-/* Incoming message requests. */
-    /* Primary message type -> message handler. */
-static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
-    /* Primary message type -> callback in process context? */
-static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
-    /* Is it late enough during bootstrap to use schedule_task()? */
-static int safe_to_schedule_task;
-    /* Queue up messages to be handled in process context. */
-static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
-static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
-static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
-
-/* Incoming message responses: message identifier -> message handler/id. */
-static struct {
-    ctrl_msg_handler_t fn;
-    unsigned long      id;
-} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
-
-/* For received messages that must be deferred to process context. */
-static void __ctrl_if_rxmsg_deferred(void *unused);
-static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
-                    __ctrl_if_rxmsg_deferred,
-                    NULL);
-
-/* Deferred callbacks for people waiting for space in the transmit ring. */
-static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
-
-static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
-static void __ctrl_if_tx_tasklet(unsigned long data);
-static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
-
-static void __ctrl_if_rx_tasklet(unsigned long data);
-static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
-
-#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
-#define TX_FULL(_c)   \
-    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
-
-static void ctrl_if_notify_controller(void)
-{
-    notify_via_evtchn(ctrl_if_evtchn);
-}
-
-static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
-{
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-static void __ctrl_if_tx_tasklet(unsigned long data)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-    ctrl_msg_t   *msg;
-    int           was_full = TX_FULL(ctrl_if);
-    CONTROL_RING_IDX rp;
-
-    rp = ctrl_if->tx_resp_prod;
-    rmb(); /* Ensure we see all requests up to 'rp'. */
-
-    while ( ctrl_if_tx_resp_cons != rp )
-    {
-        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
-
-        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", 
-                ctrl_if_tx_resp_cons,
-                ctrl_if->tx_resp_prod,
-                msg->type, msg->subtype);
-
-        /* Execute the callback handler, if one was specified. */
-        if ( msg->id != 0xFF )
-        {
-            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
-                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
-            smp_mb(); /* Execute, /then/ free. */
-            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
-        }
-
-        /*
-         * Step over the message in the ring /after/ finishing reading it. As
-         * soon as the index is updated, the message may get blown away.
-         */
-        smp_mb();
-        ctrl_if_tx_resp_cons++;
-    }
-
-    if ( was_full && !TX_FULL(ctrl_if) )
-    {
-        wake_up(&ctrl_if_tx_wait);
-        run_task_queue(&ctrl_if_tx_tq);
-    }
-}
-
-static void __ctrl_if_rxmsg_deferred(void *unused)
-{
-    ctrl_msg_t *msg;
-    CONTROL_RING_IDX dp;
-
-    dp = ctrl_if_rxmsg_deferred_prod;
-    rmb(); /* Ensure we see all deferred requests up to 'dp'. */
-
-    while ( ctrl_if_rxmsg_deferred_cons != dp )
-    {
-        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
-            ctrl_if_rxmsg_deferred_cons++)];
-        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
-    }
-}
-
-static void __ctrl_if_rx_tasklet(unsigned long data)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-    ctrl_msg_t    msg, *pmsg;
-    CONTROL_RING_IDX rp, dp;
-
-    dp = ctrl_if_rxmsg_deferred_prod;
-    rp = ctrl_if->rx_req_prod;
-    rmb(); /* Ensure we see all requests up to 'rp'. */
-
-    while ( ctrl_if_rx_req_cons != rp )
-    {
-        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
-        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
-
-        DPRINTK("Rx-Req %u/%u :: %d/%d\n", 
-                ctrl_if_rx_req_cons-1,
-                ctrl_if->rx_req_prod,
-                msg.type, msg.subtype);
-
-        if ( msg.length != 0 )
-            memcpy(msg.msg, pmsg->msg, msg.length);
-
-        if ( test_bit(msg.type, 
-                      (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
-            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
-                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
-        else
-            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
-    }
-
-    if ( dp != ctrl_if_rxmsg_deferred_prod )
-    {
-        wmb();
-        ctrl_if_rxmsg_deferred_prod = dp;
-        schedule_work(&ctrl_if_rxmsg_deferred_work);
-    }
-}
-
-static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
-                                     struct pt_regs *regs)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-
-    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
-        tasklet_schedule(&ctrl_if_tx_tasklet);
-
-    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
-        tasklet_schedule(&ctrl_if_rx_tasklet);
-
-    return IRQ_HANDLED;
-}
-
-int
-ctrl_if_send_message_noblock(
-    ctrl_msg_t *msg, 
-    ctrl_msg_handler_t hnd,
-    unsigned long id)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-    unsigned long flags;
-    int           i;
-
-    spin_lock_irqsave(&ctrl_if_lock, flags);
-
-    if ( TX_FULL(ctrl_if) )
-    {
-        spin_unlock_irqrestore(&ctrl_if_lock, flags);
-        return -EAGAIN;
-    }
-
-    msg->id = 0xFF;
-    if ( hnd != NULL )
-    {
-        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
-            continue;
-        ctrl_if_txmsg_id_mapping[i].fn = hnd;
-        ctrl_if_txmsg_id_mapping[i].id = id;
-        msg->id = i;
-    }
-
-    DPRINTK("Tx-Req %u/%u :: %d/%d\n", 
-            ctrl_if->tx_req_prod, 
-            ctrl_if_tx_resp_cons,
-            msg->type, msg->subtype);
-
-    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
-           msg, sizeof(*msg));
-    wmb(); /* Write the message before letting the controller peek at it. */
-    ctrl_if->tx_req_prod++;
-
-    spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
-    ctrl_if_notify_controller();
-
-    return 0;
-}
-
-int
-ctrl_if_send_message_block(
-    ctrl_msg_t *msg, 
-    ctrl_msg_handler_t hnd, 
-    unsigned long id,
-    long wait_state)
-{
-    DECLARE_WAITQUEUE(wait, current);
-    int rc;
-
-    /* Fast path. */
-    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
-        return rc;
-
-    add_wait_queue(&ctrl_if_tx_wait, &wait);
-
-    for ( ; ; )
-    {
-        set_current_state(wait_state);
-
-        if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
-            break;
-
-        rc = -ERESTARTSYS;
-        if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
-            break;
-
-        schedule();
-    }
-
-    set_current_state(TASK_RUNNING);
-    remove_wait_queue(&ctrl_if_tx_wait, &wait);
-
-    return rc;
-}
-
-/* Allow a response-callback handler to find context of a blocked requester.  */
-struct rsp_wait {
-    ctrl_msg_t         *msg;  /* Buffer for the response message.            */
-    struct task_struct *task; /* The task that is blocked on the response.   */
-    int                 done; /* Indicate to 'task' that response is rcv'ed. */
-};
-
-static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
-{
-    struct rsp_wait    *wait = (struct rsp_wait *)id;
-    struct task_struct *task = wait->task;
-
-    memcpy(wait->msg, msg, sizeof(*msg));
-    wmb();
-    wait->done = 1;
-
-    wake_up_process(task);
-}
-
-int
-ctrl_if_send_message_and_get_response(
-    ctrl_msg_t *msg, 
-    ctrl_msg_t *rmsg,
-    long wait_state)
-{
-    struct rsp_wait wait;
-    int rc;
-
-    wait.msg  = rmsg;
-    wait.done = 0;
-    wait.task = current;
-
-    if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
-                                          (unsigned long)&wait,
-                                          wait_state)) != 0 )
-        return rc;
-
-    for ( ; ; )
-    {
-        /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        if ( wait.done )
-            break;
-        schedule();
-    }
-
-    set_current_state(TASK_RUNNING);
-    return 0;
-}
-
-int
-ctrl_if_enqueue_space_callback(
-    struct tq_struct *task)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-
-    /* Fast path. */
-    if ( !TX_FULL(ctrl_if) )
-        return 0;
-
-    (void)queue_task(task, &ctrl_if_tx_tq);
-
-    /*
-     * We may race execution of the task queue, so return re-checked status. If
-     * the task is not executed despite the ring being non-full then we will
-     * certainly return 'not full'.
-     */
-    smp_mb();
-    return TX_FULL(ctrl_if);
-}
-
-void
-ctrl_if_send_response(
-    ctrl_msg_t *msg)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-    unsigned long flags;
-    ctrl_msg_t   *dmsg;
-
-    /*
-     * NB. The response may be the original request message, modified in-place.
-     * In this situation we may have src==dst, so no copying is required.
-     */
-    spin_lock_irqsave(&ctrl_if_lock, flags);
-
-    DPRINTK("Tx-Rsp %u :: %d/%d\n", 
-            ctrl_if->rx_resp_prod, 
-            msg->type, msg->subtype);
-
-    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
-    if ( dmsg != msg )
-        memcpy(dmsg, msg, sizeof(*msg));
-
-    wmb(); /* Write the message before letting the controller peek at it. */
-    ctrl_if->rx_resp_prod++;
-
-    spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
-    ctrl_if_notify_controller();
-}
-
-int
-ctrl_if_register_receiver(
-    u8 type, 
-    ctrl_msg_handler_t hnd, 
-    unsigned int flags)
-{
-    unsigned long _flags;
-    int inuse;
-
-    spin_lock_irqsave(&ctrl_if_lock, _flags);
-
-    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
-
-    if ( inuse )
-    {
-        printk(KERN_INFO "Receiver %p already established for control "
-               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
-    }
-    else
-    {
-        ctrl_if_rxmsg_handler[type] = hnd;
-        clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
-        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
-        {
-            set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
-            if ( !safe_to_schedule_task )
-                BUG();
-        }
-    }
-
-    spin_unlock_irqrestore(&ctrl_if_lock, _flags);
-
-    return !inuse;
-}
-
-void 
-ctrl_if_unregister_receiver(
-    u8 type,
-    ctrl_msg_handler_t hnd)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&ctrl_if_lock, flags);
-
-    if ( ctrl_if_rxmsg_handler[type] != hnd )
-        printk(KERN_INFO "Receiver %p is not registered for control "
-               "messages of type %d.\n", hnd, type);
-    else
-        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
-
-    spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
-    /* Ensure that @hnd will not be executed after this function returns. */
-    tasklet_unlock_wait(&ctrl_if_rx_tasklet);
-}
-
-void ctrl_if_suspend(void)
-{
-    free_irq(ctrl_if_irq, NULL);
-    unbind_evtchn_from_irq(ctrl_if_evtchn);
-}
-
-void ctrl_if_resume(void)
-{
-    control_if_t *ctrl_if = get_ctrl_if();
-
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-    {
-        /*
-         * The initial domain must create its own domain-controller link.
-         * The controller is probably not running at this point, but will
-         * pick up its end of the event channel from 
-         */
-        evtchn_op_t op;
-        op.cmd = EVTCHNOP_bind_interdomain;
-        op.u.bind_interdomain.dom1 = DOMID_SELF;
-        op.u.bind_interdomain.dom2 = DOMID_SELF;
-        op.u.bind_interdomain.port1 = 0;
-        op.u.bind_interdomain.port2 = 0;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            BUG();
-        xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
-        initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
-    }
-
-    /* Sync up with shared indexes. */
-    ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
-    ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;
-
-    ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
-    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
-
-#define SA_STATIC_ACTION 0x01000000 /* so that free_irq() doesn't do kfree() */
-    memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
-    ctrl_if_irq_action.handler = ctrl_if_interrupt;
-    ctrl_if_irq_action.name    = "ctrl-if";
-    ctrl_if_irq_action.flags   = SA_STATIC_ACTION;
-    (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
-}
-
-void __init ctrl_if_init(void)
-{
-    int i;
-
-    for ( i = 0; i < 256; i++ )
-        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
-
-    spin_lock_init(&ctrl_if_lock);
-
-    ctrl_if_resume();
-}
-
-
-/* This is called after it is safe to call schedule_task(). */
-static int __init ctrl_if_late_setup(void)
-{
-    safe_to_schedule_task = 1;
-    return 0;
-}
-__initcall(ctrl_if_late_setup);
-
-
-/*
- * !! The following are DANGEROUS FUNCTIONS !!
- * Use with care [for example, see xencons_force_flush()].
- */
-
-int ctrl_if_transmitter_empty(void)
-{
-    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
-}
-
-void ctrl_if_discard_responses(void)
-{
-    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
-}
-
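
A minimal sketch (not part of the deleted file) of the receiver pattern these functions expect, mirroring the shutdown handler in reboot.c further down this diff: a ctrl_msg_handler_t is registered for a single message type, runs from the control-interface rx tasklet unless CALLBACK_IN_BLOCKING_CONTEXT is requested, and must acknowledge every message with ctrl_if_send_response(). The handler and init function names are illustrative, and CMSG_SHUTDOWN is used purely as an example type.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm-xen/ctrl_if.h>

static void example_msg_handler(ctrl_msg_t *msg, unsigned long id)
{
    /* ... inspect msg->subtype and act on the request here ... */
    ctrl_if_send_response(msg);     /* every request must be acknowledged */
}

static int __init example_receiver_init(void)
{
    /* Returns non-zero on success, zero if the type already has a receiver. */
    if ( !ctrl_if_register_receiver(CMSG_SHUTDOWN, example_msg_handler, 0) )
        printk(KERN_WARNING "CMSG_SHUTDOWN receiver already registered\n");
    return 0;
}
__initcall(example_receiver_init);
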
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/empty.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/empty.c
deleted file mode 100644 (file)
index 32f03e7..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-
-#include <linux/string.h>
-#include <asm-xen/hypervisor.h>
-
-#if 0
-static __inline__ int HYPERVISOR_console_write(const char *str, int count)
-{
-       int ret;
-       __asm__ __volatile__ (
-               TRAP_INSTR
-               : "=a" (ret) : "0" (__HYPERVISOR_console_write), 
-               "b" (str), "c" (count) : "memory" );
-
-
-       return ret;
-}
-#endif
-
-#if 01
-void
-xen_puts(const char *str)
-{
-
-       (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(str), (char *)str);
-}
-
-asmlinkage int CLPRINTK(const char *fmt, ...)
-{
-       va_list args;
-       int printk_len;
-       static char printk_buf[1024+1];
-    
-       /* Emit the output into the temporary buffer */
-       va_start(args, fmt);
-       printk_len = vsnprintf(printk_buf, sizeof(printk_buf)-1, fmt, args);
-       va_end(args);
-
-       printk_buf[printk_len] = 0;
-       /* Send the processed output directly to Xen. */
-       (void)HYPERVISOR_console_io(CONSOLEIO_write, printk_len, printk_buf);
-
-       return 0;
-}
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/evtchn.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/evtchn.c
deleted file mode 100644 (file)
index b779ac1..0000000
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- * evtchn.c
- * 
- * Communication via Xen event channels.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/version.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/synch_bitops.h>
-#include <asm/hypervisor-ifs/event_channel.h>
-#include <asm/hypervisor-ifs/physdev.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(force_evtchn_callback);
-EXPORT_SYMBOL(evtchn_do_upcall);
-#endif
-
-/*
- * This lock protects updates to the following mapping and reference-count
- * arrays. The lock does not need to be acquired to read the mapping tables.
- */
-static spinlock_t irq_mapping_update_lock;
-
-/* IRQ <-> event-channel mappings. */
-static int evtchn_to_irq[NR_EVENT_CHANNELS];
-static int irq_to_evtchn[NR_IRQS];
-
-/* IRQ <-> VIRQ mapping. */
-static int virq_to_irq[NR_VIRQS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
-
-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
-
-/* Upcall to generic IRQ layer. */
-extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
-
-#define VALID_EVTCHN(_chn) ((_chn) != -1)
-
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
-    (void)HYPERVISOR_xen_version(0);
-}
-
-void evtchn_do_upcall(struct pt_regs *regs)
-{
-    unsigned long  l1, l2;
-    unsigned int   l1i, l2i, port;
-    int            irq;
-    unsigned long  flags;
-    shared_info_t *s = HYPERVISOR_shared_info;
-
-    local_irq_save(flags);
-    
-    while ( s->vcpu_data[0].evtchn_upcall_pending )
-    {
-        s->vcpu_data[0].evtchn_upcall_pending = 0;
-        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-        l1 = xchg(&s->evtchn_pending_sel, 0);
-        while ( (l1i = ffs(l1)) != 0 )
-        {
-            l1i--;
-            l1 &= ~(1 << l1i);
-        
-            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
-            while ( (l2i = ffs(l2)) != 0 )
-            {
-                l2i--;
-                l2 &= ~(1 << l2i);
-            
-                port = (l1i << 5) + l2i;
-                if ( (irq = evtchn_to_irq[port]) != -1 )
-                    do_IRQ(irq, regs);
-                else
-                    evtchn_device_upcall(port);
-            }
-        }
-    }
-
-    local_irq_restore(flags);
-}
-
-static int find_unbound_irq(void)
-{
-    int irq;
-
-    for ( irq = 0; irq < NR_IRQS; irq++ )
-        if ( irq_bindcount[irq] == 0 )
-            break;
-
-    if ( irq == NR_IRQS )
-        panic("No available IRQ to bind to: increase NR_IRQS!\n");
-
-    return irq;
-}
-
-int bind_virq_to_irq(int virq)
-{
-    evtchn_op_t op;
-    int evtchn, irq;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( (irq = virq_to_irq[virq]) == -1 )
-    {
-        op.cmd              = EVTCHNOP_bind_virq;
-        op.u.bind_virq.virq = virq;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IRQ %d\n", virq);
-        evtchn = op.u.bind_virq.port;
-
-        irq = find_unbound_irq();
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-
-        virq_to_irq[virq] = irq;
-    }
-
-    irq_bindcount[irq]++;
-
-    spin_unlock(&irq_mapping_update_lock);
-    
-    return irq;
-}
-
-void unbind_virq_from_irq(int virq)
-{
-    evtchn_op_t op;
-    int irq    = virq_to_irq[virq];
-    int evtchn = irq_to_evtchn[irq];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( --irq_bindcount[irq] == 0 )
-    {
-        op.cmd          = EVTCHNOP_close;
-        op.u.close.dom  = DOMID_SELF;
-        op.u.close.port = evtchn;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to unbind virtual IRQ %d\n", virq);
-
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-        virq_to_irq[virq]     = -1;
-    }
-
-    spin_unlock(&irq_mapping_update_lock);
-}
-
-int bind_evtchn_to_irq(int evtchn)
-{
-    int irq;
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
-    {
-        irq = find_unbound_irq();
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-    }
-
-    irq_bindcount[irq]++;
-
-    spin_unlock(&irq_mapping_update_lock);
-    
-    return irq;
-}
-
-void unbind_evtchn_from_irq(int evtchn)
-{
-    int irq = evtchn_to_irq[evtchn];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    if ( --irq_bindcount[irq] == 0 )
-    {
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-    }
-
-    spin_unlock(&irq_mapping_update_lock);
-}
-
-
-/*
- * Interface to generic handling in irq.c
- */
-
-static unsigned int startup_dynirq(unsigned int irq)
-{
-    unmask_evtchn(irq_to_evtchn[irq]);
-    return 0;
-}
-
-static void shutdown_dynirq(unsigned int irq)
-{
-    mask_evtchn(irq_to_evtchn[irq]);
-}
-
-static void enable_dynirq(unsigned int irq)
-{
-    unmask_evtchn(irq_to_evtchn[irq]);
-}
-
-static void disable_dynirq(unsigned int irq)
-{
-    mask_evtchn(irq_to_evtchn[irq]);
-}
-
-static void ack_dynirq(unsigned int irq)
-{
-    mask_evtchn(irq_to_evtchn[irq]);
-    clear_evtchn(irq_to_evtchn[irq]);
-}
-
-static void end_dynirq(unsigned int irq)
-{
-    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-        unmask_evtchn(irq_to_evtchn[irq]);
-}
-
-static struct hw_interrupt_type dynirq_type = {
-    "Dynamic-irq",
-    startup_dynirq,
-    shutdown_dynirq,
-    enable_dynirq,
-    disable_dynirq,
-    ack_dynirq,
-    end_dynirq,
-    NULL
-};
-
-static inline void pirq_unmask_notify(int pirq)
-{
-    physdev_op_t op;
-    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
-    {
-        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
-        (void)HYPERVISOR_physdev_op(&op);
-    }
-}
-
-static inline void pirq_query_unmask(int pirq)
-{
-    physdev_op_t op;
-    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
-    op.u.irq_status_query.irq = pirq;
-    (void)HYPERVISOR_physdev_op(&op);
-    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
-    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
-        set_bit(pirq, &pirq_needs_unmask_notify[0]);
-}
-
-/*
- * On startup, if there is no action associated with the IRQ then we are
- * probing. In this case we should not share with others as it will confuse us.
- */
-#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-
-static unsigned int startup_pirq(unsigned int irq)
-{
-    evtchn_op_t op;
-    int evtchn;
-
-    op.cmd               = EVTCHNOP_bind_pirq;
-    op.u.bind_pirq.pirq  = irq;
-    /* NB. We are happy to share unless we are probing. */
-    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-    {
-        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
-            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
-        return 0;
-    }
-    evtchn = op.u.bind_pirq.port;
-
-    pirq_query_unmask(irq_to_pirq(irq));
-
-    evtchn_to_irq[evtchn] = irq;
-    irq_to_evtchn[irq]    = evtchn;
-
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
-
-    return 0;
-}
-
-static void shutdown_pirq(unsigned int irq)
-{
-    evtchn_op_t op;
-    int evtchn = irq_to_evtchn[irq];
-
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-
-    mask_evtchn(evtchn);
-
-    op.cmd          = EVTCHNOP_close;
-    op.u.close.dom  = DOMID_SELF;
-    op.u.close.port = evtchn;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-        panic("Failed to unbind physical IRQ %d\n", irq);
-
-    evtchn_to_irq[evtchn] = -1;
-    irq_to_evtchn[irq]    = -1;
-}
-
-static void enable_pirq(unsigned int irq)
-{
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
-}
-
-static void disable_pirq(unsigned int irq)
-{
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
-}
-
-static void ack_pirq(unsigned int irq)
-{
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
-    clear_evtchn(evtchn);
-}
-
-static void end_pirq(unsigned int irq)
-{
-    int evtchn = irq_to_evtchn[irq];
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-    {
-        unmask_evtchn(evtchn);
-        pirq_unmask_notify(irq_to_pirq(irq));
-    }
-}
-
-static struct hw_interrupt_type pirq_type = {
-    "Phys-irq",
-    startup_pirq,
-    shutdown_pirq,
-    enable_pirq,
-    disable_pirq,
-    ack_pirq,
-    end_pirq,
-    NULL
-};
-
-static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
-                                       struct pt_regs *regs)
-{
-    /* nothing */
-    return IRQ_HANDLED;
-}
-
-static struct irqaction misdirect_action = {
-    misdirect_interrupt, 
-    SA_INTERRUPT, 
-    CPU_MASK_NONE, 
-    "misdirect", 
-    NULL, 
-    NULL
-};
-
-void irq_suspend(void)
-{
-    int pirq, virq, irq, evtchn;
-
-    /* Unbind VIRQs from event channels. */
-    for ( virq = 0; virq < NR_VIRQS; virq++ )
-    {
-        if ( (irq = virq_to_irq[virq]) == -1 )
-            continue;
-        evtchn = irq_to_evtchn[irq];
-
-        /* Mark the event channel as unused in our table. */
-        evtchn_to_irq[evtchn] = -1;
-        irq_to_evtchn[irq]    = -1;
-    }
-
-    /* Check that no PIRQs are still bound. */
-    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
-        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
-            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
-                  pirq, evtchn);
-}
-
-void irq_resume(void)
-{
-    evtchn_op_t op;
-    int         virq, irq, evtchn;
-
-    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
-        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
-
-    for ( virq = 0; virq < NR_VIRQS; virq++ )
-    {
-        if ( (irq = virq_to_irq[virq]) == -1 )
-            continue;
-
-        /* Get a new binding from Xen. */
-        op.cmd              = EVTCHNOP_bind_virq;
-        op.u.bind_virq.virq = virq;
-        if ( HYPERVISOR_event_channel_op(&op) != 0 )
-            panic("Failed to bind virtual IRQ %d\n", virq);
-        evtchn = op.u.bind_virq.port;
-        
-        /* Record the new mapping. */
-        evtchn_to_irq[evtchn] = irq;
-        irq_to_evtchn[irq]    = evtchn;
-
-        /* Ready for use. */
-        unmask_evtchn(evtchn);
-    }
-}
-
-void __init init_IRQ(void)
-{
-    int i;
-
-    spin_lock_init(&irq_mapping_update_lock);
-
-    /* No VIRQ -> IRQ mappings. */
-    for ( i = 0; i < NR_VIRQS; i++ )
-        virq_to_irq[i] = -1;
-
-    /* No event-channel -> IRQ mappings. */
-    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
-    {
-        evtchn_to_irq[i] = -1;
-        mask_evtchn(i); /* No event channels are 'live' right now. */
-    }
-
-    /* No IRQ -> event-channel mappings. */
-    for ( i = 0; i < NR_IRQS; i++ )
-        irq_to_evtchn[i] = -1;
-
-    for ( i = 0; i < NR_DYNIRQS; i++ )
-    {
-        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-        irq_bindcount[dynirq_to_irq(i)] = 0;
-
-        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
-        irq_desc[dynirq_to_irq(i)].action  = 0;
-        irq_desc[dynirq_to_irq(i)].depth   = 1;
-        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
-    }
-
-    for ( i = 0; i < NR_PIRQS; i++ )
-    {
-        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-        irq_bindcount[pirq_to_irq(i)] = 1;
-
-        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
-        irq_desc[pirq_to_irq(i)].action  = 0;
-        irq_desc[pirq_to_irq(i)].depth   = 1;
-        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
-    }
-
-    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
-
-    /* This needs to be done early, but after the IRQ subsystem is alive. */
-    ctrl_if_init();
-}
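
As a companion sketch (not part of the deleted file), this is how a driver in this tree would bind an already-allocated event-channel port to a dynamic IRQ and attach a handler, following the same bind_evtchn_to_irq()/free_irq()/unbind_evtchn_from_irq() sequence the control interface uses in ctrl_if_resume() and ctrl_if_suspend() above. All names and the port value are assumed for illustration; the prototypes are declared locally and the request_irq() signature is the 2.6.8-era one.

#include <linux/interrupt.h>
#include <asm/ptrace.h>

extern int bind_evtchn_to_irq(int evtchn);
extern void unbind_evtchn_from_irq(int evtchn);

static int example_evtchn, example_irq;

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    /* ... consume work notified on this event channel ... */
    return IRQ_HANDLED;
}

static int example_connect(int port)
{
    example_evtchn = port;
    example_irq    = bind_evtchn_to_irq(example_evtchn);
    return request_irq(example_irq, example_interrupt, 0, "example-dev", NULL);
}

static void example_disconnect(void)
{
    free_irq(example_irq, NULL);
    unbind_evtchn_from_irq(example_evtchn);
}
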
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/fixup.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/fixup.c
deleted file mode 100644 (file)
index c6a9044..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/******************************************************************************
- * fixup.c
- * 
- * Binary-rewriting of certain IA32 instructions, on notification by Xen.
- * Used to avoid repeated slow emulation of common instructions used by the
- * user-space TLS (Thread-Local Storage) libraries.
- * 
- * **** NOTE ****
- *  Issues with the binary rewriting have caused it to be removed. Instead
- *  we rely on Xen's emulator to boot the kernel, and then print a banner
- *  message recommending that the user disable /lib/tls.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-
-#define DP(_f) printk(KERN_ALERT "  " _f "\n")
-
-asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-{
-    static unsigned long printed = 0;
-    int i;
-
-    if ( !test_and_set_bit(0, &printed) )
-    {
-        HYPERVISOR_vm_assist(VMASST_CMD_disable,
-                             VMASST_TYPE_4gb_segments_notify);
-
-        DP("");
-        DP("***************************************************************");
-        DP("***************************************************************");
-        DP("** WARNING: Currently emulating unsupported memory accesses  **");
-        DP("**          in /lib/tls libraries. The emulation is very     **");
-        DP("**          slow, and may not work correctly with all        **");
-        DP("**          programs (e.g., some may 'Segmentation fault').  **");
-        DP("**          TO ENSURE FULL PERFORMANCE AND CORRECT FUNCTION, **");
-        DP("**          YOU MUST EXECUTE THE FOLLOWING AS ROOT:          **");
-        DP("**          mv /lib/tls /lib/tls.disabled                    **");
-        DP("***************************************************************");
-        DP("***************************************************************");
-        DP("");
-
-        for ( i = 5; i > 0; i-- )
-        {
-            printk("Pausing... %d", i);
-            mdelay(1000);
-            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-        }
-        printk("Continuing...\n\n");
-    }
-}
-
-static int __init fixup_init(void)
-{
-    HYPERVISOR_vm_assist(VMASST_CMD_enable,
-                         VMASST_TYPE_4gb_segments_notify);
-    return 0;
-}
-__initcall(fixup_init);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/gnttab.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/gnttab.c
deleted file mode 100644 (file)
index af892ae..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/******************************************************************************
- * gnttab.c
- * 
- * Two sets of functionality:
- * 1. Granting foreign access to our memory reservation.
- * 2. Accessing others' memory reservations via grant references.
- * (i.e., mechanisms for both sender and recipient of grant references)
- * 
- * Copyright (c) 2004, K A Fraser
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <asm/pgtable.h>
-#include <asm/fixmap.h>
-#include <asm-xen/gnttab.h>
-
-#ifndef set_fixmap_ma
-#define set_fixmap_ma set_fixmap
-#endif
-
-#if 1
-#define ASSERT(_p) \
-    if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
-    #_p , __LINE__, __FILE__); *(int*)0=0; }
-#else
-#define ASSERT(_p) ((void)0)
-#endif
-
-EXPORT_SYMBOL(gnttab_grant_foreign_access);
-EXPORT_SYMBOL(gnttab_end_foreign_access);
-EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
-EXPORT_SYMBOL(gnttab_end_foreign_transfer);
-
-#define NR_GRANT_REFS 512
-static grant_ref_t gnttab_free_list[NR_GRANT_REFS];
-static grant_ref_t gnttab_free_head;
-
-static grant_entry_t *shared;
-
-/*
- * Lock-free grant-entry allocator
- */
-
-static inline int
-get_free_entry(
-    void)
-{
-    grant_ref_t fh, nfh = gnttab_free_head;
-    do { if ( unlikely((fh = nfh) == NR_GRANT_REFS) ) return -1; }
-    while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh,
-                                    gnttab_free_list[fh])) != fh) );
-    return fh;
-}
-
-static inline void
-put_free_entry(
-    grant_ref_t ref)
-{
-    grant_ref_t fh, nfh = gnttab_free_head;
-    do { gnttab_free_list[ref] = fh = nfh; wmb(); }
-    while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, ref)) != fh) );
-}
-
-/*
- * Public grant-issuing interface functions
- */
-
-int
-gnttab_grant_foreign_access(
-    domid_t domid, unsigned long frame, int readonly)
-{
-    int ref;
-    
-    if ( unlikely((ref = get_free_entry()) == -1) )
-        return -ENOSPC;
-
-    shared[ref].frame = frame;
-    shared[ref].domid = domid;
-    wmb();
-    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-
-    return ref;
-}
-
-void
-gnttab_end_foreign_access(
-    grant_ref_t ref, int readonly)
-{
-    u16 flags, nflags;
-
-    nflags = shared[ref].flags;
-    do {
-        if ( (flags = nflags) & (GTF_reading|GTF_writing) )
-            printk(KERN_ALERT "WARNING: g.e. still in use!\n");
-    }
-    while ( (nflags = cmpxchg(&shared[ref].flags, flags, 0)) != flags );
-
-    put_free_entry(ref);
-}
-
-int
-gnttab_grant_foreign_transfer(
-    domid_t domid)
-{
-    int ref;
-
-    if ( unlikely((ref = get_free_entry()) == -1) )
-        return -ENOSPC;
-
-    shared[ref].frame = 0;
-    shared[ref].domid = domid;
-    wmb();
-    shared[ref].flags = GTF_accept_transfer;
-
-    return ref;
-}
-
-unsigned long
-gnttab_end_foreign_transfer(
-    grant_ref_t ref)
-{
-    unsigned long frame = 0;
-    u16           flags;
-
-    flags = shared[ref].flags;
-    ASSERT(flags == (GTF_accept_transfer | GTF_transfer_committed));
-
-    /*
-     * If a transfer is committed then wait for the frame address to appear.
-     * Otherwise invalidate the grant entry against future use.
-     */
-    if ( likely(flags != GTF_accept_transfer) ||
-         (cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
-        while ( unlikely((frame = shared[ref].frame) == 0) )
-            cpu_relax();
-
-    put_free_entry(ref);
-
-    return frame;
-}
-
-void __init gnttab_init(void)
-{
-    gnttab_setup_table_t setup;
-    unsigned long        frame;
-    int                  i;
-
-    for ( i = 0; i < NR_GRANT_REFS; i++ )
-        gnttab_free_list[i] = i + 1;
-
-    setup.dom        = DOMID_SELF;
-    setup.nr_frames  = 1;
-    setup.frame_list = &frame;
-    if ( HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0 )
-        BUG();
-    if ( setup.status != 0 )
-        BUG();
-
-    set_fixmap_ma(FIX_GNTTAB, frame << PAGE_SHIFT);
-    shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB);
-}
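
A hypothetical caller of the grant interface removed here, to show the intended flow: grant one frame of our memory to a backend domain read-only, hand the reference over out of band, and later revoke it. The backend domid, the page, and the function name are assumptions rather than code from this tree.

#include <asm/page.h>
#include <asm-xen/gnttab.h>
#include <asm-xen/hypervisor.h>

static int example_share_page(domid_t backend, void *page)
{
    int ref;
    unsigned long frame = virt_to_machine(page) >> PAGE_SHIFT;

    ref = gnttab_grant_foreign_access(backend, frame, 1 /* readonly */);
    if ( ref < 0 )
        return ref;                      /* -ENOSPC: no free grant entries */

    /* ... communicate 'ref' to the backend (e.g. via a shared ring) ... */

    gnttab_end_foreign_access(ref, 1);   /* revoke once the backend is done */
    return 0;
}
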
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/process.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/process.c
deleted file mode 100644 (file)
index 3656196..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#include <stdarg.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/platform.h>
-#include <linux/pm.h>
-#include <linux/rcupdate.h>
-
-extern int set_timeout_timer(void);
-
-void xen_cpu_idle (void)
-{
-       int cpu = smp_processor_id();
-
-       local_irq_disable();
-       if (need_resched() || RCU_curlist(cpu)) {
-               local_irq_enable();
-               return;
-       }
-       if (set_timeout_timer() == 0) {
-               /* NB. Blocking reenables events in a race-free manner. */
-               HYPERVISOR_block();
-               return;
-       }
-       local_irq_enable();
-       HYPERVISOR_yield();
-}
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/reboot.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/reboot.c
deleted file mode 100644 (file)
index 9b9761e..0000000
+++ /dev/null
@@ -1,243 +0,0 @@
-
-#define __KERNEL_SYSCALLS__
-static int errno;
-#include <linux/errno.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/unistd.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/hypervisor-ifs/dom0_ops.h>
-#include <asm-xen/suspend.h>
-#include <asm-xen/queues.h>
-
-void machine_restart(char * __unused)
-{
-       /* We really want to get pending console data out before we die. */
-       extern void xencons_force_flush(void);
-       xencons_force_flush();
-       HYPERVISOR_reboot();
-}
-
-void machine_halt(void)
-{
-       machine_power_off();
-}
-
-void machine_power_off(void)
-{
-       /* We really want to get pending console data out before we die. */
-       extern void xencons_force_flush(void);
-       xencons_force_flush();
-       HYPERVISOR_shutdown();
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-int reboot_thru_bios = 0;      /* for dmi_scan.c */
-EXPORT_SYMBOL(machine_restart);
-EXPORT_SYMBOL(machine_halt);
-EXPORT_SYMBOL(machine_power_off);
-#endif
-
-
-/******************************************************************************
- * Stop/pickle callback handling.
- */
-
-#include <asm/suspend.h>
-
-/* Ignore multiple shutdown requests. */
-static int shutting_down = -1;
-
-static void __do_suspend(void)
-{
-    int i, j;
-    suspend_record_t *suspend_record;
-
-    /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
-       /* XXX SMH: yes it would :-( */ 
-#ifdef CONFIG_XEN_BLKDEV_FRONTEND
-    extern void blkdev_suspend(void);
-    extern void blkdev_resume(void);
-#else
-#define blkdev_suspend() do{}while(0)
-#define blkdev_resume()  do{}while(0)
-#endif
-
-#ifdef CONFIG_XEN_NETDEV_FRONTEND
-    extern void netif_suspend(void);
-    extern void netif_resume(void);  
-#else
-#define netif_suspend() do{}while(0)
-#define netif_resume()  do{}while(0)
-#endif
-
-    extern void time_suspend(void);
-    extern void time_resume(void);
-    extern unsigned long max_pfn;
-    extern unsigned long *pfn_to_mfn_frame_list;
-
-    suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
-    if ( suspend_record == NULL )
-        goto out;
-
-    suspend_record->nr_pfns = max_pfn; /* final number of pfns */
-
-    __cli();
-
-    netif_suspend();
-
-    blkdev_suspend();
-
-    time_suspend();
-
-    ctrl_if_suspend();
-
-    irq_suspend();
-
-    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-    clear_fixmap(FIX_SHARED_INFO);
-
-    memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
-
-    HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
-
-    HYPERVISOR_vm_assist(VMASST_CMD_enable,
-                        VMASST_TYPE_4gb_segments);
-#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
-    HYPERVISOR_vm_assist(VMASST_CMD_enable,
-                        VMASST_TYPE_writable_pagetables);
-#endif
-
-    shutting_down = -1; 
-
-    memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-#else
-    set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
-#endif
-
-    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-
-    memset(empty_zero_page, 0, PAGE_SIZE);
-
-    for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
-    {
-        pfn_to_mfn_frame_list[j] = 
-            virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
-    }
-    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
-        virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
-
-
-    irq_resume();
-
-    ctrl_if_resume();
-
-    time_resume();
-
-    blkdev_resume();
-
-    netif_resume();
-
-    __sti();
-
- out:
-    if ( suspend_record != NULL )
-        free_page((unsigned long)suspend_record);
-}
-
-static int shutdown_process(void *__unused)
-{
-    static char *envp[] = { "HOME=/", "TERM=linux", 
-                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-    static char *restart_argv[]  = { "/sbin/shutdown", "-r", "now", NULL };
-    static char *poweroff_argv[] = { "/sbin/halt",     "-p",        NULL };
-
-    extern asmlinkage long sys_reboot(int magic1, int magic2,
-                                      unsigned int cmd, void *arg);
-
-    daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        "shutdown"
-#endif
-        );
-
-    switch ( shutting_down )
-    {
-    case CMSG_SHUTDOWN_POWEROFF:
-        if ( execve("/sbin/halt", poweroff_argv, envp) < 0 )
-        {
-            sys_reboot(LINUX_REBOOT_MAGIC1,
-                       LINUX_REBOOT_MAGIC2,
-                       LINUX_REBOOT_CMD_POWER_OFF,
-                       NULL);
-        }
-        break;
-
-    case CMSG_SHUTDOWN_REBOOT:
-        if ( execve("/sbin/shutdown", restart_argv, envp) < 0 )
-        {
-            sys_reboot(LINUX_REBOOT_MAGIC1,
-                       LINUX_REBOOT_MAGIC2,
-                       LINUX_REBOOT_CMD_RESTART,
-                       NULL);
-        }
-        break;
-    }
-
-    shutting_down = -1; /* could try again */
-
-    return 0;
-}
-
-static void __shutdown_handler(void *unused)
-{
-    int err;
-
-    if ( shutting_down != CMSG_SHUTDOWN_SUSPEND )
-    {
-        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
-        if ( err < 0 )
-            printk(KERN_ALERT "Error creating shutdown process!\n");
-    }
-    else
-    {
-        __do_suspend();
-    }
-}
-
-static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
-{
-    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-
-    if ( (shutting_down == -1) &&
-         ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
-          (msg->subtype == CMSG_SHUTDOWN_REBOOT) ||
-          (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
-    {
-        shutting_down = msg->subtype;
-        schedule_work(&shutdown_work);
-    }
-    else
-    {
-        printk("Ignore spurious shutdown request\n");
-    }
-
-    ctrl_if_send_response(msg);
-}
-
-static int __init setup_shutdown_event(void)
-{
-    ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
-    return 0;
-}
-
-__initcall(setup_shutdown_event);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c
deleted file mode 100644 (file)
index 90274fd..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/page.h>
-
-EXPORT_SYMBOL(__dev_alloc_skb);
-
-/* Referenced in netback.c. */
-/*static*/ kmem_cache_t *skbuff_cachep;
-
-/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */
-#define XEN_SKB_SIZE \
-    ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))
-
-struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
-{
-    struct sk_buff *skb;
-    skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
-    if ( likely(skb != NULL) )
-        skb_reserve(skb, 16);
-    return skb;
-}
-
-static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-{
-    scrub_pages(buf, 1);
-}
-
-static int __init skbuff_init(void)
-{
-    skbuff_cachep = kmem_cache_create(
-        "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
-    return 0;
-}
-__initcall(skbuff_init);
diff --git a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/xen_proc.c b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/xen_proc.c
deleted file mode 100644 (file)
index 9c06dcd..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-
-static struct proc_dir_entry *xen_base;
-
-struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-{
-    if ( xen_base == NULL )
-        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-            panic("Couldn't create /proc/xen");
-    return create_proc_entry(name, mode, xen_base);
-}
-
-void remove_xen_proc_entry(const char *name)
-{
-    remove_proc_entry(name, xen_base);
-}
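
A sketch of how the helpers above were meant to be used: create a node under /proc/xen at init time and remove it on teardown. The entry name and handler are hypothetical; read_proc is the standard 2.6-era procfs callback, and the prototypes are declared locally for self-containment.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>

extern struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode);
extern void remove_xen_proc_entry(const char *name);

static int example_read(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
    *eof = 1;
    return sprintf(page, "example data\n");
}

static int __init example_proc_init(void)
{
    struct proc_dir_entry *entry = create_xen_proc_entry("example", 0444);
    if ( entry != NULL )
        entry->read_proc = example_read;
    return 0;
}
__initcall(example_proc_init);

/* On teardown: remove_xen_proc_entry("example"); */
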
diff --git a/linux-2.6.8.1-xen-sparse/drivers/Makefile b/linux-2.6.8.1-xen-sparse/drivers/Makefile
deleted file mode 100644 (file)
index f21855a..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Makefile for the Linux kernel device drivers.
-#
-# 15 Sep 2000, Christoph Hellwig <hch@infradead.org>
-# Rewritten to use lists instead of if-statements.
-#
-
-obj-$(CONFIG_PCI)              += pci/
-obj-$(CONFIG_PARISC)           += parisc/
-obj-$(CONFIG_ACPI_BOOT)                += acpi/
-# PnP must come after ACPI since it will eventually need to check if acpi
-# was used and do nothing if so
-obj-$(CONFIG_PNP)              += pnp/
-
-# char/ comes before serial/ etc so that the VT console is the boot-time
-# default.
-obj-y                          += char/
-obj-y                          += serial/
-obj-$(CONFIG_PARPORT)          += parport/
-obj-y                          += base/ block/ misc/ net/ media/
-obj-$(CONFIG_NUBUS)            += nubus/
-obj-$(CONFIG_ATM)              += atm/
-obj-$(CONFIG_PPC_PMAC)         += macintosh/
-obj-$(CONFIG_ARCH_XEN)         += xen/
-obj-$(CONFIG_IDE)              += ide/
-obj-$(CONFIG_FC4)              += fc4/
-obj-$(CONFIG_SCSI)             += scsi/
-obj-$(CONFIG_FUSION)           += message/
-obj-$(CONFIG_IEEE1394)         += ieee1394/
-obj-y                          += cdrom/ video/
-obj-$(CONFIG_MTD)              += mtd/
-obj-$(CONFIG_PCMCIA)           += pcmcia/
-obj-$(CONFIG_DIO)              += dio/
-obj-$(CONFIG_SBUS)             += sbus/
-obj-$(CONFIG_ZORRO)            += zorro/
-obj-$(CONFIG_MAC)              += macintosh/
-obj-$(CONFIG_PARIDE)           += block/paride/
-obj-$(CONFIG_TC)               += tc/
-obj-$(CONFIG_USB)              += usb/
-obj-$(CONFIG_USB_GADGET)       += usb/gadget/
-obj-$(CONFIG_INPUT)            += input/
-obj-$(CONFIG_GAMEPORT)         += input/gameport/
-obj-$(CONFIG_SERIO)            += input/serio/
-obj-$(CONFIG_I2O)              += message/
-obj-$(CONFIG_I2C)              += i2c/
-obj-$(CONFIG_W1)               += w1/
-obj-$(CONFIG_PHONE)            += telephony/
-obj-$(CONFIG_MD)               += md/
-obj-$(CONFIG_BT)               += bluetooth/
-obj-$(CONFIG_ISDN)             += isdn/
-obj-$(CONFIG_MCA)              += mca/
-obj-$(CONFIG_EISA)             += eisa/
-obj-$(CONFIG_CPU_FREQ)         += cpufreq/
-obj-y                          += firmware/
diff --git a/linux-2.6.8.1-xen-sparse/drivers/char/mem.c b/linux-2.6.8.1-xen-sparse/drivers/char/mem.c
deleted file mode 100644 (file)
index 3970f88..0000000
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- *  linux/drivers/char/mem.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Added devfs support. 
- *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
- *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/raw.h>
-#include <linux/tty.h>
-#include <linux/capability.h>
-#include <linux/smp_lock.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/ptrace.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-
-#ifdef CONFIG_IA64
-# include <linux/efi.h>
-#endif
-
-#ifdef CONFIG_FB
-extern void fbmem_init(void);
-#endif
-#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
-extern void tapechar_init(void);
-#endif
-
-/*
- * Architectures vary in how they handle caching for addresses
- * outside of main memory.
- *
- */
-static inline int uncached_access(struct file *file, unsigned long addr)
-{
-#if defined(__i386__)
-       /*
-        * On the PPro and successors, the MTRRs are used to set
-        * memory types for physical addresses outside main memory,
-        * so blindly setting PCD or PWT on those pages is wrong.
-        * For Pentiums and earlier, the surround logic should disable
-        * caching for the high addresses through the KEN pin, but
-        * we maintain the tradition of paranoia in this code.
-        */
-       if (file->f_flags & O_SYNC)
-               return 1;
-       return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
-         && addr >= __pa(high_memory);
-#elif defined(__x86_64__)
-       /* 
-        * This is broken because it can generate memory type aliases,
-        * which can cause cache corruptions
-        * But it is only available for root and we have to be bug-to-bug
-        * compatible with i386.
-        */
-       if (file->f_flags & O_SYNC)
-               return 1;
-       /* same behaviour as i386. PAT always set to cached and MTRRs control the
-          caching behaviour. 
-          Hopefully a full PAT implementation will fix that soon. */      
-       return 0;
-#elif defined(CONFIG_IA64)
-       /*
-        * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
-        */
-       return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#elif defined(CONFIG_PPC64)
-       /* On PPC64, we always do non-cacheable access to the IO hole and
-        * cacheable elsewhere. Cache paradox can checkstop the CPU and
-        * the high_memory heuristic below is wrong on machines with memory
-        * above the IO hole... Ah, and of course, XFree86 doesn't pass
-        * O_SYNC when mapping us to tap IO space. Surprised ?
-        */
-       return !page_is_ram(addr);
-#else
-       /*
-        * Accessing memory above the top the kernel knows about or through a file pointer
-        * that was marked O_SYNC will be done non-cached.
-        */
-       if (file->f_flags & O_SYNC)
-               return 1;
-       return addr >= __pa(high_memory);
-#endif
-}
-
-#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
-{
-       unsigned long end_mem;
-
-       end_mem = __pa(high_memory);
-       if (addr >= end_mem)
-               return 0;
-
-       if (*count > end_mem - addr)
-               *count = end_mem - addr;
-
-       return 1;
-}
-#endif
-
-static ssize_t do_write_mem(void *p, unsigned long realp,
-                           const char __user * buf, size_t count, loff_t *ppos)
-{
-       ssize_t written;
-       unsigned long copied;
-
-       written = 0;
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
-       /* we don't have page 0 mapped on sparc and m68k.. */
-       if (realp < PAGE_SIZE) {
-               unsigned long sz = PAGE_SIZE-realp;
-               if (sz > count) sz = count; 
-               /* Hmm. Do something? */
-               buf+=sz;
-               p+=sz;
-               count-=sz;
-               written+=sz;
-       }
-#endif
-       copied = copy_from_user(p, buf, count);
-       if (copied) {
-               ssize_t ret = written + (count - copied);
-
-               if (ret)
-                       return ret;
-               return -EFAULT;
-       }
-       written += count;
-       *ppos += written;
-       return written;
-}
-
-
-/*
- * This function reads the *physical* memory. The f_pos points directly to the
- * memory location. 
- */
-static ssize_t read_mem(struct file * file, char __user * buf,
-                       size_t count, loff_t *ppos)
-{
-       unsigned long p = *ppos;
-       ssize_t read;
-
-       if (!valid_phys_addr_range(p, &count))
-               return -EFAULT;
-       read = 0;
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
-       /* we don't have page 0 mapped on sparc and m68k.. */
-       if (p < PAGE_SIZE) {
-               unsigned long sz = PAGE_SIZE-p;
-               if (sz > count) 
-                       sz = count; 
-               if (sz > 0) {
-                       if (clear_user(buf, sz))
-                               return -EFAULT;
-                       buf += sz; 
-                       p += sz; 
-                       count -= sz; 
-                       read += sz; 
-               }
-       }
-#endif
-       if (copy_to_user(buf, __va(p), count))
-               return -EFAULT;
-       read += count;
-       *ppos += read;
-       return read;
-}
-
-static ssize_t write_mem(struct file * file, const char __user * buf, 
-                        size_t count, loff_t *ppos)
-{
-       unsigned long p = *ppos;
-
-       if (!valid_phys_addr_range(p, &count))
-               return -EFAULT;
-       return do_write_mem(__va(p), p, buf, count, ppos);
-}
-
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-{
-#if !defined(CONFIG_XEN)
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-       int uncached;
-
-       uncached = uncached_access(file, offset);
-#ifdef pgprot_noncached
-       if (uncached)
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
-
-       /* Don't try to swap out physical pages.. */
-       vma->vm_flags |= VM_RESERVED;
-
-       /*
-        * Don't dump addresses that are not real memory to a core file.
-        */
-       if (uncached)
-               vma->vm_flags |= VM_IO;
-
-       if (remap_page_range(vma, vma->vm_start, offset, vma->vm_end-vma->vm_start,
-                            vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
-#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
-       return -ENXIO;
-#else
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
-       if (!(xen_start_info.flags & SIF_PRIVILEGED))
-               return -ENXIO;
-
-       /* Currently we're not smart about setting PTE cacheability. */
-       vma->vm_flags |= VM_RESERVED | VM_IO;
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset, 
-                               vma->vm_end-vma->vm_start, vma->vm_page_prot,
-                               DOMID_IO))
-               return -EAGAIN;
-       return 0;
-#endif
-}
-
-extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
-
-/*
- * This function reads the *virtual* memory as seen by the kernel.
- */
-static ssize_t read_kmem(struct file *file, char __user *buf, 
-                        size_t count, loff_t *ppos)
-{
-       unsigned long p = *ppos;
-       ssize_t read = 0;
-       ssize_t virtr = 0;
-       char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-               
-       if (p < (unsigned long) high_memory) {
-               read = count;
-               if (count > (unsigned long) high_memory - p)
-                       read = (unsigned long) high_memory - p;
-
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
-               /* we don't have page 0 mapped on sparc and m68k.. */
-               if (p < PAGE_SIZE && read > 0) {
-                       size_t tmp = PAGE_SIZE - p;
-                       if (tmp > read) tmp = read;
-                       if (clear_user(buf, tmp))
-                               return -EFAULT;
-                       buf += tmp;
-                       p += tmp;
-                       read -= tmp;
-                       count -= tmp;
-               }
-#endif
-               if (copy_to_user(buf, (char *)p, read))
-                       return -EFAULT;
-               p += read;
-               buf += read;
-               count -= read;
-       }
-
-       if (count > 0) {
-               kbuf = (char *)__get_free_page(GFP_KERNEL);
-               if (!kbuf)
-                       return -ENOMEM;
-               while (count > 0) {
-                       int len = count;
-
-                       if (len > PAGE_SIZE)
-                               len = PAGE_SIZE;
-                       len = vread(kbuf, (char *)p, len);
-                       if (!len)
-                               break;
-                       if (copy_to_user(buf, kbuf, len)) {
-                               free_page((unsigned long)kbuf);
-                               return -EFAULT;
-                       }
-                       count -= len;
-                       buf += len;
-                       virtr += len;
-                       p += len;
-               }
-               free_page((unsigned long)kbuf);
-       }
-       *ppos = p;
-       return virtr + read;
-}
-
-/*
- * This function writes to the *virtual* memory as seen by the kernel.
- */
-static ssize_t write_kmem(struct file * file, const char __user * buf, 
-                         size_t count, loff_t *ppos)
-{
-       unsigned long p = *ppos;
-       ssize_t wrote = 0;
-       ssize_t virtr = 0;
-       ssize_t written;
-       char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
-
-       if (p < (unsigned long) high_memory) {
-
-               wrote = count;
-               if (count > (unsigned long) high_memory - p)
-                       wrote = (unsigned long) high_memory - p;
-
-               written = do_write_mem((void*)p, p, buf, wrote, ppos);
-               if (written != wrote)
-                       return written;
-               wrote = written;
-               p += wrote;
-               buf += wrote;
-               count -= wrote;
-       }
-
-       if (count > 0) {
-               kbuf = (char *)__get_free_page(GFP_KERNEL);
-               if (!kbuf)
-                       return wrote ? wrote : -ENOMEM;
-               while (count > 0) {
-                       int len = count;
-
-                       if (len > PAGE_SIZE)
-                               len = PAGE_SIZE;
-                       if (len) {
-                               written = copy_from_user(kbuf, buf, len);
-                               if (written) {
-                                       ssize_t ret;
-
-                                       free_page((unsigned long)kbuf);
-                                       ret = wrote + virtr + (len - written);
-                                       return ret ? ret : -EFAULT;
-                               }
-                       }
-                       len = vwrite(kbuf, (char *)p, len);
-                       count -= len;
-                       buf += len;
-                       virtr += len;
-                       p += len;
-               }
-               free_page((unsigned long)kbuf);
-       }
-
-       *ppos = p;
-       return virtr + wrote;
-}
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static ssize_t read_port(struct file * file, char __user * buf,
-                        size_t count, loff_t *ppos)
-{
-       unsigned long i = *ppos;
-       char __user *tmp = buf;
-
-       if (verify_area(VERIFY_WRITE,buf,count))
-               return -EFAULT; 
-       while (count-- > 0 && i < 65536) {
-               if (__put_user(inb(i),tmp) < 0) 
-                       return -EFAULT;  
-               i++;
-               tmp++;
-       }
-       *ppos = i;
-       return tmp-buf;
-}
-
-static ssize_t write_port(struct file * file, const char __user * buf,
-                         size_t count, loff_t *ppos)
-{
-       unsigned long i = *ppos;
-       const char __user * tmp = buf;
-
-       if (verify_area(VERIFY_READ,buf,count))
-               return -EFAULT;
-       while (count-- > 0 && i < 65536) {
-               char c;
-               if (__get_user(c, tmp)) 
-                       return -EFAULT; 
-               outb(c,i);
-               i++;
-               tmp++;
-       }
-       *ppos = i;
-       return tmp-buf;
-}
-#endif
-
-static ssize_t read_null(struct file * file, char __user * buf,
-                        size_t count, loff_t *ppos)
-{
-       return 0;
-}
-
-static ssize_t write_null(struct file * file, const char __user * buf,
-                         size_t count, loff_t *ppos)
-{
-       return count;
-}
-
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
-       struct mm_struct *mm;
-       struct vm_area_struct * vma;
-       unsigned long addr=(unsigned long)buf;
-
-       mm = current->mm;
-       /* Oops, this was forgotten before. -ben */
-       down_read(&mm->mmap_sem);
-
-       /* For private mappings, just map in zero pages. */
-       for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-               unsigned long count;
-
-               if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
-                       goto out_up;
-               if (vma->vm_flags & VM_SHARED)
-                       break;
-               count = vma->vm_end - addr;
-               if (count > size)
-                       count = size;
-
-               zap_page_range(vma, addr, count, NULL);
-               zeromap_page_range(vma, addr, count, PAGE_COPY);
-
-               size -= count;
-               buf += count;
-               addr += count;
-               if (size == 0)
-                       goto out_up;
-       }
-
-       up_read(&mm->mmap_sem);
-       
-       /* The shared case is hard. Let's do the conventional zeroing. */ 
-       do {
-               unsigned long unwritten = clear_user(buf, PAGE_SIZE);
-               if (unwritten)
-                       return size + unwritten - PAGE_SIZE;
-               cond_resched();
-               buf += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       } while (size);
-
-       return size;
-out_up:
-       up_read(&mm->mmap_sem);
-       return size;
-}
-
-static ssize_t read_zero(struct file * file, char __user * buf, 
-                        size_t count, loff_t *ppos)
-{
-       unsigned long left, unwritten, written = 0;
-
-       if (!count)
-               return 0;
-
-       if (!access_ok(VERIFY_WRITE, buf, count))
-               return -EFAULT;
-
-       left = count;
-
-       /* do we want to be clever? Arbitrary cut-off */
-       if (count >= PAGE_SIZE*4) {
-               unsigned long partial;
-
-               /* How much left of the page? */
-               partial = (PAGE_SIZE-1) & -(unsigned long) buf;
-               unwritten = clear_user(buf, partial);
-               written = partial - unwritten;
-               if (unwritten)
-                       goto out;
-               left -= partial;
-               buf += partial;
-               unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
-               written += (left & PAGE_MASK) - unwritten;
-               if (unwritten)
-                       goto out;
-               buf += left & PAGE_MASK;
-               left &= ~PAGE_MASK;
-       }
-       unwritten = clear_user(buf, left);
-       written += left - unwritten;
-out:
-       return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-       if (vma->vm_flags & VM_SHARED)
-               return shmem_zero_setup(vma);
-       if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf, 
-                        size_t count, loff_t *ppos)
-{
-       size_t todo = count;
-
-       while (todo) {
-               size_t chunk = todo;
-
-               if (chunk > 4096)
-                       chunk = 4096;   /* Just for latency reasons */
-               if (clear_user(buf, chunk))
-                       return -EFAULT;
-               buf += chunk;
-               todo -= chunk;
-               cond_resched();
-       }
-       return count;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-       return -ENOSYS;
-}
-#endif /* CONFIG_MMU */
-
-static ssize_t write_full(struct file * file, const char __user * buf,
-                         size_t count, loff_t *ppos)
-{
-       return -ENOSPC;
-}
-
-/*
- * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
- * can fopen() both devices with "a" now.  This was previously impossible.
- * -- SRB.
- */
-
-static loff_t null_lseek(struct file * file, loff_t offset, int orig)
-{
-       return file->f_pos = 0;
-}
-
-/*
- * The memory devices use the full 32/64 bits of the offset, and so we cannot
- * check against negative addresses: they are ok. The return value is weird,
- * though, in that case (0).
- *
- * also note that seeking relative to the "end of file" isn't supported:
- * it has no meaning, so it returns -EINVAL.
- */
-static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-{
-       loff_t ret;
-
-       down(&file->f_dentry->d_inode->i_sem);
-       switch (orig) {
-               case 0:
-                       file->f_pos = offset;
-                       ret = file->f_pos;
-                       force_successful_syscall_return();
-                       break;
-               case 1:
-                       file->f_pos += offset;
-                       ret = file->f_pos;
-                       force_successful_syscall_return();
-                       break;
-               default:
-                       ret = -EINVAL;
-       }
-       up(&file->f_dentry->d_inode->i_sem);
-       return ret;
-}
-
-static int open_port(struct inode * inode, struct file * filp)
-{
-       return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-#define mmap_kmem      mmap_mem
-#define zero_lseek     null_lseek
-#define full_lseek      null_lseek
-#define write_zero     write_null
-#define read_full       read_zero
-#define open_mem       open_port
-#define open_kmem      open_mem
-
-static struct file_operations mem_fops = {
-       .llseek         = memory_lseek,
-       .read           = read_mem,
-       .write          = write_mem,
-       .mmap           = mmap_mem,
-       .open           = open_mem,
-};
-
-static struct file_operations kmem_fops = {
-       .llseek         = memory_lseek,
-       .read           = read_kmem,
-       .write          = write_kmem,
-       .mmap           = mmap_kmem,
-       .open           = open_kmem,
-};
-
-static struct file_operations null_fops = {
-       .llseek         = null_lseek,
-       .read           = read_null,
-       .write          = write_null,
-};
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static struct file_operations port_fops = {
-       .llseek         = memory_lseek,
-       .read           = read_port,
-       .write          = write_port,
-       .open           = open_port,
-};
-#endif
-
-static struct file_operations zero_fops = {
-       .llseek         = zero_lseek,
-       .read           = read_zero,
-       .write          = write_zero,
-       .mmap           = mmap_zero,
-};
-
-static struct file_operations full_fops = {
-       .llseek         = full_lseek,
-       .read           = read_full,
-       .write          = write_full,
-};
-
-static ssize_t kmsg_write(struct file * file, const char __user * buf,
-                         size_t count, loff_t *ppos)
-{
-       char *tmp;
-       int ret;
-
-       tmp = kmalloc(count + 1, GFP_KERNEL);
-       if (tmp == NULL)
-               return -ENOMEM;
-       ret = -EFAULT;
-       if (!copy_from_user(tmp, buf, count)) {
-               tmp[count] = 0;
-               ret = printk("%s", tmp);
-       }
-       kfree(tmp);
-       return ret;
-}
-
-static struct file_operations kmsg_fops = {
-       .write =        kmsg_write,
-};
-
-static int memory_open(struct inode * inode, struct file * filp)
-{
-       switch (iminor(inode)) {
-               case 1:
-                       filp->f_op = &mem_fops;
-                       break;
-               case 2:
-                       filp->f_op = &kmem_fops;
-                       break;
-               case 3:
-                       filp->f_op = &null_fops;
-                       break;
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-               case 4:
-                       filp->f_op = &port_fops;
-                       break;
-#endif
-               case 5:
-                       filp->f_op = &zero_fops;
-                       break;
-               case 7:
-                       filp->f_op = &full_fops;
-                       break;
-               case 8:
-                       filp->f_op = &random_fops;
-                       break;
-               case 9:
-                       filp->f_op = &urandom_fops;
-                       break;
-               case 11:
-                       filp->f_op = &kmsg_fops;
-                       break;
-               default:
-                       return -ENXIO;
-       }
-       if (filp->f_op && filp->f_op->open)
-               return filp->f_op->open(inode,filp);
-       return 0;
-}
-
-static struct file_operations memory_fops = {
-       .open           = memory_open,  /* just a selector for the real open */
-};
-
-static const struct {
-       unsigned int            minor;
-       char                    *name;
-       umode_t                 mode;
-       struct file_operations  *fops;
-} devlist[] = { /* list of minor devices */
-       {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
-       {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
-       {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-       {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
-#endif
-       {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
-       {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
-       {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
-       {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
-       {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
-};
-
-static struct class_simple *mem_class;
-
-static int __init chr_dev_init(void)
-{
-       int i;
-
-       if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
-               printk("unable to get major %d for memory devs\n", MEM_MAJOR);
-
-       mem_class = class_simple_create(THIS_MODULE, "mem");
-       for (i = 0; i < ARRAY_SIZE(devlist); i++) {
-               class_simple_device_add(mem_class,
-                                       MKDEV(MEM_MAJOR, devlist[i].minor),
-                                       NULL, devlist[i].name);
-               devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
-                               S_IFCHR | devlist[i].mode, devlist[i].name);
-       }
-       
-#if defined (CONFIG_FB)
-       fbmem_init();
-#endif
-       return 0;
-}
-
-fs_initcall(chr_dev_init);
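
Illustrative sketch only, not part of this commit: the deleted mem.c above implements the classic memory character devices, and its comments note that /dev/null and /dev/zero can now be fopen()ed with "a" and that writes to /dev/full fail with -ENOSPC. A minimal userspace check of that behaviour, assuming an ordinary Linux system with the standard /dev/null and /dev/full device nodes present, might look like:

/* Illustrative sketch only -- not part of this commit.  Exercises the
 * /dev/null and /dev/full semantics described in the mem.c comments above,
 * assuming the standard Linux memory device nodes exist. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t r, w;
	int fd;

	/* null_lseek() pins the file position at 0, so append mode works. */
	FILE *f = fopen("/dev/null", "a");
	if (f) {
		printf("/dev/null opened with \"a\", pos=%ld\n", ftell(f));
		fclose(f);
	}

	/* /dev/full reads as zeroes (read_full is read_zero) but every
	 * write fails with ENOSPC. */
	fd = open("/dev/full", O_RDWR);
	if (fd >= 0) {
		r = read(fd, buf, sizeof(buf));
		w = write(fd, buf, sizeof(buf));
		printf("/dev/full: read=%zd, write=%zd (%s)\n",
		       r, w, w < 0 ? strerror(errno) : "ok");
		close(fd);
	}
	return 0;
}
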
diff --git a/linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c b/linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c
deleted file mode 100644 (file)
index bdb0910..0000000
--- a/linux-2.6.8.1-xen-sparse/drivers/char/tty_io.c
+++ /dev/null
@@ -1,2472 +0,0 @@
-/*
- *  linux/drivers/char/tty_io.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-/*
- * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
- * or rs-channels. It also implements echoing, cooked mode etc.
- *
- * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
- *
- * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
- * tty_struct and tty_queue structures.  Previously there was an array
- * of 256 tty_struct's which was statically allocated, and the
- * tty_queue structures were allocated at boot time.  Both are now
- * dynamically allocated only when the tty is open.
- *
- * Also restructured routines so that there is more of a separation
- * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
- * the low-level tty routines (serial.c, pty.c, console.c).  This
- * makes for cleaner and more compact code.  -TYT, 9/17/92 
- *
- * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
- * which can be dynamically activated and de-activated by the line
- * discipline handling modules (like SLIP).
- *
- * NOTE: pay no attention to the line discipline code (yet); its
- * interface is still subject to change in this version...
- * -- TYT, 1/31/92
- *
- * Added functionality to the OPOST tty handling.  No delays, but all
- * other bits should be there.
- *     -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
- *
- * Rewrote canonical mode and added more termios flags.
- *     -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
- *
- * Reorganized FASYNC support so mouse code can share it.
- *     -- ctm@ardi.com, 9Sep95
- *
- * New TIOCLINUX variants added.
- *     -- mj@k332.feld.cvut.cz, 19-Nov-95
- * 
- * Restrict vt switching via ioctl()
- *      -- grif@cs.ucr.edu, 5-Dec-95
- *
- * Move console and virtual terminal code to more appropriate files,
- * implement CONFIG_VT and generalize console device interface.
- *     -- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
- *
- * Rewrote init_dev and release_dev to eliminate races.
- *     -- Bill Hawes <whawes@star.net>, June 97
- *
- * Added devfs support.
- *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
- *
- * Added support for a Unix98-style ptmx device.
- *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
- *
- * Reduced memory usage for older ARM systems
- *      -- Russell King <rmk@arm.linux.org.uk>
- *
- * Move do_SAK() into process context.  Less stack use in devfs functions.
- * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
-#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/smp_lock.h>
-#include <linux/device.h>
-#include <linux/idr.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-#include <linux/devfs_fs_kernel.h>
-
-#include <linux/kmod.h>
-
-#undef TTY_DEBUG_HANGUP
-
-#define TTY_PARANOIA_CHECK 1
-#define CHECK_TTY_COUNT 1
-
-struct termios tty_std_termios = {     /* for the benefit of tty drivers  */
-       .c_iflag = ICRNL | IXON,
-       .c_oflag = OPOST | ONLCR,
-       .c_cflag = B38400 | CS8 | CREAD | HUPCL,
-       .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
-                  ECHOCTL | ECHOKE | IEXTEN,
-       .c_cc = INIT_C_CC
-};
-
-EXPORT_SYMBOL(tty_std_termios);
-
-LIST_HEAD(tty_drivers);                        /* linked list of tty drivers */
-struct tty_ldisc ldiscs[NR_LDISCS];    /* line disc dispatch table     */
-
-/* Semaphore to protect creating and releasing a tty */
-DECLARE_MUTEX(tty_sem);
-
-#ifdef CONFIG_VT
-int console_use_vt = 1;
-#endif
-#ifdef CONFIG_UNIX98_PTYS
-extern struct tty_driver *ptm_driver;  /* Unix98 pty masters; for /dev/ptmx */
-extern int pty_limit;          /* Config limit on Unix98 ptys */
-static DEFINE_IDR(allocated_ptys);
-static DECLARE_MUTEX(allocated_ptys_lock);
-#endif
-
-extern void disable_early_printk(void);
-
-static void initialize_tty_struct(struct tty_struct *tty);
-
-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
-static int tty_open(struct inode *, struct file *);
-static int tty_release(struct inode *, struct file *);
-int tty_ioctl(struct inode * inode, struct file * file,
-             unsigned int cmd, unsigned long arg);
-static int tty_fasync(int fd, struct file * filp, int on);
-extern void rs_360_init(void);
-static void release_mem(struct tty_struct *tty, int idx);
-
-
-static struct tty_struct *alloc_tty_struct(void)
-{
-       struct tty_struct *tty;
-
-       tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
-       if (tty)
-               memset(tty, 0, sizeof(struct tty_struct));
-       return tty;
-}
-
-static inline void free_tty_struct(struct tty_struct *tty)
-{
-       kfree(tty);
-}
-
-#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
-
-char *tty_name(struct tty_struct *tty, char *buf)
-{
-       if (!tty) /* Hmm.  NULL pointer.  That's fun. */
-               strcpy(buf, "NULL tty");
-       else
-               strcpy(buf, tty->name);
-       return buf;
-}
-
-EXPORT_SYMBOL(tty_name);
-
-inline int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
-                             const char *routine)
-{
-#ifdef TTY_PARANOIA_CHECK
-       if (!tty) {
-               printk(KERN_WARNING
-                       "null TTY for (%d:%d) in %s\n",
-                       imajor(inode), iminor(inode), routine);
-               return 1;
-       }
-       if (tty->magic != TTY_MAGIC) {
-               printk(KERN_WARNING
-                       "bad magic number for tty struct (%d:%d) in %s\n",
-                       imajor(inode), iminor(inode), routine);
-               return 1;
-       }
-#endif
-       return 0;
-}
-
-static int check_tty_count(struct tty_struct *tty, const char *routine)
-{
-#ifdef CHECK_TTY_COUNT
-       struct list_head *p;
-       int count = 0;
-       
-       file_list_lock();
-       list_for_each(p, &tty->tty_files) {
-               count++;
-       }
-       file_list_unlock();
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->driver->subtype == PTY_TYPE_SLAVE &&
-           tty->link && tty->link->count)
-               count++;
-       if (tty->count != count) {
-               printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
-                                   "!= #fd's(%d) in %s\n",
-                      tty->name, tty->count, count, routine);
-               return count;
-       }       
-#endif
-       return 0;
-}
-
-int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
-{
-       if (disc < N_TTY || disc >= NR_LDISCS)
-               return -EINVAL;
-       
-       if (new_ldisc) {
-               ldiscs[disc] = *new_ldisc;
-               ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
-               ldiscs[disc].num = disc;
-       } else
-               memset(&ldiscs[disc], 0, sizeof(struct tty_ldisc));
-       
-       return 0;
-}
-
-EXPORT_SYMBOL(tty_register_ldisc);
-
-/* Set the discipline of a tty line. */
-static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
-{
-       int     retval = 0;
-       struct  tty_ldisc o_ldisc;
-       char buf[64];
-
-       if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
-               return -EINVAL;
-       /* Eduardo Blanco <ejbs@cs.cs.com.uy> */
-       /* Cyrus Durgin <cider@speakeasy.org> */
-       if (!(ldiscs[ldisc].flags & LDISC_FLAG_DEFINED)) {
-               request_module("tty-ldisc-%d", ldisc);
-       }
-       if (!(ldiscs[ldisc].flags & LDISC_FLAG_DEFINED))
-               return -EINVAL;
-
-       if (tty->ldisc.num == ldisc)
-               return 0;       /* We are already in the desired discipline */
-
-       if (!try_module_get(ldiscs[ldisc].owner))
-               return -EINVAL;
-       
-       o_ldisc = tty->ldisc;
-
-       tty_wait_until_sent(tty, 0);
-       
-       /* Shutdown the current discipline. */
-       if (tty->ldisc.close)
-               (tty->ldisc.close)(tty);
-
-       /* Now set up the new line discipline. */
-       tty->ldisc = ldiscs[ldisc];
-       tty->termios->c_line = ldisc;
-       if (tty->ldisc.open)
-               retval = (tty->ldisc.open)(tty);
-       if (retval < 0) {
-               tty->ldisc = o_ldisc;
-               tty->termios->c_line = tty->ldisc.num;
-               if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
-                       tty->ldisc = ldiscs[N_TTY];
-                       tty->termios->c_line = N_TTY;
-                       if (tty->ldisc.open) {
-                               int r = tty->ldisc.open(tty);
-
-                               if (r < 0)
-                                       panic("Couldn't open N_TTY ldisc for "
-                                             "%s --- error %d.",
-                                             tty_name(tty, buf), r);
-                       }
-               }
-       } else {
-               module_put(o_ldisc.owner);
-       }
-       
-       if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc)
-               tty->driver->set_ldisc(tty);
-       return retval;
-}
-
-/*
- * This routine returns a tty driver structure, given a device number
- */
-struct tty_driver *get_tty_driver(dev_t device, int *index)
-{
-       struct tty_driver *p;
-
-       list_for_each_entry(p, &tty_drivers, tty_drivers) {
-               dev_t base = MKDEV(p->major, p->minor_start);
-               if (device < base || device >= base + p->num)
-                       continue;
-               *index = device - base;
-               return p;
-       }
-       return NULL;
-}
-
-/*
- * If we try to write to, or set the state of, a terminal and we're
- * not in the foreground, send a SIGTTOU.  If the signal is blocked or
- * ignored, go ahead and perform the operation.  (POSIX 7.2)
- */
-int tty_check_change(struct tty_struct * tty)
-{
-       if (current->signal->tty != tty)
-               return 0;
-       if (tty->pgrp <= 0) {
-               printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
-               return 0;
-       }
-       if (process_group(current) == tty->pgrp)
-               return 0;
-       if (is_ignored(SIGTTOU))
-               return 0;
-       if (is_orphaned_pgrp(process_group(current)))
-               return -EIO;
-       (void) kill_pg(process_group(current), SIGTTOU, 1);
-       return -ERESTARTSYS;
-}
-
-EXPORT_SYMBOL(tty_check_change);
-
-static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
-                               size_t count, loff_t *ppos)
-{
-       return 0;
-}
-
-static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
-                                size_t count, loff_t *ppos)
-{
-       return -EIO;
-}
-
-/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
-{
-       return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
-}
-
-static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
-                            unsigned int cmd, unsigned long arg)
-{
-       return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
-}
-
-static struct file_operations tty_fops = {
-       .llseek         = no_llseek,
-       .read           = tty_read,
-       .write          = tty_write,
-       .poll           = tty_poll,
-       .ioctl          = tty_ioctl,
-       .open           = tty_open,
-       .release        = tty_release,
-       .fasync         = tty_fasync,
-};
-
-static struct file_operations console_fops = {
-       .llseek         = no_llseek,
-       .read           = tty_read,
-       .write          = redirected_tty_write,
-       .poll           = tty_poll,
-       .ioctl          = tty_ioctl,
-       .open           = tty_open,
-       .release        = tty_release,
-       .fasync         = tty_fasync,
-};
-
-static struct file_operations hung_up_tty_fops = {
-       .llseek         = no_llseek,
-       .read           = hung_up_tty_read,
-       .write          = hung_up_tty_write,
-       .poll           = hung_up_tty_poll,
-       .ioctl          = hung_up_tty_ioctl,
-       .release        = tty_release,
-};
-
-static spinlock_t redirect_lock = SPIN_LOCK_UNLOCKED;
-static struct file *redirect;
-/*
- * This can be called by the "eventd" kernel thread.  That is process synchronous,
- * but doesn't hold any locks, so we need to make sure we have the appropriate
- * locks for what we're doing..
- */
-void do_tty_hangup(void *data)
-{
-       struct tty_struct *tty = (struct tty_struct *) data;
-       struct file * cons_filp = NULL;
-       struct file *filp, *f = NULL;
-       struct task_struct *p;
-       struct pid *pid;
-       int    closecount = 0, n;
-
-       if (!tty)
-               return;
-
-       /* inuse_filps is protected by the single kernel lock */
-       lock_kernel();
-
-       spin_lock(&redirect_lock);
-       if (redirect && redirect->private_data == tty) {
-               f = redirect;
-               redirect = NULL;
-       }
-       spin_unlock(&redirect_lock);
-       
-       check_tty_count(tty, "do_tty_hangup");
-       file_list_lock();
-       list_for_each_entry(filp, &tty->tty_files, f_list) {
-               if (filp->f_op->write == redirected_tty_write)
-                       cons_filp = filp;
-               if (filp->f_op->write != tty_write)
-                       continue;
-               closecount++;
-               tty_fasync(-1, filp, 0);        /* can't block */
-               filp->f_op = &hung_up_tty_fops;
-       }
-       file_list_unlock();
-       
-       /* FIXME! What are the locking issues here? This may be overdoing things..
-       * this question is especially important now that we've removed the irqlock. */
-       {
-               unsigned long flags;
-
-               local_irq_save(flags); // FIXME: is this safe?
-               if (tty->ldisc.flush_buffer)
-                       tty->ldisc.flush_buffer(tty);
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
-               if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
-                   tty->ldisc.write_wakeup)
-                       (tty->ldisc.write_wakeup)(tty);
-               local_irq_restore(flags); // FIXME: is this safe?
-       }
-
-       wake_up_interruptible(&tty->write_wait);
-       wake_up_interruptible(&tty->read_wait);
-
-       /*
-        * Shutdown the current line discipline, and reset it to
-        * N_TTY.
-        */
-       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
-               *tty->termios = tty->driver->init_termios;
-       if (tty->ldisc.num != ldiscs[N_TTY].num) {
-               if (tty->ldisc.close)
-                       (tty->ldisc.close)(tty);
-               module_put(tty->ldisc.owner);
-               
-               tty->ldisc = ldiscs[N_TTY];
-               tty->termios->c_line = N_TTY;
-               if (tty->ldisc.open) {
-                       int i = (tty->ldisc.open)(tty);
-                       if (i < 0)
-                               printk(KERN_ERR "do_tty_hangup: N_TTY open: "
-                                               "error %d\n", -i);
-               }
-       }
-       
-       read_lock(&tasklist_lock);
-       if (tty->session > 0) {
-               struct list_head *l;
-               for_each_task_pid(tty->session, PIDTYPE_SID, p, l, pid) {
-                       if (p->signal->tty == tty)
-                               p->signal->tty = NULL;
-                       if (!p->signal->leader)
-                               continue;
-                       send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
-                       send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
-                       if (tty->pgrp > 0)
-                               p->signal->tty_old_pgrp = tty->pgrp;
-               }
-       }
-       read_unlock(&tasklist_lock);
-
-       tty->flags = 0;
-       tty->session = 0;
-       tty->pgrp = -1;
-       tty->ctrl_status = 0;
-       /*
-        *      If one of the devices matches a console pointer, we
-        *      cannot just call hangup() because that will cause
-        *      tty->count and state->count to go out of sync.
-        *      So we just call close() the right number of times.
-        */
-       if (cons_filp) {
-               if (tty->driver->close)
-                       for (n = 0; n < closecount; n++)
-                               tty->driver->close(tty, cons_filp);
-       } else if (tty->driver->hangup)
-               (tty->driver->hangup)(tty);
-       unlock_kernel();
-       if (f)
-               fput(f);
-}
-
-void tty_hangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
-       char    buf[64];
-       
-       printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
-#endif
-       schedule_work(&tty->hangup_work);
-}
-
-EXPORT_SYMBOL(tty_hangup);
-
-void tty_vhangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
-       char    buf[64];
-
-       printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
-#endif
-       do_tty_hangup((void *) tty);
-}
-EXPORT_SYMBOL(tty_vhangup);
-
-int tty_hung_up_p(struct file * filp)
-{
-       return (filp->f_op == &hung_up_tty_fops);
-}
-
-EXPORT_SYMBOL(tty_hung_up_p);
-
-/*
- * This function is typically called only by the session leader, when
- * it wants to disassociate itself from its controlling tty.
- *
- * It performs the following functions:
- *     (1)  Sends a SIGHUP and SIGCONT to the foreground process group
- *     (2)  Clears the tty from being controlling the session
- *     (3)  Clears the controlling tty for all processes in the
- *             session group.
- *
- * The argument on_exit is set to 1 if called when a process is
- * exiting; it is 0 if called by the ioctl TIOCNOTTY.
- */
-void disassociate_ctty(int on_exit)
-{
-       struct tty_struct *tty;
-       struct task_struct *p;
-       struct list_head *l;
-       struct pid *pid;
-       int tty_pgrp = -1;
-
-       lock_kernel();
-
-       tty = current->signal->tty;
-       if (tty) {
-               tty_pgrp = tty->pgrp;
-               if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
-                       tty_vhangup(tty);
-       } else {
-               if (current->signal->tty_old_pgrp) {
-                       kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit);
-                       kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit);
-               }
-               unlock_kernel();        
-               return;
-       }
-       if (tty_pgrp > 0) {
-               kill_pg(tty_pgrp, SIGHUP, on_exit);
-               if (!on_exit)
-                       kill_pg(tty_pgrp, SIGCONT, on_exit);
-       }
-
-       current->signal->tty_old_pgrp = 0;
-       tty->session = 0;
-       tty->pgrp = -1;
-
-       read_lock(&tasklist_lock);
-       for_each_task_pid(current->signal->session, PIDTYPE_SID, p, l, pid)
-               p->signal->tty = NULL;
-       read_unlock(&tasklist_lock);
-       unlock_kernel();
-}
-
-void stop_tty(struct tty_struct *tty)
-{
-       if (tty->stopped)
-               return;
-       tty->stopped = 1;
-       if (tty->link && tty->link->packet) {
-               tty->ctrl_status &= ~TIOCPKT_START;
-               tty->ctrl_status |= TIOCPKT_STOP;
-               wake_up_interruptible(&tty->link->read_wait);
-       }
-       if (tty->driver->stop)
-               (tty->driver->stop)(tty);
-}
-
-EXPORT_SYMBOL(stop_tty);
-
-void start_tty(struct tty_struct *tty)
-{
-       if (!tty->stopped || tty->flow_stopped)
-               return;
-       tty->stopped = 0;
-       if (tty->link && tty->link->packet) {
-               tty->ctrl_status &= ~TIOCPKT_STOP;
-               tty->ctrl_status |= TIOCPKT_START;
-               wake_up_interruptible(&tty->link->read_wait);
-       }
-       if (tty->driver->start)
-               (tty->driver->start)(tty);
-       if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
-           tty->ldisc.write_wakeup)
-               (tty->ldisc.write_wakeup)(tty);
-       wake_up_interruptible(&tty->write_wait);
-}
-
-EXPORT_SYMBOL(start_tty);
-
-static ssize_t tty_read(struct file * file, char __user * buf, size_t count, 
-                       loff_t *ppos)
-{
-       int i;
-       struct tty_struct * tty;
-       struct inode *inode;
-
-       tty = (struct tty_struct *)file->private_data;
-       inode = file->f_dentry->d_inode;
-       if (tty_paranoia_check(tty, inode, "tty_read"))
-               return -EIO;
-       if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
-               return -EIO;
-
-       lock_kernel();
-       if (tty->ldisc.read)
-               i = (tty->ldisc.read)(tty,file,buf,count);
-       else
-               i = -EIO;
-       unlock_kernel();
-       if (i > 0)
-               inode->i_atime = CURRENT_TIME;
-       return i;
-}
-
-/*
- * Split writes up in sane blocksizes to avoid
- * denial-of-service type attacks
- */
-static inline ssize_t do_tty_write(
-       ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char __user *, size_t),
-       struct tty_struct *tty,
-       struct file *file,
-       const unsigned char __user *buf,
-       size_t count)
-{
-       ssize_t ret = 0, written = 0;
-       
-       if (down_interruptible(&tty->atomic_write)) {
-               return -ERESTARTSYS;
-       }
-       if ( test_bit(TTY_NO_WRITE_SPLIT, &tty->flags) ) {
-               lock_kernel();
-               written = write(tty, file, buf, count);
-               unlock_kernel();
-       } else {
-               for (;;) {
-                       unsigned long size = max((unsigned long)PAGE_SIZE*2, 16384UL);
-                       if (size > count)
-                               size = count;
-                       lock_kernel();
-                       ret = write(tty, file, buf, size);
-                       unlock_kernel();
-                       if (ret <= 0)
-                               break;
-                       written += ret;
-                       buf += ret;
-                       count -= ret;
-                       if (!count)
-                               break;
-                       ret = -ERESTARTSYS;
-                       if (signal_pending(current))
-                               break;
-                       cond_resched();
-               }
-       }
-       if (written) {
-               file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
-               ret = written;
-       }
-       up(&tty->atomic_write);
-       return ret;
-}
-
-
-static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
-                        loff_t *ppos)
-{
-       struct tty_struct * tty;
-       struct inode *inode = file->f_dentry->d_inode;
-
-       tty = (struct tty_struct *)file->private_data;
-       if (tty_paranoia_check(tty, inode, "tty_write"))
-               return -EIO;
-       if (!tty || !tty->driver->write || (test_bit(TTY_IO_ERROR, &tty->flags)))
-               return -EIO;
-       if (!tty->ldisc.write)
-               return -EIO;
-       return do_tty_write(tty->ldisc.write, tty, file,
-                           (const unsigned char __user *)buf, count);
-}
-
-ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t count,
-                        loff_t *ppos)
-{
-       struct file *p = NULL;
-
-       spin_lock(&redirect_lock);
-       if (redirect) {
-               get_file(redirect);
-               p = redirect;
-       }
-       spin_unlock(&redirect_lock);
-
-       if (p) {
-               ssize_t res;
-               res = vfs_write(p, buf, count, &p->f_pos);
-               fput(p);
-               return res;
-       }
-
-       return tty_write(file, buf, count, ppos);
-}
-
-static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
-{
-       sprintf(p, "%s%d", driver->name, index + driver->name_base);
-}
-
-/*
- * WSH 06/09/97: Rewritten to remove races and properly clean up after a
- * failed open.  The new code protects the open with a semaphore, so it's
- * really quite straightforward.  The semaphore locking can probably be
- * relaxed for the (most common) case of reopening a tty.
- */
-static int init_dev(struct tty_driver *driver, int idx,
-       struct tty_struct **ret_tty)
-{
-       struct tty_struct *tty, *o_tty;
-       struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
-       struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
-       int retval=0;
-
-       /* 
-        * Check whether we need to acquire the tty semaphore to avoid
-        * race conditions.  For now, play it safe.
-        */
-       down(&tty_sem);
-
-       /* check whether we're reopening an existing tty */
-       if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
-               tty = devpts_get_tty(idx);
-               if (tty && driver->subtype == PTY_TYPE_MASTER)
-                       tty = tty->link;
-       } else {
-               tty = driver->ttys[idx];
-       }
-       if (tty) goto fast_track;
-
-       /*
-        * First time open is complex, especially for PTY devices.
-        * This code guarantees that either everything succeeds and the
-        * TTY is ready for operation, or else the table slots are vacated
-        * and the allocated memory released.  (Except that the termios 
-        * and locked termios may be retained.)
-        */
-
-       if (!try_module_get(driver->owner)) {
-               retval = -ENODEV;
-               goto end_init;
-       }
-
-       o_tty = NULL;
-       tp = o_tp = NULL;
-       ltp = o_ltp = NULL;
-
-       tty = alloc_tty_struct();
-       if(!tty)
-               goto fail_no_mem;
-       initialize_tty_struct(tty);
-       tty->driver = driver;
-       tty->index = idx;
-       tty_line_name(driver, idx, tty->name);
-
-       if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
-               tp_loc = &tty->termios;
-               ltp_loc = &tty->termios_locked;
-       } else {
-               tp_loc = &driver->termios[idx];
-               ltp_loc = &driver->termios_locked[idx];
-       }
-
-       if (!*tp_loc) {
-               tp = (struct termios *) kmalloc(sizeof(struct termios),
-                                               GFP_KERNEL);
-               if (!tp)
-                       goto free_mem_out;
-               *tp = driver->init_termios;
-       }
-
-       if (!*ltp_loc) {
-               ltp = (struct termios *) kmalloc(sizeof(struct termios),
-                                                GFP_KERNEL);
-               if (!ltp)
-                       goto free_mem_out;
-               memset(ltp, 0, sizeof(struct termios));
-       }
-
-       if (driver->type == TTY_DRIVER_TYPE_PTY) {
-               o_tty = alloc_tty_struct();
-               if (!o_tty)
-                       goto free_mem_out;
-               initialize_tty_struct(o_tty);
-               o_tty->driver = driver->other;
-               o_tty->index = idx;
-               tty_line_name(driver->other, idx, o_tty->name);
-
-               if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
-                       o_tp_loc = &o_tty->termios;
-                       o_ltp_loc = &o_tty->termios_locked;
-               } else {
-                       o_tp_loc = &driver->other->termios[idx];
-                       o_ltp_loc = &driver->other->termios_locked[idx];
-               }
-
-               if (!*o_tp_loc) {
-                       o_tp = (struct termios *)
-                               kmalloc(sizeof(struct termios), GFP_KERNEL);
-                       if (!o_tp)
-                               goto free_mem_out;
-                       *o_tp = driver->other->init_termios;
-               }
-
-               if (!*o_ltp_loc) {
-                       o_ltp = (struct termios *)
-                               kmalloc(sizeof(struct termios), GFP_KERNEL);
-                       if (!o_ltp)
-                               goto free_mem_out;
-                       memset(o_ltp, 0, sizeof(struct termios));
-               }
-
-               /*
-                * Everything allocated ... set up the o_tty structure.
-                */
-               if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) {
-                       driver->other->ttys[idx] = o_tty;
-               }
-               if (!*o_tp_loc)
-                       *o_tp_loc = o_tp;
-               if (!*o_ltp_loc)
-                       *o_ltp_loc = o_ltp;
-               o_tty->termios = *o_tp_loc;
-               o_tty->termios_locked = *o_ltp_loc;
-               driver->other->refcount++;
-               if (driver->subtype == PTY_TYPE_MASTER)
-                       o_tty->count++;
-
-               /* Establish the links in both directions */
-               tty->link   = o_tty;
-               o_tty->link = tty;
-       }
-
-       /* 
-        * All structures have been allocated, so now we install them.
-        * Failures after this point use release_mem to clean up, so 
-        * there's no need to null out the local pointers.
-        */
-       if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
-               driver->ttys[idx] = tty;
-       }
-       
-       if (!*tp_loc)
-               *tp_loc = tp;
-       if (!*ltp_loc)
-               *ltp_loc = ltp;
-       tty->termios = *tp_loc;
-       tty->termios_locked = *ltp_loc;
-       driver->refcount++;
-       tty->count++;
-
-       /* 
-        * Structures all installed ... call the ldisc open routines.
-        * If we fail here just call release_mem to clean up.  No need
-        * to decrement the use counts, as release_mem doesn't care.
-        */
-       if (tty->ldisc.open) {
-               retval = (tty->ldisc.open)(tty);
-               if (retval)
-                       goto release_mem_out;
-       }
-       if (o_tty && o_tty->ldisc.open) {
-               retval = (o_tty->ldisc.open)(o_tty);
-               if (retval) {
-                       if (tty->ldisc.close)
-                               (tty->ldisc.close)(tty);
-                       goto release_mem_out;
-               }
-       }
-       goto success;
-
-       /*
-        * This fast open can be used if the tty is already open.
-        * No memory is allocated, and the only failures are from
-        * attempting to open a closing tty or attempting multiple
-        * opens on a pty master.
-        */
-fast_track:
-       if (test_bit(TTY_CLOSING, &tty->flags)) {
-               retval = -EIO;
-               goto end_init;
-       }
-       if (driver->type == TTY_DRIVER_TYPE_PTY &&
-           driver->subtype == PTY_TYPE_MASTER) {
-               /*
-                * special case for PTY masters: only one open permitted, 
-                * and the slave side open count is incremented as well.
-                */
-               if (tty->count) {
-                       retval = -EIO;
-                       goto end_init;
-               }
-               tty->link->count++;
-       }
-       tty->count++;
-       tty->driver = driver; /* N.B. why do this every time?? */
-
-success:
-       *ret_tty = tty;
-       
-       /* All paths come through here to release the semaphore */
-end_init:
-       up(&tty_sem);
-       return retval;
-
-       /* Release locally allocated memory ... nothing placed in slots */
-free_mem_out:
-       if (o_tp)
-               kfree(o_tp);
-       if (o_tty)
-               free_tty_struct(o_tty);
-       if (ltp)
-               kfree(ltp);
-       if (tp)
-               kfree(tp);
-       free_tty_struct(tty);
-
-fail_no_mem:
-       module_put(driver->owner);
-       retval = -ENOMEM;
-       goto end_init;
-
-       /* call the tty release_mem routine to clean out this slot */
-release_mem_out:
-       printk(KERN_INFO "init_dev: ldisc open failed, "
-                        "clearing slot %d\n", idx);
-       release_mem(tty, idx);
-       goto end_init;
-}
-
-/*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots.
- */
-static void release_mem(struct tty_struct *tty, int idx)
-{
-       struct tty_struct *o_tty;
-       struct termios *tp;
-       int devpts = tty->driver->flags & TTY_DRIVER_DEVPTS_MEM;
-
-       if ((o_tty = tty->link) != NULL) {
-               if (!devpts)
-                       o_tty->driver->ttys[idx] = NULL;
-               if (o_tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
-                       tp = o_tty->termios;
-                       if (!devpts)
-                               o_tty->driver->termios[idx] = NULL;
-                       kfree(tp);
-
-                       tp = o_tty->termios_locked;
-                       if (!devpts)
-                               o_tty->driver->termios_locked[idx] = NULL;
-                       kfree(tp);
-               }
-               o_tty->magic = 0;
-               o_tty->driver->refcount--;
-               file_list_lock();
-               list_del_init(&o_tty->tty_files);
-               file_list_unlock();
-               free_tty_struct(o_tty);
-       }
-
-       if (!devpts)
-               tty->driver->ttys[idx] = NULL;
-       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
-               tp = tty->termios;
-               if (!devpts)
-                       tty->driver->termios[idx] = NULL;
-               kfree(tp);
-
-               tp = tty->termios_locked;
-               if (!devpts)
-                       tty->driver->termios_locked[idx] = NULL;
-               kfree(tp);
-       }
-
-       tty->magic = 0;
-       tty->driver->refcount--;
-       file_list_lock();
-       list_del_init(&tty->tty_files);
-       file_list_unlock();
-       module_put(tty->driver->owner);
-       free_tty_struct(tty);
-}
-
-/*
- * Even releasing the tty structures is a tricky business.. We have
- * to be very careful that the structures are all released at the
- * same time, as interrupts might otherwise get the wrong pointers.
- *
- * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
- * lead to double frees or releasing memory still in use.
- */
-static void release_dev(struct file * filp)
-{
-       struct tty_struct *tty, *o_tty;
-       int     pty_master, tty_closing, o_tty_closing, do_sleep;
-       int     devpts_master, devpts;
-       int     idx;
-       char    buf[64];
-       
-       tty = (struct tty_struct *)filp->private_data;
-       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "release_dev"))
-               return;
-
-       check_tty_count(tty, "release_dev");
-
-       tty_fasync(-1, filp, 0);
-
-       idx = tty->index;
-       pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-                     tty->driver->subtype == PTY_TYPE_MASTER);
-       devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
-       devpts_master = pty_master && devpts;
-       o_tty = tty->link;
-
-#ifdef TTY_PARANOIA_CHECK
-       if (idx < 0 || idx >= tty->driver->num) {
-               printk(KERN_DEBUG "release_dev: bad idx when trying to "
-                                 "free (%s)\n", tty->name);
-               return;
-       }
-       if (!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
-               if (tty != tty->driver->ttys[idx]) {
-                       printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
-                              "for (%s)\n", idx, tty->name);
-                       return;
-               }
-               if (tty->termios != tty->driver->termios[idx]) {
-                       printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
-                              "for (%s)\n",
-                              idx, tty->name);
-                       return;
-               }
-               if (tty->termios_locked != tty->driver->termios_locked[idx]) {
-                       printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
-                              "termios_locked for (%s)\n",
-                              idx, tty->name);
-                       return;
-               }
-       }
-#endif
-
-#ifdef TTY_DEBUG_HANGUP
-       printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
-              tty_name(tty, buf), tty->count);
-#endif
-
-#ifdef TTY_PARANOIA_CHECK
-       if (tty->driver->other &&
-            !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
-               if (o_tty != tty->driver->other->ttys[idx]) {
-                       printk(KERN_DEBUG "release_dev: other->table[%d] "
-                                         "not o_tty for (%s)\n",
-                              idx, tty->name);
-                       return;
-               }
-               if (o_tty->termios != tty->driver->other->termios[idx]) {
-                       printk(KERN_DEBUG "release_dev: other->termios[%d] "
-                                         "not o_termios for (%s)\n",
-                              idx, tty->name);
-                       return;
-               }
-               if (o_tty->termios_locked != 
-                     tty->driver->other->termios_locked[idx]) {
-                       printk(KERN_DEBUG "release_dev: other->termios_locked["
-                                         "%d] not o_termios_locked for (%s)\n",
-                              idx, tty->name);
-                       return;
-               }
-               if (o_tty->link != tty) {
-                       printk(KERN_DEBUG "release_dev: bad pty pointers\n");
-                       return;
-               }
-       }
-#endif
-
-       if (tty->driver->close)
-               tty->driver->close(tty, filp);
-
-       /*
-        * Sanity check: if tty->count is going to zero, there shouldn't be
-        * any waiters on tty->read_wait or tty->write_wait.  We test the
-        * wait queues and kick everyone out _before_ actually starting to
-        * close.  This ensures that we won't block while releasing the tty
-        * structure.
-        *
-        * The test for the o_tty closing is necessary, since the master and
-        * slave sides may close in any order.  If the slave side closes out
-        * first, its count will be one, since the master side holds an open.
-        * Thus this test wouldn't be triggered at the time the slave closes,
-        * so we do it now.
-        *
-        * Note that it's possible for the tty to be opened again while we're
-        * flushing out waiters.  By recalculating the closing flags before
-        * each iteration we avoid any problems.
-        */
-       while (1) {
-               tty_closing = tty->count <= 1;
-               o_tty_closing = o_tty &&
-                       (o_tty->count <= (pty_master ? 1 : 0));
-               do_sleep = 0;
-
-               if (tty_closing) {
-                       if (waitqueue_active(&tty->read_wait)) {
-                               wake_up(&tty->read_wait);
-                               do_sleep++;
-                       }
-                       if (waitqueue_active(&tty->write_wait)) {
-                               wake_up(&tty->write_wait);
-                               do_sleep++;
-                       }
-               }
-               if (o_tty_closing) {
-                       if (waitqueue_active(&o_tty->read_wait)) {
-                               wake_up(&o_tty->read_wait);
-                               do_sleep++;
-                       }
-                       if (waitqueue_active(&o_tty->write_wait)) {
-                               wake_up(&o_tty->write_wait);
-                               do_sleep++;
-                       }
-               }
-               if (!do_sleep)
-                       break;
-
-               printk(KERN_WARNING "release_dev: %s: read/write wait queue "
-                                   "active!\n", tty_name(tty, buf));
-               schedule();
-       }       
-
-       /*
-        * The closing flags are now consistent with the open counts on 
-        * both sides, and we've completed the last operation that could 
-        * block, so it's safe to proceed with closing.
-        */
-       if (pty_master) {
-               if (--o_tty->count < 0) {
-                       printk(KERN_WARNING "release_dev: bad pty slave count "
-                                           "(%d) for %s\n",
-                              o_tty->count, tty_name(o_tty, buf));
-                       o_tty->count = 0;
-               }
-       }
-       if (--tty->count < 0) {
-               printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
-                      tty->count, tty_name(tty, buf));
-               tty->count = 0;
-       }
-
-       /*
-        * We've decremented tty->count, so we need to remove this file
-        * descriptor off the tty->tty_files list; this serves two
-        * purposes:
-        *  - check_tty_count sees the correct number of file descriptors
-        *    associated with this tty.
-        *  - do_tty_hangup no longer sees this file descriptor as
-        *    something that needs to be handled for hangups.
-        */
-       file_kill(filp);
-       filp->private_data = NULL;
-
-       /*
-        * Perform some housekeeping before deciding whether to return.
-        *
-        * Set the TTY_CLOSING flag if this was the last open.  In the
-        * case of a pty we may have to wait around for the other side
-        * to close, and TTY_CLOSING makes sure we can't be reopened.
-        */
-       if(tty_closing)
-               set_bit(TTY_CLOSING, &tty->flags);
-       if(o_tty_closing)
-               set_bit(TTY_CLOSING, &o_tty->flags);
-
-       /*
-        * If _either_ side is closing, make sure there aren't any
-        * processes that still think tty or o_tty is their controlling
-        * tty.
-        */
-       if (tty_closing || o_tty_closing) {
-               struct task_struct *p;
-               struct list_head *l;
-               struct pid *pid;
-
-               read_lock(&tasklist_lock);
-               for_each_task_pid(tty->session, PIDTYPE_SID, p, l, pid)
-                       p->signal->tty = NULL;
-               if (o_tty)
-                       for_each_task_pid(o_tty->session, PIDTYPE_SID, p,l, pid)
-                               p->signal->tty = NULL;
-               read_unlock(&tasklist_lock);
-       }
-
-       /* check whether both sides are closing ... */
-       if (!tty_closing || (o_tty && !o_tty_closing))
-               return;
-       
-#ifdef TTY_DEBUG_HANGUP
-       printk(KERN_DEBUG "freeing tty structure...");
-#endif
-
-       /*
-        * Prevent flush_to_ldisc() from rescheduling the work for later.  Then
-        * kill any delayed work.
-        */
-       clear_bit(TTY_DONT_FLIP, &tty->flags);
-       cancel_delayed_work(&tty->flip.work);
-
-       /*
-        * Wait for ->hangup_work and ->flip.work handlers to terminate
-        */
-       flush_scheduled_work();
-
-       /*
-        * Shutdown the current line discipline, and reset it to N_TTY.
-        * N.B. why reset ldisc when we're releasing the memory??
-        */
-       if (tty->ldisc.close)
-               (tty->ldisc.close)(tty);
-       module_put(tty->ldisc.owner);
-       
-       tty->ldisc = ldiscs[N_TTY];
-       tty->termios->c_line = N_TTY;
-       if (o_tty) {
-               if (o_tty->ldisc.close)
-                       (o_tty->ldisc.close)(o_tty);
-               module_put(o_tty->ldisc.owner);
-               o_tty->ldisc = ldiscs[N_TTY];
-       }
-
-       /*
-        * The release_mem function takes care of the details of clearing
-        * the slots and preserving the termios structure.
-        */
-       release_mem(tty, idx);
-
-#ifdef CONFIG_UNIX98_PTYS
-       /* Make this pty number available for reallocation */
-       if (devpts) {
-               down(&allocated_ptys_lock);
-               idr_remove(&allocated_ptys, idx);
-               up(&allocated_ptys_lock);
-       }
-#endif
-
-}
-
-/*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
- *
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now.  - Ted 1/27/92)
- *
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
- */
-static int tty_open(struct inode * inode, struct file * filp)
-{
-       struct tty_struct *tty;
-       int noctty, retval;
-       struct tty_driver *driver;
-       int index;
-       dev_t device = inode->i_rdev;
-       unsigned short saved_flags = filp->f_flags;
-
-       nonseekable_open(inode, filp);
-retry_open:
-       noctty = filp->f_flags & O_NOCTTY;
-       index  = -1;
-       retval = 0;
-
-       if (device == MKDEV(TTYAUX_MAJOR,0)) {
-               if (!current->signal->tty)
-                       return -ENXIO;
-               driver = current->signal->tty->driver;
-               index = current->signal->tty->index;
-               filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
-               /* noctty = 1; */
-               goto got_driver;
-       }
-#ifdef CONFIG_VT
-       if (console_use_vt && device == MKDEV(TTY_MAJOR,0)) {
-               extern int fg_console;
-               extern struct tty_driver *console_driver;
-               driver = console_driver;
-               index = fg_console;
-               noctty = 1;
-               goto got_driver;
-       }
-#endif
-       if (device == MKDEV(TTYAUX_MAJOR,1)) {
-               driver = console_device(&index);
-               if (driver) {
-                       /* Don't let /dev/console block */
-                       filp->f_flags |= O_NONBLOCK;
-                       noctty = 1;
-                       goto got_driver;
-               }
-               return -ENODEV;
-       }
-
-#ifdef CONFIG_UNIX98_PTYS
-       if (device == MKDEV(TTYAUX_MAJOR,2)) {
-               int idr_ret;
-
-               /* find a device that is not in use. */
-               down(&allocated_ptys_lock);
-               if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
-                       up(&allocated_ptys_lock);
-                       return -ENOMEM;
-               }
-               idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
-               if (idr_ret < 0) {
-                       up(&allocated_ptys_lock);
-                       if (idr_ret == -EAGAIN)
-                               return -ENOMEM;
-                       return -EIO;
-               }
-               if (index >= pty_limit) {
-                       idr_remove(&allocated_ptys, index);
-                       up(&allocated_ptys_lock);
-                       return -EIO;
-               }
-               up(&allocated_ptys_lock);
-
-               driver = ptm_driver;
-               retval = init_dev(driver, index, &tty);
-               if (retval) {
-                       down(&allocated_ptys_lock);
-                       idr_remove(&allocated_ptys, index);
-                       up(&allocated_ptys_lock);
-                       return retval;
-               }
-
-               set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-               if (devpts_pty_new(tty->link))
-                       retval = -ENOMEM;
-       } else
-#endif
-       {
-               driver = get_tty_driver(device, &index);
-               if (!driver)
-                       return -ENODEV;
-got_driver:
-               retval = init_dev(driver, index, &tty);
-               if (retval)
-                       return retval;
-       }
-
-       filp->private_data = tty;
-       file_move(filp, &tty->tty_files);
-       check_tty_count(tty, "tty_open");
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->driver->subtype == PTY_TYPE_MASTER)
-               noctty = 1;
-#ifdef TTY_DEBUG_HANGUP
-       printk(KERN_DEBUG "opening %s...", tty->name);
-#endif
-       if (!retval) {
-               if (tty->driver->open)
-                       retval = tty->driver->open(tty, filp);
-               else
-                       retval = -ENODEV;
-       }
-       filp->f_flags = saved_flags;
-
-       if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
-               retval = -EBUSY;
-
-       if (retval) {
-#ifdef TTY_DEBUG_HANGUP
-               printk(KERN_DEBUG "error %d in opening %s...", retval,
-                      tty->name);
-#endif
-
-#ifdef CONFIG_UNIX98_PTYS
-               if (index != -1) {
-                       down(&allocated_ptys_lock);
-                       idr_remove(&allocated_ptys, index);
-                       up(&allocated_ptys_lock);
-               }
-#endif
-
-               release_dev(filp);
-               if (retval != -ERESTARTSYS)
-                       return retval;
-               if (signal_pending(current))
-                       return retval;
-               schedule();
-               /*
-                * Need to reset f_op in case a hangup happened.
-                */
-               if (filp->f_op == &hung_up_tty_fops)
-                       filp->f_op = &tty_fops;
-               goto retry_open;
-       }
-       if (!noctty &&
-           current->signal->leader &&
-           !current->signal->tty &&
-           tty->session == 0) {
-               task_lock(current);
-               current->signal->tty = tty;
-               task_unlock(current);
-               current->signal->tty_old_pgrp = 0;
-               tty->session = current->signal->session;
-               tty->pgrp = process_group(current);
-       }
-       return 0;
-}
-
-static int tty_release(struct inode * inode, struct file * filp)
-{
-       lock_kernel();
-       release_dev(filp);
-       unlock_kernel();
-       return 0;
-}
-
-/* No kernel lock held - fine */
-static unsigned int tty_poll(struct file * filp, poll_table * wait)
-{
-       struct tty_struct * tty;
-
-       tty = (struct tty_struct *)filp->private_data;
-       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_poll"))
-               return 0;
-
-       if (tty->ldisc.poll)
-               return (tty->ldisc.poll)(tty, filp, wait);
-       return 0;
-}
-
-static int tty_fasync(int fd, struct file * filp, int on)
-{
-       struct tty_struct * tty;
-       int retval;
-
-       tty = (struct tty_struct *)filp->private_data;
-       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_fasync"))
-               return 0;
-       
-       retval = fasync_helper(fd, filp, on, &tty->fasync);
-       if (retval <= 0)
-               return retval;
-
-       if (on) {
-               if (!waitqueue_active(&tty->read_wait))
-                       tty->minimum_to_wake = 1;
-               retval = f_setown(filp, (-tty->pgrp) ? : current->pid, 0);
-               if (retval)
-                       return retval;
-       } else {
-               if (!tty->fasync && !waitqueue_active(&tty->read_wait))
-                       tty->minimum_to_wake = N_TTY_BUF_SIZE;
-       }
-       return 0;
-}
-
-static int tiocsti(struct tty_struct *tty, char __user *p)
-{
-       char ch, mbz = 0;
-
-       if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
-               return -EPERM;
-       if (get_user(ch, p))
-               return -EFAULT;
-       tty->ldisc.receive_buf(tty, &ch, &mbz, 1);
-       return 0;
-}
-
-static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
-{
-       if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
-               return -EFAULT;
-       return 0;
-}
-
-static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
-       struct winsize __user * arg)
-{
-       struct winsize tmp_ws;
-
-       if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
-               return -EFAULT;
-       if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
-               return 0;
-#ifdef CONFIG_VT
-       if (console_use_vt && tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
-               unsigned int currcons = tty->index;
-               int rc;
-
-               acquire_console_sem();
-               rc = vc_resize(currcons, tmp_ws.ws_col, tmp_ws.ws_row);
-               release_console_sem();
-               if (rc)
-                       return -ENXIO;
-       }
-#endif
-       if (tty->pgrp > 0)
-               kill_pg(tty->pgrp, SIGWINCH, 1);
-       if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
-               kill_pg(real_tty->pgrp, SIGWINCH, 1);
-       tty->winsize = tmp_ws;
-       real_tty->winsize = tmp_ws;
-       return 0;
-}
-
-static int tioccons(struct file *file)
-{
-       if (file->f_op->write == redirected_tty_write) {
-               struct file *f;
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-               spin_lock(&redirect_lock);
-               f = redirect;
-               redirect = NULL;
-               spin_unlock(&redirect_lock);
-               if (f)
-                       fput(f);
-               return 0;
-       }
-       spin_lock(&redirect_lock);
-       if (redirect) {
-               spin_unlock(&redirect_lock);
-               return -EBUSY;
-       }
-       get_file(file);
-       redirect = file;
-       spin_unlock(&redirect_lock);
-       return 0;
-}
-
-
-static int fionbio(struct file *file, int __user *p)
-{
-       int nonblock;
-
-       if (get_user(nonblock, p))
-               return -EFAULT;
-
-       if (nonblock)
-               file->f_flags |= O_NONBLOCK;
-       else
-               file->f_flags &= ~O_NONBLOCK;
-       return 0;
-}
-
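-/*
- * TIOCSCTTY: make this tty the controlling terminal of the calling session
- * leader.  A tty that already belongs to another session can only be taken
- * over when arg == 1 and the caller has CAP_SYS_ADMIN, in which case it is
- * stolen from every task of the old session.
- */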
-static int tiocsctty(struct tty_struct *tty, int arg)
-{
-       struct list_head *l;
-       struct pid *pid;
-       task_t *p;
-
-       if (current->signal->leader &&
-           (current->signal->session == tty->session))
-               return 0;
-       /*
-        * The process must be a session leader and
-        * not have a controlling tty already.
-        */
-       if (!current->signal->leader || current->signal->tty)
-               return -EPERM;
-       if (tty->session > 0) {
-               /*
-                * This tty is already the controlling
-                * tty for another session group!
-                */
-               if ((arg == 1) && capable(CAP_SYS_ADMIN)) {
-                       /*
-                        * Steal it away
-                        */
-
-                       read_lock(&tasklist_lock);
-                       for_each_task_pid(tty->session, PIDTYPE_SID, p, l, pid)
-                               p->signal->tty = NULL;
-                       read_unlock(&tasklist_lock);
-               } else
-                       return -EPERM;
-       }
-       task_lock(current);
-       current->signal->tty = tty;
-       task_unlock(current);
-       current->signal->tty_old_pgrp = 0;
-       tty->session = current->signal->session;
-       tty->pgrp = process_group(current);
-       return 0;
-}
-
-static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
-       /*
-        * (tty == real_tty) is a cheap way of
-        * testing if the tty is NOT a master pty.
-        */
-       if (tty == real_tty && current->signal->tty != real_tty)
-               return -ENOTTY;
-       return put_user(real_tty->pgrp, p);
-}
-
-static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
-       pid_t pgrp;
-       int retval = tty_check_change(real_tty);
-
-       if (retval == -EIO)
-               return -ENOTTY;
-       if (retval)
-               return retval;
-       if (!current->signal->tty ||
-           (current->signal->tty != real_tty) ||
-           (real_tty->session != current->signal->session))
-               return -ENOTTY;
-       if (get_user(pgrp, p))
-               return -EFAULT;
-       if (pgrp < 0)
-               return -EINVAL;
-       if (session_of_pgrp(pgrp) != current->signal->session)
-               return -EPERM;
-       real_tty->pgrp = pgrp;
-       return 0;
-}
-
-static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
-       /*
-        * (tty == real_tty) is a cheap way of
-        * testing if the tty is NOT a master pty.
-       */
-       if (tty == real_tty && current->signal->tty != real_tty)
-               return -ENOTTY;
-       if (real_tty->session <= 0)
-               return -ENOTTY;
-       return put_user(real_tty->session, p);
-}
-
-static int tiocsetd(struct tty_struct *tty, int __user *p)
-{
-       int ldisc;
-
-       if (get_user(ldisc, p))
-               return -EFAULT;
-       return tty_set_ldisc(tty, ldisc);
-}
-
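-/*
- * Assert the break condition on the line via the driver's break_ctl() hook,
- * sleep interruptibly for 'duration' jiffies, then clear it again.  Returns
- * -EINTR if a signal cut the sleep short.
- */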
-static int send_break(struct tty_struct *tty, int duration)
-{
-       set_current_state(TASK_INTERRUPTIBLE);
-
-       tty->driver->break_ctl(tty, -1);
-       if (!signal_pending(current))
-               schedule_timeout(duration);
-       tty->driver->break_ctl(tty, 0);
-       if (signal_pending(current))
-               return -EINTR;
-       return 0;
-}
-
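-/*
- * Modem-control helpers: TIOCMGET reads the line states through the driver's
- * tiocmget() hook, while TIOCMBIS/TIOCMBIC/TIOCMSET translate the user mask
- * into set/clear bits (restricted to DTR, RTS, OUT1, OUT2 and LOOP) for the
- * driver's tiocmset() hook.
- */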
-static int
-tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
-{
-       int retval = -EINVAL;
-
-       if (tty->driver->tiocmget) {
-               retval = tty->driver->tiocmget(tty, file);
-
-               if (retval >= 0)
-                       retval = put_user(retval, p);
-       }
-       return retval;
-}
-
-static int
-tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
-            unsigned __user *p)
-{
-       int retval = -EINVAL;
-
-       if (tty->driver->tiocmset) {
-               unsigned int set, clear, val;
-
-               retval = get_user(val, p);
-               if (retval)
-                       return retval;
-
-               set = clear = 0;
-               switch (cmd) {
-               case TIOCMBIS:
-                       set = val;
-                       break;
-               case TIOCMBIC:
-                       clear = val;
-                       break;
-               case TIOCMSET:
-                       set = val;
-                       clear = ~val;
-                       break;
-               }
-
-               set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
-               clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
-
-               retval = tty->driver->tiocmset(tty, file, set, clear);
-       }
-       return retval;
-}
-
-/*
- * Split this up, as gcc can choke on it otherwise..
- */
-int tty_ioctl(struct inode * inode, struct file * file,
-             unsigned int cmd, unsigned long arg)
-{
-       struct tty_struct *tty, *real_tty;
-       void __user *p = (void __user *)arg;
-       int retval;
-       
-       tty = (struct tty_struct *)file->private_data;
-       if (tty_paranoia_check(tty, inode, "tty_ioctl"))
-               return -EINVAL;
-
-       real_tty = tty;
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->driver->subtype == PTY_TYPE_MASTER)
-               real_tty = tty->link;
-
-       /*
-        * Break handling by driver
-        */
-       if (!tty->driver->break_ctl) {
-               switch(cmd) {
-               case TIOCSBRK:
-               case TIOCCBRK:
-                       if (tty->driver->ioctl)
-                               return tty->driver->ioctl(tty, file, cmd, arg);
-                       return -EINVAL;
-                       
-               /* These two ioctls always return success, even if */
-               /* the driver doesn't support them. */
-               case TCSBRK:
-               case TCSBRKP:
-                       if (!tty->driver->ioctl)
-                               return 0;
-                       retval = tty->driver->ioctl(tty, file, cmd, arg);
-                       if (retval == -ENOIOCTLCMD)
-                               retval = 0;
-                       return retval;
-               }
-       }
-
-       /*
-        * Factor out some common prep work
-        */
-       switch (cmd) {
-       case TIOCSETD:
-       case TIOCSBRK:
-       case TIOCCBRK:
-       case TCSBRK:
-       case TCSBRKP:                   
-               retval = tty_check_change(tty);
-               if (retval)
-                       return retval;
-               if (cmd != TIOCCBRK) {
-                       tty_wait_until_sent(tty, 0);
-                       if (signal_pending(current))
-                               return -EINTR;
-               }
-               break;
-       }
-
-       switch (cmd) {
-               case TIOCSTI:
-                       return tiocsti(tty, p);
-               case TIOCGWINSZ:
-                       return tiocgwinsz(tty, p);
-               case TIOCSWINSZ:
-                       return tiocswinsz(tty, real_tty, p);
-               case TIOCCONS:
-                       return real_tty!=tty ? -EINVAL : tioccons(file);
-               case FIONBIO:
-                       return fionbio(file, p);
-               case TIOCEXCL:
-                       set_bit(TTY_EXCLUSIVE, &tty->flags);
-                       return 0;
-               case TIOCNXCL:
-                       clear_bit(TTY_EXCLUSIVE, &tty->flags);
-                       return 0;
-               case TIOCNOTTY:
-                       if (current->signal->tty != tty)
-                               return -ENOTTY;
-                       if (current->signal->leader)
-                               disassociate_ctty(0);
-                       task_lock(current);
-                       current->signal->tty = NULL;
-                       task_unlock(current);
-                       return 0;
-               case TIOCSCTTY:
-                       return tiocsctty(tty, arg);
-               case TIOCGPGRP:
-                       return tiocgpgrp(tty, real_tty, p);
-               case TIOCSPGRP:
-                       return tiocspgrp(tty, real_tty, p);
-               case TIOCGSID:
-                       return tiocgsid(tty, real_tty, p);
-               case TIOCGETD:
-                       return put_user(tty->ldisc.num, (int __user *)p);
-               case TIOCSETD:
-                       return tiocsetd(tty, p);
-#ifdef CONFIG_VT
-               case TIOCLINUX:
-                       return tioclinux(tty, arg);
-#endif
-               /*
-                * Break handling
-                */
-               case TIOCSBRK:  /* Turn break on, unconditionally */
-                       tty->driver->break_ctl(tty, -1);
-                       return 0;
-                       
-               case TIOCCBRK:  /* Turn break off, unconditionally */
-                       tty->driver->break_ctl(tty, 0);
-                       return 0;
-               case TCSBRK:   /* SVID version: non-zero arg --> no break */
-                       /*
-                        * XXX is the above comment correct, or the
-                        * code below correct?  Is this ioctl used at
-                        * all by anyone?
-                        */
-                       if (!arg)
-                               return send_break(tty, HZ/4);
-                       return 0;
-               case TCSBRKP:   /* support for POSIX tcsendbreak() */   
-                       return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
-
-               case TIOCMGET:
-                       return tty_tiocmget(tty, file, p);
-
-               case TIOCMSET:
-               case TIOCMBIC:
-               case TIOCMBIS:
-                       return tty_tiocmset(tty, file, cmd, p);
-       }
-       if (tty->driver->ioctl) {
-               int retval = (tty->driver->ioctl)(tty, file, cmd, arg);
-               if (retval != -ENOIOCTLCMD)
-                       return retval;
-       }
-       if (tty->ldisc.ioctl) {
-               int retval = (tty->ldisc.ioctl)(tty, file, cmd, arg);
-               if (retval != -ENOIOCTLCMD)
-                       return retval;
-       }
-       return -EINVAL;
-}
-
-
-/*
- * This implements the "Secure Attention Key" ---  the idea is to
- * prevent trojan horses by killing all processes associated with this
- * tty when the user hits the "Secure Attention Key".  Required for
- * super-paranoid applications --- see the Orange Book for more details.
- * 
- * This code could be nicer; ideally it should send a HUP, wait a few
- * seconds, then send an INT, and then a KILL signal.  But you then
- * have to coordinate with the init process, since all processes associated
- * with the current tty must be dead before the new getty is allowed
- * to spawn.
- *
- * Now, if only it were correct ;-/ The current code has a nasty hole -
- * it doesn't catch files in flight: we may send a descriptor to ourselves
- * via an AF_UNIX socket, close it, and later fetch it from the socket. FIXME.
- *
- * Nasty bug: do_SAK is being called in interrupt context.  This can
- * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
- */
-static void __do_SAK(void *arg)
-{
-#ifdef TTY_SOFT_SAK
-       tty_hangup(tty);
-#else
-       struct tty_struct *tty = arg;
-       struct task_struct *p;
-       struct list_head *l;
-       struct pid *pid;
-       int session;
-       int             i;
-       struct file     *filp;
-       
-       if (!tty)
-               return;
-       session  = tty->session;
-       if (tty->ldisc.flush_buffer)
-               tty->ldisc.flush_buffer(tty);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
-       read_lock(&tasklist_lock);
-       for_each_task_pid(session, PIDTYPE_SID, p, l, pid) {
-               if (p->signal->tty == tty || session > 0) {
-                       printk(KERN_NOTICE "SAK: killed process %d"
-                           " (%s): p->signal->session==tty->session\n",
-                           p->pid, p->comm);
-                       send_sig(SIGKILL, p, 1);
-                       continue;
-               }
-               task_lock(p);
-               if (p->files) {
-                       spin_lock(&p->files->file_lock);
-                       for (i=0; i < p->files->max_fds; i++) {
-                               filp = fcheck_files(p->files, i);
-                               if (!filp)
-                                       continue;
-                               if (filp->f_op->read == tty_read &&
-                                   filp->private_data == tty) {
-                                       printk(KERN_NOTICE "SAK: killed process %d"
-                                           " (%s): fd#%d opened to the tty\n",
-                                           p->pid, p->comm, i);
-                                       send_sig(SIGKILL, p, 1);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&p->files->file_lock);
-               }
-               task_unlock(p);
-       }
-       read_unlock(&tasklist_lock);
-#endif
-}
-
-/*
- * The tq handling here is a little racy - tty->SAK_work may already be queued.
- * Fortunately we don't need to worry, because if ->SAK_work is already queued,
- * the values which we write to it will be identical to the values which it
- * already has. --akpm
- */
-void do_SAK(struct tty_struct *tty)
-{
-       if (!tty)
-               return;
-       PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
-       schedule_work(&tty->SAK_work);
-}
-
-EXPORT_SYMBOL(do_SAK);
-
-/*
- * This routine is called out of the software interrupt to flush data
- * from the flip buffer to the line discipline.
- */
-static void flush_to_ldisc(void *private_)
-{
-       struct tty_struct *tty = (struct tty_struct *) private_;
-       unsigned char   *cp;
-       char            *fp;
-       int             count;
-       unsigned long flags;
-
-       if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
-               /*
-                * Do it after the next timer tick:
-                */
-               schedule_delayed_work(&tty->flip.work, 1);
-               return;
-       }
-
-       spin_lock_irqsave(&tty->read_lock, flags);
-       if (tty->flip.buf_num) {
-               cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
-               fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
-               tty->flip.buf_num = 0;
-               tty->flip.char_buf_ptr = tty->flip.char_buf;
-               tty->flip.flag_buf_ptr = tty->flip.flag_buf;
-       } else {
-               cp = tty->flip.char_buf;
-               fp = tty->flip.flag_buf;
-               tty->flip.buf_num = 1;
-               tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
-               tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
-       }
-       count = tty->flip.count;
-       tty->flip.count = 0;
-       spin_unlock_irqrestore(&tty->read_lock, flags);
-
-       tty->ldisc.receive_buf(tty, cp, fp, count);
-}
-
-/*
- * Routine which returns the baud rate of the tty
- *
- * Note that the baud_table needs to be kept in sync with the
- * include/asm/termbits.h file.
- */
-static int baud_table[] = {
-       0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-       9600, 19200, 38400, 57600, 115200, 230400, 460800,
-#ifdef __sparc__
-       76800, 153600, 307200, 614400, 921600
-#else
-       500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
-       2500000, 3000000, 3500000, 4000000
-#endif
-};
-
-static int n_baud_table = ARRAY_SIZE(baud_table);
-
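-/*
- * Example: with CBAUDEX set the index is offset by 15 into the second half of
- * baud_table, so a cbaud of CBAUDEX|1 (B57600 on i386) maps to entry 16,
- * i.e. 57600 baud.
- */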
-int tty_termios_baud_rate(struct termios *termios)
-{
-       unsigned int cbaud = termios->c_cflag & CBAUD;
-
-       if (cbaud & CBAUDEX) {
-               cbaud &= ~CBAUDEX;
-
-               if (cbaud < 1 || cbaud + 15 > n_baud_table)
-                       termios->c_cflag &= ~CBAUDEX;
-               else
-                       cbaud += 15;
-       }
-
-       return baud_table[cbaud];
-}
-
-EXPORT_SYMBOL(tty_termios_baud_rate);
-
-int tty_get_baud_rate(struct tty_struct *tty)
-{
-       int baud = tty_termios_baud_rate(tty->termios);
-
-       if (baud == 38400 && tty->alt_speed) {
-               if (!tty->warned) {
-                       printk(KERN_WARNING "Use of setserial/setrocket to "
-                                           "set SPD_* flags is deprecated\n");
-                       tty->warned = 1;
-               }
-               baud = tty->alt_speed;
-       }
-       
-       return baud;
-}
-
-EXPORT_SYMBOL(tty_get_baud_rate);
-
-void tty_flip_buffer_push(struct tty_struct *tty)
-{
-       if (tty->low_latency)
-               flush_to_ldisc((void *) tty);
-       else
-               schedule_delayed_work(&tty->flip.work, 1);
-}
-
-EXPORT_SYMBOL(tty_flip_buffer_push);
-
-/*
- * This subroutine initializes a tty structure.
- */
-static void initialize_tty_struct(struct tty_struct *tty)
-{
-       memset(tty, 0, sizeof(struct tty_struct));
-       tty->magic = TTY_MAGIC;
-       tty->ldisc = ldiscs[N_TTY];
-       tty->pgrp = -1;
-       tty->flip.char_buf_ptr = tty->flip.char_buf;
-       tty->flip.flag_buf_ptr = tty->flip.flag_buf;
-       INIT_WORK(&tty->flip.work, flush_to_ldisc, tty);
-       init_MUTEX(&tty->flip.pty_sem);
-       init_waitqueue_head(&tty->write_wait);
-       init_waitqueue_head(&tty->read_wait);
-       INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
-       sema_init(&tty->atomic_read, 1);
-       sema_init(&tty->atomic_write, 1);
-       spin_lock_init(&tty->read_lock);
-       INIT_LIST_HEAD(&tty->tty_files);
-       INIT_WORK(&tty->SAK_work, NULL, NULL);
-}
-
-/*
- * The default put_char routine if the driver did not define one.
- */
-static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
-{
-       tty->driver->write(tty, 0, &ch, 1);
-}
-
-static struct class_simple *tty_class;
-
-/**
- * tty_register_device - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- *     This field is optional, if there is no known struct device for this
- *     tty device it can be set to NULL safely.
- *
- * This call is required to be made to register an individual tty device if
- * the tty driver's flags have the TTY_DRIVER_NO_DEVFS bit set.  If that
- * bit is not set, this function should not be called.
- */
-void tty_register_device(struct tty_driver *driver, unsigned index,
-                        struct device *device)
-{
-       dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
-
-       if (index >= driver->num) {
-               printk(KERN_ERR "Attempt to register invalid tty line number "
-                      "(%d).\n", index);
-               return;
-       }
-
-       devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
-                       "%s%d", driver->devfs_name, index + driver->name_base);
-
-       /* we don't care about the ptys */
-       /* how nice to hide this behind some crappy interface.. */
-       if (driver->type != TTY_DRIVER_TYPE_PTY) {
-               char name[64];
-               tty_line_name(driver, index, name);
-               class_simple_device_add(tty_class, dev, device, name);
-       }
-}
-
-/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- *
- * If a tty device is registered with a call to tty_register_device() then
- * this function must be called when the tty device is gone.
- */
-void tty_unregister_device(struct tty_driver *driver, unsigned index)
-{
-       devfs_remove("%s%d", driver->devfs_name, index + driver->name_base);
-       class_simple_device_remove(MKDEV(driver->major, driver->minor_start) + index);
-}
-
-EXPORT_SYMBOL(tty_register_device);
-EXPORT_SYMBOL(tty_unregister_device);
-
-struct tty_driver *alloc_tty_driver(int lines)
-{
-       struct tty_driver *driver;
-
-       driver = kmalloc(sizeof(struct tty_driver), GFP_KERNEL);
-       if (driver) {
-               memset(driver, 0, sizeof(struct tty_driver));
-               driver->magic = TTY_DRIVER_MAGIC;
-               driver->num = lines;
-               /* later we'll move allocation of tables here */
-       }
-       return driver;
-}
-
-void put_tty_driver(struct tty_driver *driver)
-{
-       kfree(driver);
-}
-
-void tty_set_operations(struct tty_driver *driver, struct tty_operations *op)
-{
-       driver->open = op->open;
-       driver->close = op->close;
-       driver->write = op->write;
-       driver->put_char = op->put_char;
-       driver->flush_chars = op->flush_chars;
-       driver->write_room = op->write_room;
-       driver->chars_in_buffer = op->chars_in_buffer;
-       driver->ioctl = op->ioctl;
-       driver->set_termios = op->set_termios;
-       driver->throttle = op->throttle;
-       driver->unthrottle = op->unthrottle;
-       driver->stop = op->stop;
-       driver->start = op->start;
-       driver->hangup = op->hangup;
-       driver->break_ctl = op->break_ctl;
-       driver->flush_buffer = op->flush_buffer;
-       driver->set_ldisc = op->set_ldisc;
-       driver->wait_until_sent = op->wait_until_sent;
-       driver->send_xchar = op->send_xchar;
-       driver->read_proc = op->read_proc;
-       driver->write_proc = op->write_proc;
-       driver->tiocmget = op->tiocmget;
-       driver->tiocmset = op->tiocmset;
-}
-
-
-EXPORT_SYMBOL(alloc_tty_driver);
-EXPORT_SYMBOL(put_tty_driver);
-EXPORT_SYMBOL(tty_set_operations);
-
-/*
- * Called by a tty driver to register itself.
- */
-int tty_register_driver(struct tty_driver *driver)
-{
-       int error;
-        int i;
-       dev_t dev;
-       void **p = NULL;
-
-       if (driver->flags & TTY_DRIVER_INSTALLED)
-               return 0;
-
-       if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
-               p = kmalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL);
-               if (!p)
-                       return -ENOMEM;
-               memset(p, 0, driver->num * 3 * sizeof(void *));
-       }
-
-       if (!driver->major) {
-               error = alloc_chrdev_region(&dev, driver->minor_start, driver->num,
-                                               (char*)driver->name);
-               if (!error) {
-                       driver->major = MAJOR(dev);
-                       driver->minor_start = MINOR(dev);
-               }
-       } else {
-               dev = MKDEV(driver->major, driver->minor_start);
-               error = register_chrdev_region(dev, driver->num,
-                                               (char*)driver->name);
-       }
-       if (error < 0) {
-               kfree(p);
-               return error;
-       }
-
-       if (p) {
-               driver->ttys = (struct tty_struct **)p;
-               driver->termios = (struct termios **)(p + driver->num);
-               driver->termios_locked = (struct termios **)(p + driver->num * 2);
-       } else {
-               driver->ttys = NULL;
-               driver->termios = NULL;
-               driver->termios_locked = NULL;
-       }
-
-       cdev_init(&driver->cdev, &tty_fops);
-       driver->cdev.owner = driver->owner;
-       error = cdev_add(&driver->cdev, dev, driver->num);
-       if (error) {
-               cdev_del(&driver->cdev);
-               unregister_chrdev_region(dev, driver->num);
-               driver->ttys = NULL;
-               driver->termios = driver->termios_locked = NULL;
-               kfree(p);
-               return error;
-       }
-
-       if (!driver->put_char)
-               driver->put_char = tty_default_put_char;
-       
-       list_add(&driver->tty_drivers, &tty_drivers);
-       
-       if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
-               for(i = 0; i < driver->num; i++)
-                   tty_register_device(driver, i, NULL);
-       }
-       proc_tty_register_driver(driver);
-       return 0;
-}
-
-EXPORT_SYMBOL(tty_register_driver);
-
-/*
- * Called by a tty driver to unregister itself.
- */
-int tty_unregister_driver(struct tty_driver *driver)
-{
-       int i;
-       struct termios *tp;
-       void *p;
-
-       if (driver->refcount)
-               return -EBUSY;
-
-       unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
-                               driver->num);
-
-       list_del(&driver->tty_drivers);
-
-       /*
-        * Free the termios and termios_locked structures because
-        * we don't want to get memory leaks when modular tty
-        * drivers are removed from the kernel.
-        */
-       for (i = 0; i < driver->num; i++) {
-               tp = driver->termios[i];
-               if (tp) {
-                       driver->termios[i] = NULL;
-                       kfree(tp);
-               }
-               tp = driver->termios_locked[i];
-               if (tp) {
-                       driver->termios_locked[i] = NULL;
-                       kfree(tp);
-               }
-               if (!(driver->flags & TTY_DRIVER_NO_DEVFS))
-                       tty_unregister_device(driver, i);
-       }
-       p = driver->ttys;
-       proc_tty_unregister_driver(driver);
-       driver->ttys = NULL;
-       driver->termios = driver->termios_locked = NULL;
-       kfree(p);
-       cdev_del(&driver->cdev);
-       return 0;
-}
-
-EXPORT_SYMBOL(tty_unregister_driver);
-
-
-/*
- * Initialize the console device. This is called *early*, so
- * we can't necessarily depend on lots of kernel help here.
- * Just do some early initializations, and do the complex setup
- * later.
- */
-void __init console_init(void)
-{
-       initcall_t *call;
-
-       /* Setup the default TTY line discipline. */
-       (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
-
-       /*
-        * set up the console device so that later boot sequences can 
-        * inform about problems etc..
-        */
-#ifdef CONFIG_EARLY_PRINTK
-       disable_early_printk();
-#endif
-#ifdef CONFIG_SERIAL_68360
-       /* This is not a console initcall. I know not what it's doing here.
-          So I haven't moved it. dwmw2 */
-        rs_360_init();
-#endif
-       call = &__con_initcall_start;
-       while (call < &__con_initcall_end) {
-               (*call)();
-               call++;
-       }
-}
-
-#ifdef CONFIG_VT
-extern int vty_init(void);
-#endif
-
-static int __init tty_class_init(void)
-{
-       tty_class = class_simple_create(THIS_MODULE, "tty");
-       if (IS_ERR(tty_class))
-               return PTR_ERR(tty_class);
-       return 0;
-}
-
-postcore_initcall(tty_class_init);
-
-/* 3/2004 jmc: why do these devices exist? */
-
-static struct cdev tty_cdev, console_cdev;
-#ifdef CONFIG_UNIX98_PTYS
-static struct cdev ptmx_cdev;
-#endif
-#ifdef CONFIG_VT
-static struct cdev vc0_cdev;
-#endif
-
-/*
- * Ok, now we can initialize the rest of the tty devices and can count
- * on memory allocations, interrupts etc..
- */
-static int __init tty_init(void)
-{
-       cdev_init(&tty_cdev, &tty_fops);
-       if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
-           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
-               panic("Couldn't register /dev/tty driver\n");
-       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 0), S_IFCHR|S_IRUGO|S_IWUGO, "tty");
-       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
-
-       cdev_init(&console_cdev, &console_fops);
-       if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
-           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
-               panic("Couldn't register /dev/console driver\n");
-       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 1), S_IFCHR|S_IRUSR|S_IWUSR, "console");
-       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
-
-#ifdef CONFIG_UNIX98_PTYS
-       cdev_init(&ptmx_cdev, &tty_fops);
-       if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
-           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
-               panic("Couldn't register /dev/ptmx driver\n");
-       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 2), S_IFCHR|S_IRUGO|S_IWUGO, "ptmx");
-       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
-#endif
-
-#ifdef CONFIG_VT
-       if (console_use_vt) {
-               cdev_init(&vc0_cdev, &console_fops);
-               if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
-                   register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
-                                          "/dev/vc/0") < 0)
-                       panic("Couldn't register /dev/tty0 driver\n");
-               devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
-                             "vc/0");
-               class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
-                                       "tty0");
-
-               vty_init();
-       }
-#endif
-       return 0;
-}
-module_init(tty_init);
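
For reference, the TIOCGWINSZ path removed above (tty_ioctl() -> tiocgwinsz(), which copies tty->winsize back to user space) is the one exercised by ordinary user-space code along the lines of this minimal sketch; it uses only standard POSIX headers and is purely illustrative, not part of this tree:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    struct winsize ws;

    /* Serviced in the kernel by tiocgwinsz() via copy_to_user(). */
    if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == -1) {
        perror("TIOCGWINSZ");
        return 1;
    }
    printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
    return 0;
}
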
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/Makefile
deleted file mode 100644 (file)
index e181171..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-obj-y  += console/
-obj-y  += evtchn/
-obj-y  += privcmd/
-obj-y   += balloon/
-
-obj-$(CONFIG_XEN_BLKDEV_BACKEND)       += blkback/
-obj-$(CONFIG_XEN_NETDEV_BACKEND)       += netback/
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += blkfront/
-obj-$(CONFIG_XEN_NETDEV_FRONTEND)      += netfront/
-
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/Makefile
deleted file mode 100644 (file)
index 0e3a348..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y += balloon.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c b/linux-2.6.8.1-xen-sparse/drivers/xen/balloon/balloon.c
deleted file mode 100644 (file)
index b9ebda1..0000000
+++ /dev/null
@@ -1,712 +0,0 @@
-/******************************************************************************
- * balloon.c
- *
- * Xen balloon driver - enables returning/claiming memory to/from Xen.
- *
- * Copyright (c) 2003, B Dragovic
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include <asm-xen/xen_proc.h>
-#else
-#include <asm/xen_proc.h>
-#endif
-
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/ctrl_if.h>
-#else
-#include <asm/hypervisor.h>
-#include <asm/ctrl_if.h>
-#endif
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-
-/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
-#define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
-#define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
-typedef struct user_balloon_op {
-    unsigned int  op;
-    unsigned long size;
-} user_balloon_op_t;
-/* END OF USER DEFINE */
-
-static struct proc_dir_entry *balloon_pde;
-
-unsigned long credit;
-static unsigned long current_pages, most_seen_pages;
-
-/*
- * Dead entry written into balloon-owned entries in the PMT.
- * It is deliberately different to INVALID_P2M_ENTRY.
- */
-#define DEAD 0xdead1234
-
-static inline pte_t *get_ptep(unsigned long addr)
-{
-    pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
-    pgd = pgd_offset_k(addr);
-
-    if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
-
-    pmd = pmd_offset(pgd, addr);
-    if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    ptep = pte_offset_kernel(pmd, addr);
-#else
-    ptep = pte_offset(pmd, addr);
-#endif
-
-    return ptep;
-}
-
-/* Main function for relinquishing memory. */
-static unsigned long inflate_balloon(unsigned long num_pages)
-{
-    unsigned long *parray;
-    unsigned long *currp;
-    unsigned long curraddr;
-    unsigned long ret = 0;
-    unsigned long i, j;
-
-    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
-    if ( parray == NULL )
-    {
-        printk(KERN_ERR "inflate_balloon: Unable to vmalloc parray\n");
-        return -EFAULT;
-    }
-
-    currp = parray;
-
-    for ( i = 0; i < num_pages; i++, currp++ )
-    {
-        struct page *page = alloc_page(GFP_HIGHUSER);
-        unsigned long pfn = page - mem_map;
-
-        /* If allocation fails then free all reserved pages. */
-        if ( page == NULL )
-        {
-            printk(KERN_ERR "Unable to inflate balloon by %ld, only"
-                   " %ld pages free.", num_pages, i);
-            currp = parray;
-            for ( j = 0; j < i; j++, currp++ )
-                __free_page((struct page *) (mem_map + *currp));
-            ret = -EFAULT;
-            goto cleanup;
-        }
-
-        *currp = pfn;
-    }
-
-
-    for ( i = 0, currp = parray; i < num_pages; i++, currp++ )
-    {
-        unsigned long mfn = phys_to_machine_mapping[*currp];
-        curraddr = (unsigned long)page_address(mem_map + *currp);
-        /* Blow away page contents for security, and also p.t. ref if any. */
-        if ( curraddr != 0 )
-        {
-            scrub_pages(curraddr, 1);
-            queue_l1_entry_update(get_ptep(curraddr), 0);
-        }
-#ifdef CONFIG_XEN_SCRUB_PAGES
-        else
-        {
-            void *p = kmap(&mem_map[*currp]);
-            scrub_pages(p, 1);
-            kunmap(&mem_map[*currp]);
-        }
-#endif
-        phys_to_machine_mapping[*currp] = DEAD;
-        *currp = mfn;
-    }
-
-    /* Flush updates through and flush the TLB. */
-    xen_tlb_flush();
-
-    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
-                                parray, num_pages, 0);
-    if ( unlikely(ret != num_pages) )
-    {
-        printk(KERN_ERR "Unable to inflate balloon, error %lx\n", ret);
-        goto cleanup;
-    }
-
-    credit += num_pages;
-    ret = num_pages;
-
- cleanup:
-    vfree(parray);
-
-    return ret;
-}
-
-/*
 - * Install new memory pages obtained by deflate_balloon().  The function walks
 - * the phys->machine mapping table looking for DEAD entries and populates
- * them.
- */
-static unsigned long process_returned_pages(unsigned long * parray, 
-                                       unsigned long num)
-{
-    /* Currently, this function is rather simplistic, as it is assumed
-     * that the domain reclaims only the number of pages it previously
-     * released.  This is to change soon, and the code to extend page
-     * tables etc. will be incorporated here.
-     */
-     
-    unsigned long tot_pages = most_seen_pages;   
-    unsigned long * curr = parray;
-    unsigned long num_installed;
-    unsigned long i;
-
-    num_installed = 0;
-    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
-    {
-        if ( phys_to_machine_mapping[i] == DEAD )
-        {
-            phys_to_machine_mapping[i] = *curr;
-            queue_machphys_update(*curr, i);
-            if (i<max_low_pfn)
-              queue_l1_entry_update(
-                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
-                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
-            __free_page(mem_map + i);
-
-            curr++;
-            num_installed++;
-        }
-    }
-
-    return num_installed;
-}
-
-unsigned long deflate_balloon(unsigned long num_pages)
-{
-    unsigned long ret;
-    unsigned long * parray;
-
-    if ( num_pages > credit )
-    {
-        printk(KERN_ERR "deflate_balloon: %lu pages > %lu credit.\n",
-               num_pages, credit);
-        return -EAGAIN;
-    }
-
-    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
-    if ( parray == NULL )
-    {
-        printk(KERN_ERR "deflate_balloon: Unable to vmalloc parray\n");
-        return 0;
-    }
-
-    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
-                                parray, num_pages, 0);
-    if ( unlikely(ret != num_pages) )
-    {
-        printk(KERN_ERR "deflate_balloon: xen increase_reservation err %lx\n",
-               ret);
-        goto cleanup;
-    }
-
-    if ( (ret = process_returned_pages(parray, num_pages)) < num_pages )
-    {
-        printk(KERN_WARNING
-               "deflate_balloon: restored only %lx of %lx pages.\n",
-           ret, num_pages);
-        goto cleanup;
-    }
-
-    ret = num_pages;
-    credit -= num_pages;
-
- cleanup:
-    vfree(parray);
-
-    return ret;
-}
-
-#define PAGE_TO_MB_SHIFT 8
-
-/*
- * pagetable_extend() mimics pagetable_init() from arch/xen/mm/init.c 
- * The loops do go through all of low memory (ZONE_NORMAL).  The
- * old pages have _PAGE_PRESENT set and so get skipped.
- * If low memory is not full, the new pages are used to fill it, going
- * from cur_low_pfn to low_pfn.  High memory is not direct-mapped, so
- * no extension is needed for new high memory.
- */
-
-static void pagetable_extend (int cur_low_pfn, int newpages)
-{
-    unsigned long vaddr, end;
-    pgd_t *kpgd, *pgd, *pgd_base;
-    int i, j, k;
-    pmd_t *kpmd, *pmd;
-    pte_t *kpte, *pte, *pte_base;
-    int low_pfn = min(cur_low_pfn+newpages,(int)max_low_pfn);
-
-    /*
-     * This can be zero as well - no problem, in that case we exit
-     * the loops anyway due to the PTRS_PER_* conditions.
-     */
-    end = (unsigned long)__va(low_pfn*PAGE_SIZE);
-
-    pgd_base = init_mm.pgd;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    i = pgd_index(PAGE_OFFSET);
-#else
-    i = __pgd_offset(PAGE_OFFSET);
-#endif
-    pgd = pgd_base + i;
-
-    for (; i < PTRS_PER_PGD; pgd++, i++) {
-        vaddr = i*PGDIR_SIZE;
-        if (end && (vaddr >= end))
-            break;
-        pmd = (pmd_t *)pgd;
-        for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
-            vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
-            if (end && (vaddr >= end))
-                break;
-
-            /* Filled in for us already? */
-            if ( pmd_val(*pmd) & _PAGE_PRESENT )
-                continue;
-
-            pte_base = pte = (pte_t *) __get_free_page(GFP_KERNEL);
-
-            for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
-                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
-                if (end && (vaddr >= end))
-                    break;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-                *pte = mk_pte(virt_to_page(vaddr), PAGE_KERNEL);
-#else
-               *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
-#endif
-            }
-            kpgd = pgd_offset_k((unsigned long)pte_base);
-            kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-            kpte = pte_offset_kernel(kpmd, (unsigned long)pte_base);
-#else
-           kpte = pte_offset(kpmd, (unsigned long)pte_base);
-#endif
-            queue_l1_entry_update(kpte,
-                                  (*(unsigned long *)kpte)&~_PAGE_RW);
-            set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
-            XEN_flush_page_update_queue();
-        }
-    }
-}
-
-/*
- * claim_new_pages() asks xen to increase this domain's memory reservation
- * and return a list of the new pages of memory.  These new pages are
- * added to the free list of the memory manager.
- *
- * Available RAM does not normally change while Linux runs.  To make this work,
- * the Linux mem= boot-time command-line parameter must say how large memory could
- * possibly grow.  Then setup_arch() in arch/xen/kernel/setup.c
- * sets max_pfn, max_low_pfn and the zones according to
- * this max memory size.   The page tables themselves can only be
- * extended after xen has assigned new pages to this domain.
- */
-
-static unsigned long
-claim_new_pages(unsigned long num_pages)
-{
-    unsigned long new_page_cnt, pfn;
-    unsigned long * parray, *curr;
-
-    if (most_seen_pages+num_pages> max_pfn)
-        num_pages = max_pfn-most_seen_pages;
-    if (num_pages==0) return -EINVAL;
-
-    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
-    if ( parray == NULL )
-    {
-        printk(KERN_ERR "claim_new_pages: Unable to vmalloc parray\n");
-        return 0;
-    }
-
-    new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
-                                parray, num_pages, 0);
-    if ( new_page_cnt != num_pages )
-    {
-        printk(KERN_WARNING
-            "claim_new_pages: xen granted only %lu of %lu requested pages\n",
-            new_page_cnt, num_pages);
-
-        /* 
-         * Avoid a xen lockup when the user forgot to run setdomainmaxmem. Xen
-         * can usually dribble out a few pages and then hangs.
-         */
-        if ( new_page_cnt < 1000 )
-        {
-            printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
-            HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
-                                parray, new_page_cnt, 0);
-            return -EFAULT;
-        }
-    }
-    memcpy(phys_to_machine_mapping+most_seen_pages, parray,
-           new_page_cnt * sizeof(unsigned long));
-
-    pagetable_extend(most_seen_pages,new_page_cnt);
-
-    for ( pfn = most_seen_pages, curr = parray;
-          pfn < most_seen_pages+new_page_cnt;
-          pfn++, curr++ )
-    {
-        struct page *page = mem_map + pfn;
-
-#ifndef CONFIG_HIGHMEM
-        if ( pfn>=max_low_pfn )
-        {
-            printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-               pfn>>PAGE_TO_MB_SHIFT);
-            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-            break;
-        }
-#endif
-        queue_machphys_update(*curr, pfn);
-        if ( pfn < max_low_pfn )
-            queue_l1_entry_update(
-                get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
-                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-        
-        XEN_flush_page_update_queue();
-        
-        /* this next bit mimics arch/xen/mm/init.c:one_highpage_init() */
-        ClearPageReserved(page);
-        if ( pfn >= max_low_pfn )
-            set_bit(PG_highmem, &page->flags);
-        set_page_count(page, 1);
-        __free_page(page);
-    }
-
-    vfree(parray);
-
-    return new_page_cnt;
-}
-
-
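-/*
- * Drive the balloon towards 'target' pages: inflate (hand pages back to Xen)
- * when the target is below current_pages, otherwise deflate to reclaim
- * previously ballooned pages and, if the target exceeds most_seen_pages,
- * claim brand-new pages from Xen.  Returns 1 on success, <= 0 on failure.
- */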
-static int balloon_try_target(int target)
-{
-    int change, reclaim;
-
-    if ( target < current_pages )
-    {
-        int change = inflate_balloon(current_pages-target);
-        if ( change <= 0 )
-            return change;
-
-        current_pages -= change;
-        printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
-            change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-    }
-    else if ( target > current_pages )
-    {
-        reclaim = min((unsigned long)target,most_seen_pages) - current_pages;
-
-        if ( reclaim )
-        {
-            change = deflate_balloon( reclaim );
-            if ( change <= 0 )
-                return change;
-            current_pages += change;
-            printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
-                change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-        }
-
-        if ( most_seen_pages < target )
-        {
-            int growth = claim_new_pages(target-most_seen_pages);
-            if ( growth <= 0 )
-                return growth;
-            most_seen_pages += growth;
-            current_pages += growth;
-            printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
-                growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-        }
-    }
-
-    return 1;
-}
-
-
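-/*
- * Control-interface handler: a CMSG_MEM_REQUEST_SET message carries a
- * mem_request_t whose target (in pages) is passed to balloon_try_target();
- * the result is written into req->status and the message is sent back as
- * the response.
- */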
-static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    switch ( msg->subtype )
-    {
-    case CMSG_MEM_REQUEST_SET:
-        if ( msg->length != sizeof(mem_request_t) )
-            goto parse_error;
-        {
-            mem_request_t *req = (mem_request_t *)&msg->msg[0];
-            req->status = balloon_try_target(req->target);
-        }
-        break;        
-    default:
-        goto parse_error;
-    }
-
-    ctrl_if_send_response(msg);
-    return;
-
- parse_error:
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static int balloon_write(struct file *file, const char *buffer,
-                         size_t count, loff_t *offp)
-{
-    char memstring[64], *endchar;
-    int len, i;
-    unsigned long target;
-    unsigned long long targetbytes;
-
-    /* Only admin can play with the balloon :) */
-    if ( !capable(CAP_SYS_ADMIN) )
-        return -EPERM;
-
-    if ( count > sizeof(memstring) )
-        return -EFBIG;
-
-    len = strnlen_user(buffer, count);
-    if ( len == 0 ) return -EBADMSG;
-    if ( len == 1 ) return 1; /* input starts with a NUL char */
-    if ( strncpy_from_user(memstring, buffer, len) < 0 )
-        return -EFAULT;
-
-    endchar = memstring;
-    for ( i = 0; i < len; ++i, ++endchar )
-        if ( (memstring[i] < '0') || (memstring[i] > '9') )
-            break;
-    if ( i == 0 )
-        return -EBADMSG;
-
-    targetbytes = memparse(memstring,&endchar);
-    target = targetbytes >> PAGE_SHIFT;
-
-    i = balloon_try_target(target);
-
-    if ( i <= 0 ) return i;
-
-    *offp += len;
-    return len;
-}
-
-static int balloon_read(struct file *filp, char *buffer,
-                        size_t count, loff_t *offp)
-{
-    static char priv_buf[32];
-    char *priv_bufp = priv_buf;
-    int len;
-    len = sprintf(priv_buf,"%lu\n",current_pages<<PAGE_SHIFT);
-
-    len -= *offp;
-    priv_bufp += *offp;
-    if (len>count) len = count;
-    if (len<0) len = 0;
-
-    copy_to_user(buffer, priv_bufp, len);
-
-    *offp += len;
-    return len;
-}
-
-static struct file_operations balloon_fops = {
-    .read  = balloon_read,
-    .write = balloon_write
-};
-#else
-
-
-static int balloon_write(struct file *file, const char *buffer,
-                         u_long count, void *data)
-{
-    char memstring[64], *endchar;
-    int len, i;
-    unsigned long target;
-    unsigned long long targetbytes;
-
-    /* Only admin can play with the balloon :) */
-    if ( !capable(CAP_SYS_ADMIN) )
-        return -EPERM;
-
-    if ( count > sizeof(memstring) )
-        return -EFBIG;
-
-    len = strnlen_user(buffer, count);
-    if ( len == 0 ) return -EBADMSG;
-    if ( len == 1 ) return 1; /* input starts with a NUL char */
-    if ( strncpy_from_user(memstring, buffer, len) < 0 )
-        return -EFAULT;
-
-    endchar = memstring;
-    for ( i = 0; i < len; ++i, ++endchar )
-        if ( (memstring[i] < '0') || (memstring[i] > '9') )
-            break;
-    if ( i == 0 )
-        return -EBADMSG;
-
-    targetbytes = memparse(memstring,&endchar);
-    target = targetbytes >> PAGE_SHIFT;
-
-    if ( target < current_pages )
-    {
-        int change = inflate_balloon(current_pages-target);
-        if ( change <= 0 )
-            return change;
-
-        current_pages -= change;
-        printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
-            change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-    }
-    else if ( target > current_pages )
-    {
-        int change, reclaim = min(target,most_seen_pages) - current_pages;
-
-        if ( reclaim )
-        {
-            change = deflate_balloon( reclaim);
-            if ( change <= 0 )
-                return change;
-            current_pages += change;
-            printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
-                change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-        }
-
-        if ( most_seen_pages < target )
-        {
-            int growth = claim_new_pages(target-most_seen_pages);
-            if ( growth <= 0 )
-                return growth;
-            most_seen_pages += growth;
-            current_pages += growth;
-            printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
-                growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
-        }
-    }
-
-
-    return len;
-}
-
-static int balloon_read(char *page, char **start, off_t off,
-                       int count, int *eof, void *data)
-{
-  int len;
-  len = sprintf(page,"%lu\n",current_pages<<PAGE_SHIFT);
-  
-  if (len <= off+count) *eof = 1;
-  *start = page + off;
-  len -= off;
-  if (len>count) len = count;
-  if (len<0) len = 0;
-  return len;
-}
-
-
-
-#endif
-
-
-static int __init balloon_init(void)
-{
-    printk(KERN_ALERT "Starting Xen Balloon driver\n");
-
-    most_seen_pages = current_pages = min(xen_start_info.nr_pages,max_pfn);
-    if ( (balloon_pde = create_xen_proc_entry("memory_target", 0644)) == NULL )
-    {
-        printk(KERN_ALERT "Unable to create balloon driver proc entry!");
-        return -1;
-    }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    balloon_pde->owner     = THIS_MODULE;
-    balloon_pde->nlink     = 1;
-    balloon_pde->proc_fops = &balloon_fops;
-#else
-    balloon_pde->write_proc = balloon_write;
-    balloon_pde->read_proc  = balloon_read;
-#endif
-
-    (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx,
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-
-    /* 
-     * Make a new phys map if mem= says xen can give us memory to grow.
-     */
-    if ( max_pfn > xen_start_info.nr_pages )
-    {
-        extern unsigned long *phys_to_machine_mapping;
-        unsigned long *newmap;
-        newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
-        memset(newmap, ~0, max_pfn * sizeof(unsigned long));
-        memcpy(newmap, phys_to_machine_mapping,
-               xen_start_info.nr_pages * sizeof(unsigned long));
-        phys_to_machine_mapping = newmap;
-    }
-
-    return 0;
-}
-
-static void __exit balloon_cleanup(void)
-{
-    if ( balloon_pde != NULL )
-    {
-        remove_xen_proc_entry("memory_target");
-        balloon_pde = NULL;
-    }
-}
-
-module_init(balloon_init);
-module_exit(balloon_cleanup);
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/Makefile
deleted file mode 100644 (file)
index a27fe65..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := blkback.o control.o interface.o vbd.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/blkback.c
deleted file mode 100644 (file)
index 6d20102..0000000
+++ /dev/null
@@ -1,586 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/main.c
- * 
- * Back-end of the driver for virtual block devices. This portion of the
- * driver exports a 'unified' block-device interface that can be accessed
- * by any operating system that implements a compatible front end. A 
- * reference front-end implementation can be found in:
- *  arch/xen/drivers/blkif/frontend
- * 
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- */
-
-#include "common.h"
-
-/*
- * These are rather arbitrary. They are fairly large because adjacent requests
- * pulled from a communication ring are quite likely to end up being part of
- * the same scatter/gather request at the disc.
- * 
- * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
- * This will increase the chances of being able to write whole tracks.
- * 64 should be enough to keep us competitive with Linux.
- */
-#define MAX_PENDING_REQS 64
-#define BATCH_PER_DOMAIN 16
-
-static unsigned long mmap_vstart;
-#define MMAP_PAGES_PER_REQUEST \
-    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-#define MMAP_PAGES             \
-    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg)                        \
-    (mmap_vstart +                                   \
-     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
-     ((_seg) * PAGE_SIZE))
-
-/*
- * Each outstanding request that we've passed to the lower device layers has a 
- * 'pending_req' allocated to it. Each buffer_head that completes decrements 
- * the pendcnt towards zero. When it hits zero, the specified domain has a 
- * response queued for it, with the saved 'id' passed back.
- */
-typedef struct {
-    blkif_t       *blkif;
-    unsigned long  id;
-    int            nr_pages;
-    atomic_t       pendcnt;
-    unsigned short operation;
-    int            status;
-} pending_req_t;
-
-/*
- * We can't allocate pending_req's in order, since they may complete out of 
- * order. We therefore maintain an allocation ring. This ring also indicates 
- * when enough work has been passed down -- at that point the allocation ring 
- * will be empty.
- */
-static pending_req_t pending_reqs[MAX_PENDING_REQS];
-static unsigned char pending_ring[MAX_PENDING_REQS];
-static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
-/* NB. We use a different index type to differentiate from shared blk rings. */
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-static kmem_cache_t *buffer_head_cachep;
-#endif
-
-static int do_block_io_op(blkif_t *blkif, int max_to_do);
-static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
-static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
-static void make_response(blkif_t *blkif, unsigned long id, 
-                          unsigned short op, int st);
-
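-/*
- * Tear down the virtual mappings of a request's MMAP_VADDR window with one
- * update_va_mapping multicall entry per page; the final entry carries
- * UVMF_FLUSH_TLB so the whole batch needs only a single TLB flush.
- */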
-static void fast_flush_area(int idx, int nr_pages)
-{
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-    int               i;
-
-    for ( i = 0; i < nr_pages; i++ )
-    {
-        mcl[i].op = __HYPERVISOR_update_va_mapping;
-        mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
-        mcl[i].args[1] = 0;
-        mcl[i].args[2] = 0;
-    }
-
-    mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
-    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
-        BUG();
-}
-
-
-/******************************************************************
- * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
- */
-
-static struct list_head blkio_schedule_list;
-static spinlock_t blkio_schedule_list_lock;
-
-static int __on_blkdev_list(blkif_t *blkif)
-{
-    return blkif->blkdev_list.next != NULL;
-}
-
-static void remove_from_blkdev_list(blkif_t *blkif)
-{
-    unsigned long flags;
-    if ( !__on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( __on_blkdev_list(blkif) )
-    {
-        list_del(&blkif->blkdev_list);
-        blkif->blkdev_list.next = NULL;
-        blkif_put(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-}
-
-static void add_to_blkdev_list_tail(blkif_t *blkif)
-{
-    unsigned long flags;
-    if ( __on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
-    {
-        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
-        blkif_get(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-}
-
-
-/******************************************************************
- * SCHEDULER FUNCTIONS
- */
-
-static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
-
-static int blkio_schedule(void *arg)
-{
-    DECLARE_WAITQUEUE(wq, current);
-
-    blkif_t          *blkif;
-    struct list_head *ent;
-
-    daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        "xenblkd"
-#endif
-        );
-
-    for ( ; ; )
-    {
-        /* Wait for work to do. */
-        add_wait_queue(&blkio_schedule_wait, &wq);
-        set_current_state(TASK_INTERRUPTIBLE);
-        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
-             list_empty(&blkio_schedule_list) )
-            schedule();
-        __set_current_state(TASK_RUNNING);
-        remove_wait_queue(&blkio_schedule_wait, &wq);
-
-        /* Queue up a batch of requests. */
-        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-                !list_empty(&blkio_schedule_list) )
-        {
-            ent = blkio_schedule_list.next;
-            blkif = list_entry(ent, blkif_t, blkdev_list);
-            blkif_get(blkif);
-            remove_from_blkdev_list(blkif);
-            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
-                add_to_blkdev_list_tail(blkif);
-            blkif_put(blkif);
-        }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-        /* Push the batch through to disc. */
-        run_task_queue(&tq_disk);
-#endif
-    }
-}
-
-static void maybe_trigger_blkio_schedule(void)
-{
-    /*
-     * Needed so that two processes that together make the following predicate
-     * true don't both read stale values and evaluate the predicate
-     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
-     */
-    smp_mb();
-
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&blkio_schedule_list) )
-        wake_up(&blkio_schedule_wait);
-}
-
-
-
-/******************************************************************
- * COMPLETION CALLBACK -- Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
-{
-    unsigned long flags;
-
-    /* An error fails the entire request. */
-    if ( !uptodate )
-    {
-        DPRINTK("Buffer not up-to-date at end of operation\n");
-        pending_req->status = BLKIF_RSP_ERROR;
-    }
-
-    if ( atomic_dec_and_test(&pending_req->pendcnt) )
-    {
-        int pending_idx = pending_req - pending_reqs;
-        fast_flush_area(pending_idx, pending_req->nr_pages);
-        make_response(pending_req->blkif, pending_req->id,
-                      pending_req->operation, pending_req->status);
-        blkif_put(pending_req->blkif);
-        spin_lock_irqsave(&pend_prod_lock, flags);
-        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-        spin_unlock_irqrestore(&pend_prod_lock, flags);
-        maybe_trigger_blkio_schedule();
-    }
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-static void end_block_io_op(struct buffer_head *bh, int uptodate)
-{
-    __end_block_io_op(bh->b_private, uptodate);
-    kmem_cache_free(buffer_head_cachep, bh);
-}
-#else
-static int end_block_io_op(struct bio *bio, unsigned int done, int error)
-{
-    if ( done || error )
-        __end_block_io_op(bio->bi_private, (done && !error));
-    bio_put(bio);
-    return error;
-}
-#endif
-
-
-/******************************************************************************
- * NOTIFICATION FROM GUEST OS.
- */
-
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
-    blkif_t *blkif = dev_id;
-    add_to_blkdev_list_tail(blkif);
-    maybe_trigger_blkio_schedule();
-    return IRQ_HANDLED;
-}
-
-
-
-/******************************************************************
- * DOWNWARD CALLS -- These interface with the block-device layer proper.
- */
-
-static int do_block_io_op(blkif_t *blkif, int max_to_do)
-{
-    blkif_ring_t *blk_ring = blkif->blk_ring_base;
-    blkif_request_t *req;
-    BLKIF_RING_IDX i, rp;
-    int more_to_do = 0;
-
-    rp = blk_ring->req_prod;
-    rmb(); /* Ensure we see queued requests up to 'rp'. */
-
-    /* Take items off the comms ring, taking care not to overflow. */
-    for ( i = blkif->blk_req_cons; 
-          (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
-          i++ )
-    {
-        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
-        {
-            more_to_do = 1;
-            break;
-        }
-        
-        req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
-        switch ( req->operation )
-        {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            dispatch_rw_block_io(blkif, req);
-            break;
-
-        case BLKIF_OP_PROBE:
-            dispatch_probe(blkif, req);
-            break;
-
-        default:
-            DPRINTK("error: unknown block io operation [%d]\n",
-                    blk_ring->ring[i].req.operation);
-            make_response(blkif, blk_ring->ring[i].req.id, 
-                          blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
-            break;
-        }
-    }
-
-    blkif->blk_req_cons = i;
-    return more_to_do;
-}
-
-static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
-{
-    int rsp = BLKIF_RSP_ERROR;
-    int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
-    /* We expect one buffer only. */
-    if ( unlikely(req->nr_segments != 1) )
-        goto out;
-
-    /* Make sure the buffer is page-sized. */
-    if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
-         (blkif_last_sect(req->frame_and_sects[0]) != 7) )
-        goto out;
-
-    if ( HYPERVISOR_update_va_mapping_otherdomain(
-        MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
-        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
-        0, blkif->domid) )
-        goto out;
-
-    rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0), 
-                    PAGE_SIZE / sizeof(vdisk_t));
-
- out:
-    fast_flush_area(pending_idx, 1);
-    make_response(blkif, req->id, req->operation, rsp);
-}
-
-static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
-{
-    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
-    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-    short nr_sects;
-    unsigned long buffer, fas;
-    int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-    pending_req_t *pending_req;
-    unsigned long  remap_prot;
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-
-    /* We map virtual scatter/gather segments to physical segments. */
-    int new_segs, nr_psegs = 0;
-    phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
-
-    /* Check that number of segments is sane. */
-    if ( unlikely(req->nr_segments == 0) || 
-         unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
-    {
-        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
-        goto bad_descriptor;
-    }
-
-    /*
-     * Check each address/size pair is sane, and convert into a
-     * physical device and block offset. Note that if the offset and size
-     * crosses a virtual extent boundary, we may end up with more
-     * physical scatter/gather segments than virtual segments.
-     */
-    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
-    {
-        fas      = req->frame_and_sects[i];
-        buffer   = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
-        nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
-
-        if ( nr_sects <= 0 )
-            goto bad_descriptor;
-
-        phys_seg[nr_psegs].dev           = req->device;
-        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
-        phys_seg[nr_psegs].buffer        = buffer;
-        phys_seg[nr_psegs].nr_sects      = nr_sects;
-
-        /* Translate the request into the relevant 'physical device' */
-        new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
-        if ( new_segs < 0 )
-        { 
-            DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
-                    operation == READ ? "read" : "write", 
-                    req->sector_number + tot_sects, 
-                    req->sector_number + tot_sects + nr_sects, 
-                    req->device); 
-            goto bad_descriptor;
-        }
-  
-        nr_psegs += new_segs;
-        ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
-    }
-
-    /* Nonsensical zero-sized request? */
-    if ( unlikely(nr_psegs == 0) )
-        goto bad_descriptor;
-
-    if ( operation == READ )
-        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
-    else
-        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
-
-    for ( i = 0; i < nr_psegs; i++ )
-    {
-        mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
-        mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
-        mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
-        mcl[i].args[2] = 0;
-        mcl[i].args[3] = blkif->domid;
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
-    }
-
-    if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
-        BUG();
-
-    for ( i = 0; i < nr_psegs; i++ )
-    {
-        if ( unlikely(mcl[i].args[5] != 0) )
-        {
-            DPRINTK("invalid buffer -- could not remap it\n");
-            fast_flush_area(pending_idx, nr_psegs);
-            goto bad_descriptor;
-        }
-    }
-
-    pending_req = &pending_reqs[pending_idx];
-    pending_req->blkif     = blkif;
-    pending_req->id        = req->id;
-    pending_req->operation = operation;
-    pending_req->status    = BLKIF_RSP_OKAY;
-    pending_req->nr_pages  = nr_psegs;
-    atomic_set(&pending_req->pendcnt, nr_psegs);
-    pending_cons++;
-
-    blkif_get(blkif);
-
-    /* Now we pass each segment down to the real blkdev layer. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-    for ( i = 0; i < nr_psegs; i++ )
-    {
-        struct buffer_head *bh;
-
-        bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
-        if ( unlikely(bh == NULL) )
-        {
-            __end_block_io_op(pending_req, 0);
-            continue;
-        }
-
-        memset(bh, 0, sizeof (struct buffer_head));
-
-        init_waitqueue_head(&bh->b_wait);
-        bh->b_size          = phys_seg[i].nr_sects << 9;
-        bh->b_dev           = phys_seg[i].dev;
-        bh->b_rdev          = phys_seg[i].dev;
-        bh->b_rsector       = (unsigned long)phys_seg[i].sector_number;
-        bh->b_data          = (char *)MMAP_VADDR(pending_idx, i) +
-            (phys_seg[i].buffer & ~PAGE_MASK);
-        bh->b_page          = virt_to_page(MMAP_VADDR(pending_idx, i));
-        bh->b_end_io        = end_block_io_op;
-        bh->b_private       = pending_req;
-
-        bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | 
-            (1 << BH_Req) | (1 << BH_Launder);
-        if ( operation == WRITE )
-            bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
-
-        atomic_set(&bh->b_count, 1);
-
-        /* Dispatch a single request. We'll flush it to disc later. */
-        generic_make_request(operation, bh);
-    }
-#else
-    for ( i = 0; i < nr_psegs; i++ )
-    {
-        struct bio *bio;
-        struct bio_vec *bv;
-
-        bio = bio_alloc(GFP_ATOMIC, 1);
-        if ( unlikely(bio == NULL) )
-        {
-            __end_block_io_op(pending_req, 0);
-            continue;
-        }
-
-        bio->bi_bdev    = phys_seg[i].bdev;
-        bio->bi_private = pending_req;
-        bio->bi_end_io  = end_block_io_op;
-        bio->bi_sector  = phys_seg[i].sector_number;
-        bio->bi_rw      = operation;
-
-        bv = bio_iovec_idx(bio, 0);
-        bv->bv_page   = virt_to_page(MMAP_VADDR(pending_idx, i));
-        bv->bv_len    = phys_seg[i].nr_sects << 9;
-        bv->bv_offset = phys_seg[i].buffer & ~PAGE_MASK;
-
-        bio->bi_size    = bv->bv_len;
-        bio->bi_vcnt++;
-
-        submit_bio(operation, bio);
-    }
-#endif
-
-    return;
-
- bad_descriptor:
-    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-} 
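Each frame_and_sects entry packs a page-aligned buffer address with the first and last 512-byte sectors used within that page (three bits each); that is what the decode at the top of dispatch_rw_block_io and the page-sized check in dispatch_probe rely on. A hedged sketch of the layout, with helper names invented here (the real accessors are blkif_first_sect/blkif_last_sect from the shared blkif header):

    /* Assumed layout of a frame_and_sects entry:
     *   bits 12..   page-aligned buffer (machine) address
     *   bits 3..5   first sector used within the page (0..7)
     *   bits 0..2   last sector used within the page  (0..7)       */
    #define FAS_ENCODE(buf_ma, fsect, lsect) \
        (((buf_ma) & PAGE_MASK) | ((fsect) << 3) | (lsect))
    #define FAS_FIRST(fas)  (((fas) >> 3) & 7)
    #define FAS_LAST(fas)   ((fas) & 7)

    /* Decode, as done above:
     *   buffer   = (fas & PAGE_MASK) | (FAS_FIRST(fas) << 9);
     *   nr_sects = FAS_LAST(fas) - FAS_FIRST(fas) + 1;
     * A whole-page buffer therefore has FAS_FIRST == 0 and FAS_LAST == 7,
     * which is exactly what dispatch_probe() insists on.            */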
-
-
-
-/******************************************************************
- * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
- */
-
-
-static void make_response(blkif_t *blkif, unsigned long id, 
-                          unsigned short op, int st)
-{
-    blkif_response_t *resp;
-    unsigned long     flags;
-
-    /* Place on the response ring for the relevant domain. */ 
-    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-    resp = &blkif->blk_ring_base->
-        ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
-    resp->id        = id;
-    resp->operation = op;
-    resp->status    = st;
-    wmb(); /* Ensure other side can see the response fields. */
-    blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
-    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-    /* Kick the relevant domain. */
-    notify_via_evtchn(blkif->evtchn);
-}
-
-void blkif_deschedule(blkif_t *blkif)
-{
-    remove_from_blkdev_list(blkif);
-}
-
-static int __init blkif_init(void)
-{
-    int i;
-
-    if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
-         !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
-        return 0;
-
-    blkif_interface_init();
-
-    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
-        BUG();
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    memset(pending_reqs, 0, sizeof(pending_reqs));
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
-    
-    spin_lock_init(&blkio_schedule_list_lock);
-    INIT_LIST_HEAD(&blkio_schedule_list);
-
-    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
-        BUG();
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-    buffer_head_cachep = kmem_cache_create(
-        "buffer_head_cache", sizeof(struct buffer_head),
-        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-#endif
-
-    blkif_ctrlif_init();
-
-    return 0;
-}
-
-__initcall(blkif_init);
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/common.h b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/common.h
deleted file mode 100644 (file)
index 0fa60cd..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-
-#ifndef __BLKIF__BACKEND__COMMON_H__
-#define __BLKIF__BACKEND__COMMON_H__
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/rbtree.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/hypervisor-ifs/io/blkif.h>
-
-#if 0
-#define ASSERT(_p) \
-    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
-    __LINE__, __FILE__); *(int*)0=0; }
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
-                           __FILE__ , __LINE__ , ## _a )
-#else
-#define ASSERT(_p) ((void)0)
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-typedef struct rb_root rb_root_t;
-typedef struct rb_node rb_node_t;
-#else
-struct block_device;
-#endif
-
-typedef struct blkif_st {
-    /* Unique identifier for this interface. */
-    domid_t          domid;
-    unsigned int     handle;
-    /* Physical parameters of the comms window. */
-    unsigned long    shmem_frame;
-    unsigned int     evtchn;
-    int              irq;
-    /* Comms information. */
-    blkif_ring_t    *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
-    BLKIF_RING_IDX     blk_req_cons;  /* Request consumer. */
-    BLKIF_RING_IDX     blk_resp_prod; /* Private version of resp. producer. */
-    /* VBDs attached to this interface. */
-    rb_root_t        vbd_rb;        /* Mapping from 16-bit vdevices to VBDs. */
-    spinlock_t       vbd_lock;      /* Protects VBD mapping. */
-    /* Private fields. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    /*
-     * DISCONNECT response is deferred until pending requests are ack'ed.
-     * We therefore need to store the id from the original request.
-     */
-    u8               disconnect_rspid;
-    struct blkif_st *hash_next;
-    struct list_head blkdev_list;
-    spinlock_t       blk_ring_lock;
-    atomic_t         refcnt;
-
-    struct work_struct work;
-} blkif_t;
-
-void blkif_create(blkif_be_create_t *create);
-void blkif_destroy(blkif_be_destroy_t *destroy);
-void blkif_connect(blkif_be_connect_t *connect);
-int  blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
-void blkif_disconnect_complete(blkif_t *blkif);
-blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
-#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define blkif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            blkif_disconnect_complete(_b);        \
-    } while (0)
-
-/* An entry in a list of xen_extents. */
-typedef struct _blkif_extent_le { 
-    blkif_extent_t extent;               /* an individual extent */
-    struct _blkif_extent_le *next;       /* and a pointer to the next */ 
-    struct block_device *bdev;
-} blkif_extent_le_t; 
-
-typedef struct _vbd { 
-    blkif_vdev_t       vdevice;   /* what the domain refers to this vbd as */
-    unsigned char      readonly;  /* Non-zero -> read-only */
-    unsigned char      type;      /* VDISK_TYPE_xxx */
-    blkif_extent_le_t *extents;   /* list of xen_extents making up this vbd */
-    rb_node_t          rb;        /* for linking into R-B tree lookup struct */
-} vbd_t; 
-
-void vbd_create(blkif_be_vbd_create_t *create); 
-void vbd_grow(blkif_be_vbd_grow_t *grow); 
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
-void vbd_destroy(blkif_be_vbd_destroy_t *delete); 
-int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
-void destroy_all_vbds(blkif_t *blkif);
-
-/* Describes a [partial] disk extent (part of a block io request) */
-typedef struct {
-    unsigned short       dev;
-    unsigned short       nr_sects;
-    struct block_device *bdev;
-    unsigned long        buffer;
-    blkif_sector_t       sector_number;
-} phys_seg_t;
-
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation); 
-
-void blkif_interface_init(void);
-void blkif_ctrlif_init(void);
-
-void blkif_deschedule(blkif_t *blkif);
-
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-
-#endif /* __BLKIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/control.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/control.c
deleted file mode 100644 (file)
index b55bae7..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/control.c
- * 
- * Routines for interfacing with the control plane.
- * 
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    DPRINTK("Received blkif backend message, subtype=%d\n", msg->subtype);
-    
-    switch ( msg->subtype )
-    {
-    case CMSG_BLKIF_BE_CREATE:
-        if ( msg->length != sizeof(blkif_be_create_t) )
-            goto parse_error;
-        blkif_create((blkif_be_create_t *)&msg->msg[0]);
-        break;        
-    case CMSG_BLKIF_BE_DESTROY:
-        if ( msg->length != sizeof(blkif_be_destroy_t) )
-            goto parse_error;
-        blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
-        break;        
-    case CMSG_BLKIF_BE_CONNECT:
-        if ( msg->length != sizeof(blkif_be_connect_t) )
-            goto parse_error;
-        blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
-        break;        
-    case CMSG_BLKIF_BE_DISCONNECT:
-        if ( msg->length != sizeof(blkif_be_disconnect_t) )
-            goto parse_error;
-        if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
-            return; /* Sending the response is deferred until later. */
-        break;        
-    case CMSG_BLKIF_BE_VBD_CREATE:
-        if ( msg->length != sizeof(blkif_be_vbd_create_t) )
-            goto parse_error;
-        vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
-        break;
-    case CMSG_BLKIF_BE_VBD_DESTROY:
-        if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
-            goto parse_error;
-        vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
-        break;
-    case CMSG_BLKIF_BE_VBD_GROW:
-        if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
-            goto parse_error;
-        vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
-        break;
-    case CMSG_BLKIF_BE_VBD_SHRINK:
-        if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
-            goto parse_error;
-        vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
-        break;
-    default:
-        goto parse_error;
-    }
-
-    ctrl_if_send_response(msg);
-    return;
-
- parse_error:
-    DPRINTK("Parse error while reading message subtype %d, len %d\n",
-            msg->subtype, msg->length);
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-void blkif_ctrlif_init(void)
-{
-    ctrl_msg_t cmsg;
-    blkif_be_driver_status_t st;
-
-    (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx, 
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-
-    /* Send a driver-UP notification to the domain controller. */
-    cmsg.type      = CMSG_BLKIF_BE;
-    cmsg.subtype   = CMSG_BLKIF_BE_DRIVER_STATUS;
-    cmsg.length    = sizeof(blkif_be_driver_status_t);
-    st.status      = BLKIF_DRIVER_STATUS_UP;
-    memcpy(cmsg.msg, &st, sizeof(st));
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/interface.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/interface.c
deleted file mode 100644 (file)
index 4196014..0000000
+++ /dev/null
@@ -1,246 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/interface.c
- * 
- * Block-device interface management.
- * 
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#endif
-
-#define BLKIF_HASHSZ 1024
-#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
-
-static kmem_cache_t *blkif_cachep;
-static blkif_t      *blkif_hash[BLKIF_HASHSZ];
-
-blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
-{
-    blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
-    while ( (blkif != NULL) && 
-            ((blkif->domid != domid) || (blkif->handle != handle)) )
-        blkif = blkif->hash_next;
-    return blkif;
-}
-
-static void __blkif_disconnect_complete(void *arg)
-{
-    blkif_t              *blkif = (blkif_t *)arg;
-    ctrl_msg_t            cmsg;
-    blkif_be_disconnect_t disc;
-
-    /*
-     * These can't be done in blkif_disconnect() because at that point there
-     * may be outstanding requests at the disc whose asynchronous responses
-     * must still be notified to the remote driver.
-     */
-    unbind_evtchn_from_irq(blkif->evtchn);
-    vfree(blkif->blk_ring_base);
-
-    /* Construct the deferred response message. */
-    cmsg.type         = CMSG_BLKIF_BE;
-    cmsg.subtype      = CMSG_BLKIF_BE_DISCONNECT;
-    cmsg.id           = blkif->disconnect_rspid;
-    cmsg.length       = sizeof(blkif_be_disconnect_t);
-    disc.domid        = blkif->domid;
-    disc.blkif_handle = blkif->handle;
-    disc.status       = BLKIF_BE_STATUS_OKAY;
-    memcpy(cmsg.msg, &disc, sizeof(disc));
-
-    /*
-     * Make sure message is constructed /before/ status change, because
-     * after the status change the 'blkif' structure could be deallocated at
-     * any time. Also make sure we send the response /after/ status change,
-     * as otherwise a subsequent CONNECT request could spuriously fail if
-     * another CPU doesn't see the status change yet.
-     */
-    mb();
-    if ( blkif->status != DISCONNECTING )
-        BUG();
-    blkif->status = DISCONNECTED;
-    mb();
-
-    /* Send the successful response. */
-    ctrl_if_send_response(&cmsg);
-}
-
-void blkif_disconnect_complete(blkif_t *blkif)
-{
-    INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
-    schedule_work(&blkif->work);
-}
-
-void blkif_create(blkif_be_create_t *create)
-{
-    domid_t       domid  = create->domid;
-    unsigned int  handle = create->blkif_handle;
-    blkif_t     **pblkif, *blkif;
-
-    if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
-    {
-        DPRINTK("Could not create blkif: out of memory\n");
-        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    memset(blkif, 0, sizeof(*blkif));
-    blkif->domid  = domid;
-    blkif->handle = handle;
-    blkif->status = DISCONNECTED;
-    spin_lock_init(&blkif->vbd_lock);
-    spin_lock_init(&blkif->blk_ring_lock);
-    atomic_set(&blkif->refcnt, 0);
-
-    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
-    while ( *pblkif != NULL )
-    {
-        if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
-        {
-            DPRINTK("Could not create blkif: already exists\n");
-            create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
-            kmem_cache_free(blkif_cachep, blkif);
-            return;
-        }
-        pblkif = &(*pblkif)->hash_next;
-    }
-
-    blkif->hash_next = *pblkif;
-    *pblkif = blkif;
-
-    DPRINTK("Successfully created blkif\n");
-    create->status = BLKIF_BE_STATUS_OKAY;
-}
-
-void blkif_destroy(blkif_be_destroy_t *destroy)
-{
-    domid_t       domid  = destroy->domid;
-    unsigned int  handle = destroy->blkif_handle;
-    blkif_t     **pblkif, *blkif;
-
-    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
-    while ( (blkif = *pblkif) != NULL )
-    {
-        if ( (blkif->domid == domid) && (blkif->handle == handle) )
-        {
-            if ( blkif->status != DISCONNECTED )
-                goto still_connected;
-            goto destroy;
-        }
-        pblkif = &blkif->hash_next;
-    }
-
-    destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-    return;
-
- still_connected:
-    destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
-    return;
-
- destroy:
-    *pblkif = blkif->hash_next;
-    destroy_all_vbds(blkif);
-    kmem_cache_free(blkif_cachep, blkif);
-    destroy->status = BLKIF_BE_STATUS_OKAY;
-}
-
-void blkif_connect(blkif_be_connect_t *connect)
-{
-    domid_t       domid  = connect->domid;
-    unsigned int  handle = connect->blkif_handle;
-    unsigned int  evtchn = connect->evtchn;
-    unsigned long shmem_frame = connect->shmem_frame;
-    struct vm_struct *vma;
-    pgprot_t      prot;
-    int           error;
-    blkif_t      *blkif;
-
-    blkif = blkif_find_by_handle(domid, handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n", 
-                connect->domid, connect->blkif_handle); 
-        connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
-    {
-        connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
-    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
-                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
-                                    prot, domid);
-    if ( error != 0 )
-    {
-        if ( error == -ENOMEM )
-            connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
-        else if ( error == -EFAULT )
-            connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
-        else
-            connect->status = BLKIF_BE_STATUS_ERROR;
-        vfree(vma->addr);
-        return;
-    }
-
-    if ( blkif->status != DISCONNECTED )
-    {
-        connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
-        vfree(vma->addr);
-        return;
-    }
-
-    blkif->evtchn        = evtchn;
-    blkif->irq           = bind_evtchn_to_irq(evtchn);
-    blkif->shmem_frame   = shmem_frame;
-    blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
-    blkif->status        = CONNECTED;
-    blkif_get(blkif);
-
-    request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
-
-    connect->status = BLKIF_BE_STATUS_OKAY;
-}
-
-int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
-{
-    domid_t       domid  = disconnect->domid;
-    unsigned int  handle = disconnect->blkif_handle;
-    blkif_t      *blkif;
-
-    blkif = blkif_find_by_handle(domid, handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("blkif_disconnect attempted for non-existent blkif"
-                " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle); 
-        disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return 1; /* Caller will send response error message. */
-    }
-
-    if ( blkif->status == CONNECTED )
-    {
-        blkif->status = DISCONNECTING;
-        blkif->disconnect_rspid = rsp_id;
-        wmb(); /* Let other CPUs see the status change. */
-        free_irq(blkif->irq, blkif);
-        blkif_deschedule(blkif);
-        blkif_put(blkif);
-        return 0; /* Caller should not send response message. */
-    }
-
-    disconnect->status = BLKIF_BE_STATUS_OKAY;
-    return 1;
-}
-
-void __init blkif_interface_init(void)
-{
-    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
-                                     0, 0, NULL, NULL);
-    memset(blkif_hash, 0, sizeof(blkif_hash));
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/vbd.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkback/vbd.c
deleted file mode 100644 (file)
index b530128..0000000
+++ /dev/null
@@ -1,576 +0,0 @@
-/******************************************************************************
- * blkback/vbd.c
- * 
- * Routines for managing virtual block devices (VBDs).
- * 
- * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups 
- * in vbd_translate.  All other lookups are implicitly protected because the 
- * only caller (the control message dispatch routine) serializes the calls.
- * 
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- */
-
-#include "common.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static dev_t vbd_map_devnum(blkif_pdev_t);
-#endif
-
-void vbd_create(blkif_be_vbd_create_t *create) 
-{
-    vbd_t       *vbd; 
-    rb_node_t  **rb_p, *rb_parent = NULL;
-    blkif_t     *blkif;
-    blkif_vdev_t vdevice = create->vdevice;
-
-    blkif = blkif_find_by_handle(create->domid, create->blkif_handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("vbd_create attempted for non-existent blkif (%u,%u)\n", 
-                create->domid, create->blkif_handle); 
-        create->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    rb_p = &blkif->vbd_rb.rb_node;
-    while ( *rb_p != NULL )
-    {
-        rb_parent = *rb_p;
-        vbd = rb_entry(rb_parent, vbd_t, rb);
-        if ( vdevice < vbd->vdevice )
-        {
-            rb_p = &rb_parent->rb_left;
-        }
-        else if ( vdevice > vbd->vdevice )
-        {
-            rb_p = &rb_parent->rb_right;
-        }
-        else
-        {
-            DPRINTK("vbd_create attempted for already existing vbd\n");
-            create->status = BLKIF_BE_STATUS_VBD_EXISTS;
-            return;
-        }
-    }
-
-    if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
-    {
-        DPRINTK("vbd_create: out of memory\n");
-        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    vbd->vdevice  = vdevice; 
-    vbd->readonly = create->readonly;
-    vbd->type     = VDISK_TYPE_DISK | VDISK_FLAG_VIRT;
-    vbd->extents  = NULL; 
-
-    spin_lock(&blkif->vbd_lock);
-    rb_link_node(&vbd->rb, rb_parent, rb_p);
-    rb_insert_color(&vbd->rb, &blkif->vbd_rb);
-    spin_unlock(&blkif->vbd_lock);
-
-    DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
-            vdevice, create->domid);
-    create->status = BLKIF_BE_STATUS_OKAY;
-}
-
-
-/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
-void vbd_grow(blkif_be_vbd_grow_t *grow) 
-{
-    blkif_t            *blkif;
-    blkif_extent_le_t **px, *x; 
-    vbd_t              *vbd = NULL;
-    rb_node_t          *rb;
-    blkif_vdev_t        vdevice = grow->vdevice;
-    unsigned long       sz;
-
-    blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n", 
-                grow->domid, grow->blkif_handle); 
-        grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    rb = blkif->vbd_rb.rb_node;
-    while ( rb != NULL )
-    {
-        vbd = rb_entry(rb, vbd_t, rb);
-        if ( vdevice < vbd->vdevice )
-            rb = rb->rb_left;
-        else if ( vdevice > vbd->vdevice )
-            rb = rb->rb_right;
-        else
-            break;
-    }
-
-    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
-    {
-        DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
-        grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
-        return;
-    } 
-
-    if ( grow->extent.sector_start > 0 )
-    {
-        DPRINTK("vbd_grow: dev %08x start not zero.\n", grow->extent.device);
-        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        return;
-    }
-
-    if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t), 
-                               GFP_KERNEL)) == NULL) )
-    {
-        DPRINTK("vbd_grow: out of memory\n");
-        grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    x->extent.device        = grow->extent.device;
-    x->extent.sector_start  = grow->extent.sector_start;
-    x->extent.sector_length = grow->extent.sector_length;
-    x->next                 = (blkif_extent_le_t *)NULL;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    x->bdev = open_by_devnum(vbd_map_devnum(x->extent.device),
-                             vbd->readonly ? FMODE_READ : FMODE_WRITE);
-    if ( IS_ERR(x->bdev) )
-    {
-        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
-        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        goto out;
-    }
-    /* XXXcl maybe bd_claim? */
-
-    if ( (x->bdev->bd_disk == NULL) )
-    {
-        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
-        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        blkdev_put(x->bdev);
-        goto out;
-    }
-
-    /* get size in sectors */   
-    if ( x->bdev->bd_part )
-        sz = x->bdev->bd_part->nr_sects;
-    else
-        sz = x->bdev->bd_disk->capacity;
-
-#else
-    if( !blk_size[MAJOR(x->extent.device)] )
-    {
-        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
-        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        goto out;
-    }
-    
-    /* convert blocks (1KB) to sectors */
-    sz = blk_size[MAJOR(x->extent.device)][MINOR(x->extent.device)] * 2;    
-    
-    if ( sz == 0 )
-    {
-        DPRINTK("vbd_grow: device %08x zero size!\n", x->extent.device);
-        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        goto out;
-    }
-#endif
-
-    /*
-     * NB. This test assumes sector_start == 0, which is always the case
-     * in Xen 1.3. In fact the whole grow/shrink interface could do with
-     * some simplification.
-     */
-    if ( x->extent.sector_length > sz )
-        x->extent.sector_length = sz;
-    
-    DPRINTK("vbd_grow: requested_len %llu actual_len %lu\n", 
-            x->extent.sector_length, sz);
-
-    for ( px = &vbd->extents; *px != NULL; px = &(*px)->next ) 
-        continue;
-    
-    *px = x; /* ATOMIC: no need for vbd_lock. */
-
-    DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
-            vdevice, grow->domid);
-    
-    grow->status = BLKIF_BE_STATUS_OKAY;
-    return;
-
- out:
-    kfree(x);
-}
-
-
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
-{
-    blkif_t            *blkif;
-    blkif_extent_le_t **px, *x; 
-    vbd_t              *vbd = NULL;
-    rb_node_t          *rb;
-    blkif_vdev_t        vdevice = shrink->vdevice;
-
-    blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n", 
-                shrink->domid, shrink->blkif_handle); 
-        shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    rb = blkif->vbd_rb.rb_node;
-    while ( rb != NULL )
-    {
-        vbd = rb_entry(rb, vbd_t, rb);
-        if ( vdevice < vbd->vdevice )
-            rb = rb->rb_left;
-        else if ( vdevice > vbd->vdevice )
-            rb = rb->rb_right;
-        else
-            break;
-    }
-
-    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
-    {
-        shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
-        return;
-    }
-
-    if ( unlikely(vbd->extents == NULL) )
-    {
-        shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
-        return;
-    }
-
-    /* Find the last extent. We now know that there is at least one. */
-    for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
-        continue;
-
-    x   = *px;
-    *px = x->next; /* ATOMIC: no need for vbd_lock. */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    blkdev_put(x->bdev);
-#endif
-    kfree(x);
-
-    shrink->status = BLKIF_BE_STATUS_OKAY;
-}
-
-
-void vbd_destroy(blkif_be_vbd_destroy_t *destroy) 
-{
-    blkif_t           *blkif;
-    vbd_t             *vbd;
-    rb_node_t         *rb;
-    blkif_extent_le_t *x, *t;
-    blkif_vdev_t       vdevice = destroy->vdevice;
-
-    blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
-    if ( unlikely(blkif == NULL) )
-    {
-        DPRINTK("vbd_destroy attempted for non-existent blkif (%u,%u)\n", 
-                destroy->domid, destroy->blkif_handle); 
-        destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    rb = blkif->vbd_rb.rb_node;
-    while ( rb != NULL )
-    {
-        vbd = rb_entry(rb, vbd_t, rb);
-        if ( vdevice < vbd->vdevice )
-            rb = rb->rb_left;
-        else if ( vdevice > vbd->vdevice )
-            rb = rb->rb_right;
-        else
-            goto found;
-    }
-
-    destroy->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
-    return;
-
- found:
-    spin_lock(&blkif->vbd_lock);
-    rb_erase(rb, &blkif->vbd_rb);
-    spin_unlock(&blkif->vbd_lock);
-
-    x = vbd->extents;
-    kfree(vbd);
-
-    while ( x != NULL )
-    {
-        t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        blkdev_put(x->bdev);
-#endif
-        kfree(x);
-        x = t;
-    }
-}
-
-
-void destroy_all_vbds(blkif_t *blkif)
-{
-    vbd_t             *vbd;
-    rb_node_t         *rb;
-    blkif_extent_le_t *x, *t;
-
-    spin_lock(&blkif->vbd_lock);
-
-    while ( (rb = blkif->vbd_rb.rb_node) != NULL )
-    {
-        vbd = rb_entry(rb, vbd_t, rb);
-
-        rb_erase(rb, &blkif->vbd_rb);
-        x = vbd->extents;
-        kfree(vbd);
-        
-        while ( x != NULL )
-        {
-            t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-            blkdev_put(x->bdev);
-#endif
-            kfree(x);
-            x = t;
-        }          
-    }
-
-    spin_unlock(&blkif->vbd_lock);
-}
-
-
-static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
-{
-    blkif_extent_le_t *x; 
-
-    vbd_info->device = vbd->vdevice; 
-    vbd_info->info   = vbd->type;
-    if ( vbd->readonly )
-        vbd_info->info |= VDISK_FLAG_RO; 
-    vbd_info->capacity = 0ULL;
-    for ( x = vbd->extents; x != NULL; x = x->next )
-        vbd_info->capacity += x->extent.sector_length; 
-        
-    return 0;
-}
-
-
-int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
-{
-    int        rc = 0, nr_vbds = 0;
-    rb_node_t *rb;
-
-    spin_lock(&blkif->vbd_lock);
-
-    if ( (rb = blkif->vbd_rb.rb_node) == NULL )
-        goto out;
-
- new_subtree:
-    /* STEP 1. Find least node (it'll be left-most). */
-    while ( rb->rb_left != NULL )
-        rb = rb->rb_left;
-
-    for ( ; ; )
-    {
-        /* STEP 2. Dealt with left subtree. Now process current node. */
-        if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds], 
-                                    rb_entry(rb, vbd_t, rb))) != 0 )
-            goto out;
-        if ( ++nr_vbds == max_vbds )
-            goto out;
-
-        /* STEP 3. Process right subtree, if any. */
-        if ( rb->rb_right != NULL )
-        {
-            rb = rb->rb_right;
-            goto new_subtree;
-        }
-
-        /* STEP 4. Done both subtrees. Head back through ancestors. */
-        for ( ; ; ) 
-        {
-            /* We're done when we get back to the root node. */
-            if ( rb->rb_parent == NULL )
-                goto out;
-            /* If we are left of parent, then parent is next to process. */
-            if ( rb->rb_parent->rb_left == rb )
-                break;
-            /* If we are right of parent, then we climb to grandparent. */
-            rb = rb->rb_parent;
-        }
-
-        rb = rb->rb_parent;
-    }
-
- out:
-    spin_unlock(&blkif->vbd_lock);
-    return (rc == 0) ? nr_vbds : rc;  
-}
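The STEP 1-4 loop above is an iterative in-order traversal written against the old rb_node layout with explicit rb_parent pointers. The same walk expressed recursively, as a sketch (the visit callback is invented here for illustration; the real code inlines vbd_probe_single and the max_vbds cut-off):

    /* Recursive equivalent of the in-order walk above: visits VBDs in
     * ascending vdevice order and stops early if visit() returns non-zero. */
    static int vbd_walk(rb_node_t *rb, int (*visit)(vbd_t *vbd))
    {
        int rc;

        if ( rb == NULL )
            return 0;
        if ( (rc = vbd_walk(rb->rb_left, visit)) != 0 )
            return rc;
        if ( (rc = visit(rb_entry(rb, vbd_t, rb))) != 0 )
            return rc;
        return vbd_walk(rb->rb_right, visit);
    }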
-
-
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
-{
-    blkif_extent_le_t *x; 
-    vbd_t             *vbd;
-    rb_node_t         *rb;
-    blkif_sector_t     sec_off;
-    unsigned long      nr_secs;
-
-    /* Take the vbd_lock because another thread could be updating the tree. */
-    spin_lock(&blkif->vbd_lock);
-
-    rb = blkif->vbd_rb.rb_node;
-    while ( rb != NULL )
-    {
-        vbd = rb_entry(rb, vbd_t, rb);
-        if ( pseg->dev < vbd->vdevice )
-            rb = rb->rb_left;
-        else if ( pseg->dev > vbd->vdevice )
-            rb = rb->rb_right;
-        else
-            goto found;
-    }
-
-    DPRINTK("vbd_translate: domain %u attempted to access "
-            "non-existent VBD.\n", blkif->domid);
-
-    spin_unlock(&blkif->vbd_lock);
-    return -ENODEV; 
-
- found:
-
-    if ( (operation == WRITE) && vbd->readonly )
-    {
-        spin_unlock(&blkif->vbd_lock);
-        return -EACCES; 
-    }
-
-    /*
-     * Now iterate through the list of blkif_extents, working out which should 
-     * be used to perform the translation.
-     */
-    sec_off = pseg->sector_number; 
-    nr_secs = pseg->nr_sects;
-    for ( x = vbd->extents; x != NULL; x = x->next )
-    { 
-        if ( sec_off < x->extent.sector_length )
-        {
-            pseg->dev  = x->extent.device;
-            pseg->bdev = x->bdev;
-            pseg->sector_number = x->extent.sector_start + sec_off;
-            if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
-                goto overrun;
-            spin_unlock(&blkif->vbd_lock);
-            return 1;
-        } 
-        sec_off -= x->extent.sector_length; 
-    }
-
-    DPRINTK("vbd_translate: end of vbd.\n");
-    spin_unlock(&blkif->vbd_lock);
-    return -EACCES; 
-
-    /*
-     * Here we deal with overrun onto the following extent. We don't deal with 
-     * overrun of more than one boundary since each request is restricted to 
-     * 2^9 512-byte sectors, so it should be trivial for control software to 
-     * ensure that extents are large enough to prevent excessive overrun.
-     */
- overrun:
-
-    /* Adjust length of first chunk to run to end of first extent. */
-    pseg[0].nr_sects = x->extent.sector_length - sec_off;
-
-    /* Set second chunk buffer and length to start where first chunk ended. */
-    pseg[1].buffer   = pseg[0].buffer + (pseg[0].nr_sects << 9);
-    pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
-
-    /* Now move to the next extent. Check it exists and is long enough! */
-    if ( unlikely((x = x->next) == NULL) || 
-         unlikely(x->extent.sector_length < pseg[1].nr_sects) )
-    {
-        DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
-        spin_unlock(&blkif->vbd_lock);
-        return -EACCES;
-    }
-
-    /* Store the real device and start sector for the second chunk. */
-    pseg[1].dev           = x->extent.device;
-    pseg[1].bdev          = x->bdev;
-    pseg[1].sector_number = x->extent.sector_start;
-    
-    spin_unlock(&blkif->vbd_lock);
-    return 2;
-}
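A concrete illustration of the overrun path, using made-up extent sizes:

    /* Worked example (illustrative numbers): two extents of 1000 and 4000
     * sectors; the guest asks for 16 sectors starting at VBD sector 996.
     *
     *   sec_off = 996, nr_secs = 16         falls inside extent 0
     *   996 + 16 > 1000                     -> overrun
     *   pseg[0].nr_sects = 1000 - 996 = 4   tail of extent 0
     *   pseg[1].buffer   = pseg[0].buffer + (4 << 9)
     *   pseg[1].nr_sects = 16 - 4 = 12      head of extent 1
     *   return 2;                           caller queues two physical segments
     */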
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-#define MAJOR_XEN(dev) ((dev)>>8)
-#define MINOR_XEN(dev) ((dev) & 0xff)
-
-#ifndef FANCY_REMAPPING
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
-    int major = MAJOR_XEN(cookie);
-    int minor = MINOR_XEN(cookie);
-
-    return MKDEV(major, minor);
-}
-#else
-#define XEN_IDE0_MAJOR IDE0_MAJOR
-#define XEN_IDE1_MAJOR IDE1_MAJOR
-#define XEN_IDE2_MAJOR IDE2_MAJOR
-#define XEN_IDE3_MAJOR IDE3_MAJOR
-#define XEN_IDE4_MAJOR IDE4_MAJOR
-#define XEN_IDE5_MAJOR IDE5_MAJOR
-#define XEN_IDE6_MAJOR IDE6_MAJOR
-#define XEN_IDE7_MAJOR IDE7_MAJOR
-#define XEN_IDE8_MAJOR IDE8_MAJOR
-#define XEN_IDE9_MAJOR IDE9_MAJOR
-#define XEN_SCSI_DISK0_MAJOR SCSI_DISK0_MAJOR
-#define XEN_SCSI_DISK1_MAJOR SCSI_DISK1_MAJOR
-#define XEN_SCSI_DISK2_MAJOR SCSI_DISK2_MAJOR
-#define XEN_SCSI_DISK3_MAJOR SCSI_DISK3_MAJOR
-#define XEN_SCSI_DISK4_MAJOR SCSI_DISK4_MAJOR
-#define XEN_SCSI_DISK5_MAJOR SCSI_DISK5_MAJOR
-#define XEN_SCSI_DISK6_MAJOR SCSI_DISK6_MAJOR
-#define XEN_SCSI_DISK7_MAJOR SCSI_DISK7_MAJOR
-#define XEN_SCSI_CDROM_MAJOR SCSI_CDROM_MAJOR
-
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
-    int new_major;
-    int major = MAJOR_XEN(cookie);
-    int minor = MINOR_XEN(cookie);
-
-    switch (major) {
-    case XEN_IDE0_MAJOR: new_major = IDE0_MAJOR; break;
-    case XEN_IDE1_MAJOR: new_major = IDE1_MAJOR; break;
-    case XEN_IDE2_MAJOR: new_major = IDE2_MAJOR; break;
-    case XEN_IDE3_MAJOR: new_major = IDE3_MAJOR; break;
-    case XEN_IDE4_MAJOR: new_major = IDE4_MAJOR; break;
-    case XEN_IDE5_MAJOR: new_major = IDE5_MAJOR; break;
-    case XEN_IDE6_MAJOR: new_major = IDE6_MAJOR; break;
-    case XEN_IDE7_MAJOR: new_major = IDE7_MAJOR; break;
-    case XEN_IDE8_MAJOR: new_major = IDE8_MAJOR; break;
-    case XEN_IDE9_MAJOR: new_major = IDE9_MAJOR; break;
-    case XEN_SCSI_DISK0_MAJOR: new_major = SCSI_DISK0_MAJOR; break;
-    case XEN_SCSI_DISK1_MAJOR ... XEN_SCSI_DISK7_MAJOR:
-        new_major = SCSI_DISK1_MAJOR + major - XEN_SCSI_DISK1_MAJOR;
-        break;
-    case XEN_SCSI_CDROM_MAJOR: new_major = SCSI_CDROM_MAJOR; break;
-    default: new_major = 0; break;
-    }
-
-    return MKDEV(new_major, minor);
-}
-#endif
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */
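As a quick example of the cookie format handled by vbd_map_devnum: the low 8 bits carry the minor and the remaining bits the major, so (assuming the usual Linux major numbering):

    /* Example decode (illustrative): cookie 0x0301
     *   MAJOR_XEN(0x0301) == 0x03  ->  IDE0_MAJOR (3)
     *   MINOR_XEN(0x0301) == 0x01
     *   vbd_map_devnum(0x0301) == MKDEV(3, 1), i.e. /dev/hda1 on 2.6
     */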
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Kconfig b/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Kconfig
deleted file mode 100644 (file)
index edde837..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-
-config XENBLOCK
-       tristate "Block device driver"
-       depends on ARCH_XEN
-       help
-         Block device driver for Xen
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/Makefile
deleted file mode 100644 (file)
index 5d1707d..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-
-obj-y  := blkfront.o vbd.o
-
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/blkfront.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/blkfront.c
deleted file mode 100644 (file)
index b98e7c3..0000000
+++ /dev/null
@@ -1,1424 +0,0 @@
-/******************************************************************************
- * blkfront.c
- * 
- * XenLinux virtual block-device driver.
- * 
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004, Christian Limpach
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include "block.h"
-#else
-#include "common.h"
-#include <linux/blk.h>
-#include <linux/tqueue.h>
-#endif
-
-#include <linux/cdrom.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <scsi/scsi.h>
-#include <asm-xen/ctrl_if.h>
-
-typedef unsigned char byte; /* from linux/ide.h */
-
-/* Control whether runtime update of vbds is enabled. */
-#define ENABLE_VBD_UPDATE 1
-
-#if ENABLE_VBD_UPDATE
-static void vbd_update(void);
-#else
-static void vbd_update(void){};
-#endif
-
-#define BLKIF_STATE_CLOSED       0
-#define BLKIF_STATE_DISCONNECTED 1
-#define BLKIF_STATE_CONNECTED    2
-
-static char *blkif_state_name[] = {
-    [BLKIF_STATE_CLOSED]       = "closed",
-    [BLKIF_STATE_DISCONNECTED] = "disconnected",
-    [BLKIF_STATE_CONNECTED]    = "connected",
-};
-
-static char * blkif_status_name[] = {
-    [BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
-    [BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
-    [BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
-    [BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
-};
-
-#if 1
-#define dprintf(fmt, args...) \
-printk(KERN_ALERT "[XEN:%s:%s:%d] " fmt, __FUNCTION__, __FILE__, __LINE__, ##args)
-#endif
-
-#define WPRINTK(fmt, args...) printk(KERN_WARNING "[XEN] " fmt, ##args)
-
-static int blkif_handle = 0;
-static unsigned int blkif_state = BLKIF_STATE_CLOSED;
-static unsigned int blkif_evtchn = 0;
-static unsigned int blkif_irq = 0;
-
-static int blkif_control_rsp_valid;
-static blkif_response_t blkif_control_rsp;
-
-static blkif_ring_t *blk_ring = NULL;
-static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
-static BLKIF_RING_IDX req_prod;  /* Private request producer.         */
-
-unsigned long rec_ring_free;
-blkif_request_t rec_ring[BLKIF_RING_SIZE];
-
-static int recovery = 0;           /* "Recovery in progress" flag.  Protected
-                                    * by the blkif_io_lock */
-
-/* We plug the I/O ring if the driver is suspended or if the ring is full. */
-#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
-                         (blkif_state != BLKIF_STATE_CONNECTED))
-
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
-                                        blkif_request_t *req);
-
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
-                                        blkif_request_t *req);
-
-static inline void flush_requests(void);
-
-static void kick_pending_request_queues(void);
-
-int __init xlblk_init(void);
-
-void blkif_completion( blkif_request_t *req );
-
-static inline int GET_ID_FROM_FREELIST( void )
-{
-    unsigned long free = rec_ring_free;
-
-    if ( free > BLKIF_RING_SIZE )
-        BUG();
-
-    rec_ring_free = rec_ring[free].id;
-
-    rec_ring[free].id = 0x0fffffee; /* debug */
-
-    return free;
-}
-
-static inline void ADD_ID_TO_FREELIST( unsigned long id )
-{
-    rec_ring[id].id = rec_ring_free;
-    rec_ring_free = id;
-}
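GET_ID_FROM_FREELIST/ADD_ID_TO_FREELIST thread a free list through the otherwise-unused id fields of rec_ring, so no separate free-list array is needed. The seeding is not part of this hunk; the sketch below shows how it is presumably done (an assumption, flagged here), plus the typical lifecycle of a slot:

    /* Sketch only: assumed seeding of the free list (the real initialisation
     * lives elsewhere in the driver).  Slot i's id names the next free slot. */
    static void rec_ring_init(void)
    {
        unsigned long i;
        rec_ring_free = 0;
        for ( i = 0; i < BLKIF_RING_SIZE; i++ )
            rec_ring[i].id = i + 1;
    }

    /* Typical lifecycle of a slot:
     *   id = GET_ID_FROM_FREELIST();              take a free slot
     *   rec_ring[id].id = (unsigned long)req;     remember the Linux request
     *   ring_req->id = id;                        hand the index to the backend
     *   ...                                       response comes back carrying id
     *   req = (struct request *)rec_ring[id].id;
     *   ADD_ID_TO_FREELIST(id);                   slot becomes free again
     */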
-
-
-/**************************  KERNEL VERSION 2.6  **************************/
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-#define DISABLE_SCATTERGATHER() 
-
-__initcall(xlblk_init);
-
-#if ENABLE_VBD_UPDATE
-static void vbd_update()
-{
-    dprintf(">\n");
-    dprintf("<\n");
-}
-#endif /* ENABLE_VBD_UPDATE */
-
-static void kick_pending_request_queues(void)
-{
-
-    if ( (xlbd_blk_queue != NULL) &&
-         test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
-    {
-        blk_start_queue(xlbd_blk_queue);
-        /* XXXcl call to request_fn should not be needed but
-         * we get stuck without...  needs investigating
-         */
-        xlbd_blk_queue->request_fn(xlbd_blk_queue);
-    }
-
-}
-
-
-int blkif_open(struct inode *inode, struct file *filep)
-{
-    struct gendisk *gd = inode->i_bdev->bd_disk;
-    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
-
-    /* Update of usage count is protected by per-device semaphore. */
-    di->mi->usage++;
-    
-    return 0;
-}
-
-
-int blkif_release(struct inode *inode, struct file *filep)
-{
-    struct gendisk *gd = inode->i_bdev->bd_disk;
-    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
-
-    /*
-     * When usage drops to zero it may allow more VBD updates to occur.
-     * Update of usage count is protected by a per-device semaphore.
-     */
-    if (--di->mi->usage == 0) {
-        vbd_update();
-    }
-
-    return 0;
-}
-
-
-int blkif_ioctl(struct inode *inode, struct file *filep,
-                unsigned command, unsigned long argument)
-{
-    /*  struct gendisk *gd = inode->i_bdev->bd_disk; */
-
-    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-                  command, (long)argument, inode->i_rdev); 
-  
-    switch (command) {
-
-    case HDIO_GETGEO:
-        /* return ENOSYS to use defaults */
-        return -ENOSYS;
-
-    default:
-        printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-               command);
-        return -ENOSYS;
-    }
-
-    return 0;
-}
-
-#if 0
-/* check media change: should probably do something here in some cases :-) */
-int blkif_check(kdev_t dev)
-{
-    DPRINTK("blkif_check\n");
-    return 0;
-}
-
-int blkif_revalidate(kdev_t dev)
-{
-    struct block_device *bd;
-    struct gendisk *gd;
-    xen_block_t *disk;
-    unsigned long capacity;
-    int i, rc = 0;
-    
-    if ( (bd = bdget(dev)) == NULL )
-        return -EINVAL;
-
-    /*
-     * Update of partition info, and check of usage count, is protected
-     * by the per-block-device semaphore.
-     */
-    down(&bd->bd_sem);
-
-    if ( ((gd = get_gendisk(dev)) == NULL) ||
-         ((disk = xldev_to_xldisk(dev)) == NULL) ||
-         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
-    {
-        rc = -EINVAL;
-        goto out;
-    }
-
-    if ( disk->usage > 1 )
-    {
-        rc = -EBUSY;
-        goto out;
-    }
-
-    /* Only reread partition table if VBDs aren't mapped to partitions. */
-    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
-    {
-        for ( i = gd->max_p - 1; i >= 0; i-- )
-        {
-            invalidate_device(dev+i, 1);
-            gd->part[MINOR(dev+i)].start_sect = 0;
-            gd->part[MINOR(dev+i)].nr_sects   = 0;
-            gd->sizes[MINOR(dev+i)]           = 0;
-        }
-
-        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
-    }
-
- out:
-    up(&bd->bd_sem);
-    bdput(bd);
-    return rc;
-}
-#endif
-
-/*
- * blkif_queue_request
- *
- * request block io 
- * 
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
- */
-static int blkif_queue_request(struct request *req)
-{
-    struct xlbd_disk_info *di =
-        (struct xlbd_disk_info *)req->rq_disk->private_data;
-    unsigned long buffer_ma;
-    blkif_request_t *ring_req;
-    struct bio *bio;
-    struct bio_vec *bvec;
-    int idx, s;
-    unsigned long id;
-    unsigned int fsect, lsect;
-
-    if (unlikely(blkif_state != BLKIF_STATE_CONNECTED))
-        return 1;
-
-    /* Fill out a communications ring structure. */
-    ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
-    id = GET_ID_FROM_FREELIST();
-    rec_ring[id].id = (unsigned long) req;
-
-    ring_req->id = id;
-    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
-        BLKIF_OP_READ;
-    ring_req->sector_number = (blkif_sector_t)req->sector;
-    ring_req->device = di->xd_device;
-
-    s = 0;
-    ring_req->nr_segments = 0;
-    rq_for_each_bio(bio, req) {
-        bio_for_each_segment(bvec, bio, idx) {
-            buffer_ma = page_to_phys(bvec->bv_page);
-            if (unlikely((buffer_ma & ((1<<9)-1)) != 0))
-                BUG();
-
-            fsect = bvec->bv_offset >> 9;
-            lsect = fsect + (bvec->bv_len >> 9) - 1;
-            if (unlikely(lsect > 7))
-                BUG();
-
-            ring_req->frame_and_sects[ring_req->nr_segments++] =
-                buffer_ma | (fsect << 3) | lsect;
-            s += bvec->bv_len >> 9;
-        }
-    }
-
-    req_prod++;
-
-    /* Keep a private copy so we can reissue requests when recovering. */
-    translate_req_to_pfn( &rec_ring[id], ring_req);
-
-    return 0;
-}
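
Each entry of frame_and_sects packs a page-aligned machine address together with the first and last 512-byte sector touched within that 4 KiB frame: address | (fsect << 3) | lsect, with both sector numbers in 0..7 (hence the BUG() when lsect > 7). A standalone sketch of the encode/decode, with illustrative helper names:

    #include <assert.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9              /* 512-byte sectors */
    #define FRAME_MASK   (~0xfffUL)     /* 4 KiB frame: low 12 bits clear */

    /* Pack a page-aligned address with the first/last sector used in it. */
    static unsigned long pack_seg(unsigned long frame_addr,
                                  unsigned int fsect, unsigned int lsect)
    {
        assert((frame_addr & ~FRAME_MASK) == 0);   /* page aligned */
        assert(fsect <= 7 && lsect <= 7 && fsect <= lsect);
        return frame_addr | (fsect << 3) | lsect;
    }

    static void unpack_seg(unsigned long seg, unsigned long *frame_addr,
                           unsigned int *fsect, unsigned int *lsect)
    {
        *frame_addr = seg & FRAME_MASK;
        *fsect      = (seg >> 3) & 7;
        *lsect      = seg & 7;
    }

    int main(void)
    {
        /* A 1536-byte buffer starting 1024 bytes into its page. */
        unsigned int fsect = 1024 >> SECTOR_SHIFT;                /* 2 */
        unsigned int lsect = fsect + (1536 >> SECTOR_SHIFT) - 1;  /* 4 */
        unsigned long seg  = pack_seg(0x12345000UL, fsect, lsect);
        unsigned long addr;
        unsigned int f, l;

        unpack_seg(seg, &addr, &f, &l);
        printf("addr=%#lx fsect=%u lsect=%u\n", addr, f, l);
        return 0;
    }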
-
-
-/*
- * do_blkif_request
- *  read a block; request is in a request queue
- */
-void do_blkif_request(request_queue_t *rq)
-{
-    struct request *req;
-    int queued;
-
-    DPRINTK("Entered do_blkif_request\n"); 
-
-    queued = 0;
-
-    while ((req = elv_next_request(rq)) != NULL) {
-        if (!blk_fs_request(req)) {
-            end_request(req, 0);
-            continue;
-        }
-
-        if ( BLKIF_RING_FULL )
-        {
-            blk_stop_queue(rq);
-            break;
-        }
-        DPRINTK("do_blkif_request %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
-                req, req->cmd, req->sector, req->current_nr_sectors,
-                req->nr_sectors, req->buffer,
-                rq_data_dir(req) ? "write" : "read");
-        blkdev_dequeue_request(req);
-        if (blkif_queue_request(req)) {
-            blk_stop_queue(rq);
-            break;
-        }
-        queued++;
-    }
-
-    if (queued != 0)
-        flush_requests();
-}
-
-
-static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
-    struct request *req;
-    blkif_response_t *bret;
-    BLKIF_RING_IDX i, rp;
-    unsigned long flags; 
-
-    spin_lock_irqsave(&blkif_io_lock, flags);     
-
-    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 
-         unlikely(recovery) )
-    {
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
-        return IRQ_HANDLED;
-    }
-
-    rp = blk_ring->resp_prod;
-    rmb(); /* Ensure we see queued responses up to 'rp'. */
-
-    for ( i = resp_cons; i != rp; i++ )
-    {
-       unsigned long id;
-        bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
-       id = bret->id;
-       req = (struct request *)rec_ring[id].id;
-
-       blkif_completion( &rec_ring[id] );
-
-       ADD_ID_TO_FREELIST(id); /* overwrites req */
-
-        switch ( bret->operation )
-        {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
-                DPRINTK("Bad return from blkdev data request: %x\n",
-                        bret->status);
-           
-            if ( unlikely(end_that_request_first
-                          (req, 
-                           (bret->status == BLKIF_RSP_OKAY),
-                           req->hard_nr_sectors)) )
-                BUG();
-            end_that_request_last(req);
-
-            break;
-        case BLKIF_OP_PROBE:
-            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
-            blkif_control_rsp_valid = 1;
-            break;
-        default:
-            BUG();
-        }
-    }
-    
-    resp_cons = i;
-
-    kick_pending_request_queues();
-
-    spin_unlock_irqrestore(&blkif_io_lock, flags);
-
-    return IRQ_HANDLED;
-}
-
-#else
-/**************************  KERNEL VERSION 2.4  **************************/
-
-static kdev_t        sg_dev;
-static int           sg_operation = -1;
-static unsigned long sg_next_sect;
-
-/*
- * Request queues with outstanding work, but ring is currently full.
- * We need no special lock here, as we always access this with the
- * blkif_io_lock held. We only need a small maximum list.
- */
-#define MAX_PENDING 8
-static request_queue_t *pending_queues[MAX_PENDING];
-static int nr_pending;
-
-
-#define DISABLE_SCATTERGATHER() (sg_operation = -1)
-
-#define blkif_io_lock io_request_lock
-
-/*============================================================================*/
-#if ENABLE_VBD_UPDATE
-
-/*
- * blkif_update_int/update_vbds_task - handle VBD update events.
- *  Schedule a task for keventd to run, which will update the VBDs and perform 
- *  the corresponding updates to our view of VBD state.
- */
-static void update_vbds_task(void *unused)
-{ 
-    xlvbd_update_vbds();
-}
-
-static void vbd_update(void)
-{
-    static struct tq_struct update_tq;
-    dprintf(">\n");
-    update_tq.routine = update_vbds_task;
-    schedule_task(&update_tq);
-    dprintf("<\n");
-}
-
-#endif /* ENABLE_VBD_UPDATE */
-/*============================================================================*/
-
-
-static void kick_pending_request_queues(void)
-{
-    /* We kick pending request queues if the ring is reasonably empty. */
-    if ( (nr_pending != 0) && 
-         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
-    {
-        /* Attempt to drain the queue, but bail if the ring becomes full. */
-        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
-            do_blkif_request(pending_queues[--nr_pending]);
-    }
-}
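
req_prod and resp_cons are free-running counters: they are only reduced modulo the ring size when a slot is actually indexed, so their difference is always the number of outstanding requests, and the test above only restarts queues once the ring is at most half full (BLKIF_RING_FULL, defined in a header not shown here, is the corresponding prod - cons == size test). A standalone sketch of that arrangement, with illustrative constants:

    #include <stdio.h>

    #define RING_SIZE 64                 /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    /* Free-running producer/consumer indices: unsigned wrap-around is
     * harmless, and prod - cons is always the current occupancy. */
    static unsigned int prod, cons;

    static int ring_full(void)
    {
        return (prod - cons) == RING_SIZE;
    }

    static int reasonably_empty(void)
    {
        return (prod - cons) < (RING_SIZE >> 1);
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < 100; i++)        /* try to queue 100 requests... */
            if (!ring_full())
                prod++;
        for (i = 0; i < 40; i++)         /* ...then retire 40 of them */
            cons++;

        printf("next slot %u, occupancy %u, kick=%d\n",
               prod & RING_MASK, prod - cons, reasonably_empty());
        return 0;
    }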
-
-int blkif_open(struct inode *inode, struct file *filep)
-{
-    short xldev = inode->i_rdev; 
-    struct gendisk *gd = get_gendisk(xldev);
-    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
-    short minor = MINOR(xldev); 
-
-    if ( gd->part[minor].nr_sects == 0 )
-    { 
-        /*
-         * Device either doesn't exist, or has zero capacity; we use a few
-         * cheesy heuristics to return the relevant error code
-         */
-        if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
-             ((minor & (gd->max_p - 1)) != 0) )
-        { 
-            /*
-             * We have a real device, but no such partition, or we just have a
-             * partition number, so guess that this is the problem.
-             */
-            return -ENXIO;     /* no such device or address */
-        }
-        else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
-        {
-            /* This is a removable device => assume that media is missing. */ 
-            return -ENOMEDIUM; /* media not present (this is a guess) */
-        } 
-        else
-        { 
-            /* Just go for the general 'no such device' error. */
-            return -ENODEV;    /* no such device */
-        }
-    }
-    
-    /* Update of usage count is protected by per-device semaphore. */
-    disk->usage++;
-
-    return 0;
-}
-
-
-int blkif_release(struct inode *inode, struct file *filep)
-{
-    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
-
-    /*
-     * When usage drops to zero it may allow more VBD updates to occur.
-     * Update of usage count is protected by a per-device semaphore.
-     */
-    if ( --disk->usage == 0 ) {
-        vbd_update();
-    }
-
-    return 0;
-}
-
-
-int blkif_ioctl(struct inode *inode, struct file *filep,
-                unsigned command, unsigned long argument)
-{
-    kdev_t dev = inode->i_rdev;
-    struct hd_geometry *geo = (struct hd_geometry *)argument;
-    struct gendisk *gd;     
-    struct hd_struct *part; 
-    int i;
-    unsigned short cylinders;
-    byte heads, sectors;
-
-    /* NB. No need to check permissions. That is done for us. */
-    
-    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-                  command, (long) argument, dev); 
-  
-    gd = get_gendisk(dev);
-    part = &gd->part[MINOR(dev)]; 
-
-    switch ( command )
-    {
-    case BLKGETSIZE:
-        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
-        return put_user(part->nr_sects, (unsigned long *) argument);
-
-    case BLKGETSIZE64:
-        DPRINTK_IOCTL("   BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
-                      (u64)part->nr_sects * 512);
-        return put_user((u64)part->nr_sects * 512, (u64 *) argument);
-
-    case BLKRRPART:                               /* re-read partition table */
-        DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART);
-        return blkif_revalidate(dev);
-
-    case BLKSSZGET:
-        return hardsect_size[MAJOR(dev)][MINOR(dev)]; 
-
-    case BLKBSZGET:                                        /* get block size */
-        DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
-        break;
-
-    case BLKBSZSET:                                        /* set block size */
-        DPRINTK_IOCTL("   BLKBSZSET: %x\n", BLKBSZSET);
-        break;
-
-    case BLKRASET:                                         /* set read-ahead */
-        DPRINTK_IOCTL("   BLKRASET: %x\n", BLKRASET);
-        break;
-
-    case BLKRAGET:                                         /* get read-ahead */
-        DPRINTK_IOCTL("   BLKRAFET: %x\n", BLKRAGET);
-        break;
-
-    case HDIO_GETGEO:
-        DPRINTK_IOCTL("   HDIO_GETGEO: %x\n", HDIO_GETGEO);
-        if (!argument) return -EINVAL;
-
-        /* We don't have real geometry info, but let's at least return
-          values consistent with the size of the device */
-
-        heads = 0xff;
-        sectors = 0x3f; 
-        cylinders = part->nr_sects / (heads * sectors);
-
-        if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
-        if (put_user(heads,  (byte *)&geo->heads)) return -EFAULT;
-        if (put_user(sectors,  (byte *)&geo->sectors)) return -EFAULT;
-        if (put_user(cylinders, (unsigned short *)&geo->cylinders)) return -EFAULT;
-
-        return 0;
-
-    case HDIO_GETGEO_BIG: 
-        DPRINTK_IOCTL("   HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
-        if (!argument) return -EINVAL;
-
-        /* We don't have real geometry info, but let's at least return
-          values consistent with the size of the device */
-
-        heads = 0xff;
-        sectors = 0x3f; 
-        cylinders = part->nr_sects / (heads * sectors);
-
-        if (put_user(0x00,  (unsigned long *) &geo->start))  return -EFAULT;
-        if (put_user(heads,  (byte *)&geo->heads))   return -EFAULT;
-        if (put_user(sectors,  (byte *)&geo->sectors)) return -EFAULT;
-        if (put_user(cylinders, (unsigned int *) &geo->cylinders)) return -EFAULT;
-
-        return 0;
-
-    case CDROMMULTISESSION:
-        DPRINTK("FIXME: support multisession CDs later\n");
-        for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
-            if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
-        return 0;
-
-    case SCSI_IOCTL_GET_BUS_NUMBER:
-        DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
-        return -ENOSYS;
-
-    default:
-        printk(KERN_ALERT "ioctl %08x not supported by XL blkif\n", command);
-        return -ENOSYS;
-    }
-    
-    return 0;
-}
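
The HDIO_GETGEO handlers above have no real geometry to report, so they fabricate one from the capacity alone: 255 heads and 63 sectors per track are assumed, and the cylinder count is whatever makes heads * sectors * cylinders approximate the true sector count (rounding down). A standalone sketch of that calculation, with illustrative names:

    #include <stdio.h>

    /* Derive a fake C/H/S geometry from a capacity in 512-byte sectors:
     * fix heads and sectors-per-track, then solve for cylinders. */
    static void fake_chs(unsigned long long nr_sects,
                         unsigned int *cyls, unsigned int *heads,
                         unsigned int *sects)
    {
        *heads = 0xff;                          /* 255 heads */
        *sects = 0x3f;                          /* 63 sectors per track */
        *cyls  = nr_sects / (*heads * *sects);  /* truncates downwards */
    }

    int main(void)
    {
        unsigned long long nr_sects = 8ULL * 1024 * 1024 * 1024 / 512;
        unsigned int c, h, s;

        fake_chs(nr_sects, &c, &h, &s);         /* an 8 GiB disk */
        printf("C/H/S = %u/%u/%u (covers %llu of %llu sectors)\n",
               c, h, s, (unsigned long long)c * h * s, nr_sects);
        return 0;
    }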
-
-
-
-/* check media change: should probably do something here in some cases :-) */
-int blkif_check(kdev_t dev)
-{
-    DPRINTK("blkif_check\n");
-    return 0;
-}
-
-int blkif_revalidate(kdev_t dev)
-{
-    struct block_device *bd;
-    struct gendisk *gd;
-    xl_disk_t *disk;
-    unsigned long capacity;
-    int i, rc = 0;
-    
-    if ( (bd = bdget(dev)) == NULL )
-        return -EINVAL;
-
-    /*
-     * Update of partition info, and check of usage count, is protected
-     * by the per-block-device semaphore.
-     */
-    down(&bd->bd_sem);
-
-    if ( ((gd = get_gendisk(dev)) == NULL) ||
-         ((disk = xldev_to_xldisk(dev)) == NULL) ||
-         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
-    {
-        rc = -EINVAL;
-        goto out;
-    }
-
-    if ( disk->usage > 1 )
-    {
-        rc = -EBUSY;
-        goto out;
-    }
-
-    /* Only reread partition table if VBDs aren't mapped to partitions. */
-    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
-    {
-        for ( i = gd->max_p - 1; i >= 0; i-- )
-        {
-            invalidate_device(dev+i, 1);
-            gd->part[MINOR(dev+i)].start_sect = 0;
-            gd->part[MINOR(dev+i)].nr_sects   = 0;
-            gd->sizes[MINOR(dev+i)]           = 0;
-        }
-
-        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
-    }
-
- out:
-    up(&bd->bd_sem);
-    bdput(bd);
-    return rc;
-}
-
-
-
-
-/*
- * blkif_queue_request
- *
- * request block io 
- * 
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
- */
-static int blkif_queue_request(unsigned long   id,
-                               int             operation,
-                               char *          buffer,
-                               unsigned long   sector_number,
-                               unsigned short  nr_sectors,
-                               kdev_t          device)
-{
-    unsigned long       buffer_ma = phys_to_machine(virt_to_phys(buffer)); 
-    unsigned long       xid;
-    struct gendisk     *gd;
-    blkif_request_t    *req;
-    struct buffer_head *bh;
-    unsigned int        fsect, lsect;
-
-    fsect = (buffer_ma & ~PAGE_MASK) >> 9;
-    lsect = fsect + nr_sectors - 1;
-
-    /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
-    if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
-        BUG();
-    if ( lsect > 7 )
-        BUG();
-
-    buffer_ma &= PAGE_MASK;
-
-    if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
-        return 1;
-
-    switch ( operation )
-    {
-
-    case BLKIF_OP_READ:
-    case BLKIF_OP_WRITE:
-        gd = get_gendisk(device); 
-
-        /*
-         * Update the sector_number we'll pass down as appropriate; note that
-         * we could sanity-check that the resulting sector will be in this
-         * partition, but the driver backend will do that anyhow.
-         */
-        sector_number += gd->part[MINOR(device)].start_sect;
-
-        /*
-         * If this unit doesn't consist of virtual partitions then we clear 
-         * the partn bits from the device number.
-         */
-        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 
-               GENHD_FL_VIRT_PARTNS) )
-            device &= ~(gd->max_p - 1);
-
-        if ( (sg_operation == operation) &&
-             (sg_dev == device) &&
-             (sg_next_sect == sector_number) )
-        {
-
-            req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
-            bh = (struct buffer_head *)id;
-           
-            bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
-           
-
-           rec_ring[req->id].id = id;
-
-            req->frame_and_sects[req->nr_segments] = 
-                buffer_ma | (fsect<<3) | lsect;
-            if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
-                sg_next_sect += nr_sectors;
-            else
-                DISABLE_SCATTERGATHER();
-
-            /* Update the copy of the request in the recovery ring. */
-            translate_req_to_pfn(&rec_ring[req->id], req );
-
-            return 0;
-        }
-        else if ( BLKIF_RING_FULL )
-        {
-            return 1;
-        }
-        else
-        {
-            sg_operation = operation;
-            sg_dev       = device;
-            sg_next_sect = sector_number + nr_sectors;
-        }
-        break;
-
-    default:
-        panic("unknown op %d\n", operation);
-    }
-
-    /* Fill out a communications ring structure. */
-    req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
-
-    xid = GET_ID_FROM_FREELIST();
-    rec_ring[xid].id = id;
-
-    req->id            = xid;
-    req->operation     = operation;
-    req->sector_number = (blkif_sector_t)sector_number;
-    req->device        = device; 
-    req->nr_segments   = 1;
-    req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
-
-    req_prod++;
-
-    /* Keep a private copy so we can reissue requests when recovering. */    
-    translate_req_to_pfn(&rec_ring[xid], req );
-
-
-
-    return 0;
-}
-
-
-/*
- * do_blkif_request
- *  read a block; request is in a request queue
- */
-void do_blkif_request(request_queue_t *rq)
-{
-    struct request *req;
-    struct buffer_head *bh, *next_bh;
-    int rw, nsect, full, queued = 0;
-
-    DPRINTK("Entered do_blkif_request\n"); 
-
-    while ( !rq->plugged && !list_empty(&rq->queue_head))
-    {
-        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
-            goto out;
-  
-        DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
-                req, req->cmd, req->sector,
-                req->current_nr_sectors, req->nr_sectors, req->bh);
-
-        rw = req->cmd;
-        if ( rw == READA )
-            rw = READ;
-        if ( unlikely((rw != READ) && (rw != WRITE)) )
-            panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
-
-        req->errors = 0;
-
-        bh = req->bh;
-        while ( bh != NULL )
-        {
-            next_bh = bh->b_reqnext;
-            bh->b_reqnext = NULL;
-
-            full = blkif_queue_request(
-                (unsigned long)bh,
-                (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE, 
-                bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
-
-            if ( full )
-            { 
-                bh->b_reqnext = next_bh;
-                pending_queues[nr_pending++] = rq;
-                if ( unlikely(nr_pending >= MAX_PENDING) )
-                    BUG();
-                goto out; 
-            }
-
-            queued++;
-
-            /* Dequeue the buffer head from the request. */
-            nsect = bh->b_size >> 9;
-            bh = req->bh = next_bh;
-            
-            if ( bh != NULL )
-            {
-                /* There's another buffer head to do. Update the request. */
-                req->hard_sector += nsect;
-                req->hard_nr_sectors -= nsect;
-                req->sector = req->hard_sector;
-                req->nr_sectors = req->hard_nr_sectors;
-                req->current_nr_sectors = bh->b_size >> 9;
-                req->buffer = bh->b_data;
-            }
-            else
-            {
-                /* That was the last buffer head. Finalise the request. */
-                if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
-                    BUG();
-                blkdev_dequeue_request(req);
-                end_that_request_last(req);
-            }
-        }
-    }
-
- out:
-    if ( queued != 0 )
-        flush_requests();
-}
-
-
-static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
-    BLKIF_RING_IDX i, rp; 
-    unsigned long flags; 
-    struct buffer_head *bh, *next_bh;
-    
-    spin_lock_irqsave(&io_request_lock, flags);     
-
-    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
-    {
-        spin_unlock_irqrestore(&io_request_lock, flags);
-        return;
-    }
-
-    rp = blk_ring->resp_prod;
-    rmb(); /* Ensure we see queued responses up to 'rp'. */
-
-    for ( i = resp_cons; i != rp; i++ )
-    {
-       unsigned long id;
-        blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
-       id = bret->id;
-       bh = (struct buffer_head *)rec_ring[id].id; 
-
-       blkif_completion( &rec_ring[id] );
-
-       ADD_ID_TO_FREELIST(id);
-
-        switch ( bret->operation )
-        {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
-                DPRINTK("Bad return from blkdev data request: %lx\n",
-                        bret->status);
-            for ( ; bh != NULL; bh = next_bh )
-            {
-                next_bh = bh->b_reqnext;
-                bh->b_reqnext = NULL;
-                bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
-            }
-
-            break;
-        case BLKIF_OP_PROBE:
-            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
-            blkif_control_rsp_valid = 1;
-            break;
-        default:
-            BUG();
-        }
-    }
-    
-    resp_cons = i;
-
-    kick_pending_request_queues();
-
-    spin_unlock_irqrestore(&io_request_lock, flags);
-}
-
-#endif
-
-/*****************************  COMMON CODE  *******************************/
-
-
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
-                                        blkif_request_t *req)
-{
-    int i;
-
-    xreq->operation     = req->operation;
-    xreq->nr_segments   = req->nr_segments;
-    xreq->device        = req->device;
-    /* preserve id */
-    xreq->sector_number = req->sector_number;
-
-    for ( i = 0; i < req->nr_segments; i++ ){
-        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
-    }
-}
-
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
-                                        blkif_request_t *req)
-{
-    int i;
-
-    xreq->operation     = req->operation;
-    xreq->nr_segments   = req->nr_segments;
-    xreq->device        = req->device;
-    xreq->id            = req->id;   /* copy id (unlike above) */
-    xreq->sector_number = req->sector_number;
-
-    for ( i = 0; i < req->nr_segments; i++ ){
-        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
-    }
-}
-
-
-
-static inline void flush_requests(void)
-{
-    DISABLE_SCATTERGATHER();
-    wmb(); /* Ensure that the frontend can see the requests. */
-    blk_ring->req_prod = req_prod;
-    notify_via_evtchn(blkif_evtchn);
-}
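
flush_requests() publishes the privately accumulated req_prod to the shared ring only after a write barrier, so the backend can never observe the new producer index before the request slots it covers are filled in. A minimal standalone sketch of that publish step; the GCC builtin below stands in for the kernel's wmb(), and the ring layout is illustrative, not the real blkif_ring_t:

    #include <stdio.h>

    /* Shared ring as the other side would see it (illustrative layout). */
    struct ring {
        unsigned int  req_prod;      /* written by frontend, read by backend */
        unsigned long slot[64];      /* request payload */
    };

    static struct ring  shared;
    static unsigned int private_prod;    /* frontend's private index */

    static void queue_request(unsigned long payload)
    {
        shared.slot[private_prod % 64] = payload;
        private_prod++;                  /* not yet visible to the backend */
    }

    static void flush_requests(void)
    {
        __sync_synchronize();            /* stand-in for wmb(): slots first... */
        shared.req_prod = private_prod;  /* ...then the producer index */
        /* A real driver would now notify the backend (event channel). */
    }

    int main(void)
    {
        queue_request(0x1234);
        queue_request(0x5678);
        flush_requests();
        printf("published req_prod=%u\n", shared.req_prod);
        return 0;
    }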
-
-
-void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
-{
-    unsigned long flags, id;
-
- retry:
-    while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-    }
-
-    spin_lock_irqsave(&blkif_io_lock, flags);
-    if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
-    {
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
-        goto retry;
-    }
-
-    DISABLE_SCATTERGATHER();
-    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;    
-
-    id = GET_ID_FROM_FREELIST();
-    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
-    rec_ring[id].id = (unsigned long) req;
-
-    translate_req_to_pfn( &rec_ring[id], req );
-
-    req_prod++;
-    flush_requests();
-
-    spin_unlock_irqrestore(&blkif_io_lock, flags);
-
-    while ( !blkif_control_rsp_valid )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-    }
-
-    memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
-    blkif_control_rsp_valid = 0;
-}
-
-
-/* Send a driver status notification to the domain controller. */
-static void send_driver_status(int ok){
-    ctrl_msg_t cmsg = {
-        .type    = CMSG_BLKIF_FE,
-        .subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
-        .length  = sizeof(blkif_fe_driver_status_t),
-    };
-    blkif_fe_driver_status_t *msg = (void*)cmsg.msg;
-    
-    msg->status = (ok ? BLKIF_DRIVER_STATUS_UP : BLKIF_DRIVER_STATUS_DOWN);
-
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-/* Tell the controller to bring up the interface. */
-static void blkif_send_interface_connect(void){
-    ctrl_msg_t cmsg = {
-        .type    = CMSG_BLKIF_FE,
-        .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
-        .length  = sizeof(blkif_fe_interface_connect_t),
-    };
-    blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
-    
-    msg->handle      = 0;
-    msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
-    
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-static void blkif_free(void)
-{
-
-    printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
-            
-    /* Prevent new requests being issued until we fix things up. */
-    spin_lock_irq(&blkif_io_lock);
-    recovery = 1;
-    blkif_state = BLKIF_STATE_DISCONNECTED;
-    spin_unlock_irq(&blkif_io_lock);
-
-    /* Free resources associated with old device channel. */
-    if(blk_ring){
-        free_page((unsigned long)blk_ring);
-        blk_ring = 0;
-    }
-    free_irq(blkif_irq, NULL);
-    blkif_irq = 0;
-    
-    unbind_evtchn_from_irq(blkif_evtchn);
-    blkif_evtchn = 0;
-}
-
-static void blkif_close(void){
-}
-
-/* Move from CLOSED to DISCONNECTED state. */
-static void blkif_disconnect(void)
-{
-    if(blk_ring) free_page((unsigned long)blk_ring);
-    blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
-    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
-    blkif_state  = BLKIF_STATE_DISCONNECTED;
-    blkif_send_interface_connect();
-}
-
-static void blkif_reset(void)
-{
-    printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
-    blkif_free();
-    blkif_disconnect();
-}
-
-static void blkif_recover(void)
-{
-
-    int i;
-
-    /* Hmm, requests might be re-ordered when we re-issue them.
-     * This will need to be fixed once we have barriers */
-
-    /* Stage 1 : Find active and move to safety. */
-    for ( i = 0; i < BLKIF_RING_SIZE; i++ ) {
-        if ( rec_ring[i].id >= PAGE_OFFSET ) {
-            translate_req_to_mfn(
-                &blk_ring->ring[req_prod].req, &rec_ring[i]);
-            req_prod++;
-        }
-    }
-
-    printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
-           
-    /* Stage 2 : Set up shadow list. */
-    for ( i = 0; i < req_prod; i++ ) {
-        rec_ring[i].id = blk_ring->ring[i].req.id;             
-        blk_ring->ring[i].req.id = i;
-        translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
-    }
-
-    /* Stage 3 : Set up free list. */
-    for ( ; i < BLKIF_RING_SIZE; i++ ){
-        rec_ring[i].id = i+1;
-    }
-    rec_ring_free = req_prod;
-    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
-
-    /* blk_ring->req_prod will be set when we flush_requests().*/
-    wmb();
-
-    /* Switch off recovery mode, using a memory barrier to ensure that
-     * it's seen before we flush requests - we don't want to miss any
-     * interrupts. */
-    recovery = 0;
-    wmb();
-
-    /* Kicks things back into life. */
-    flush_requests();
-
-    /* Now safe to let other people use the interface. */
-    blkif_state = BLKIF_STATE_CONNECTED;
-}
-
-static void blkif_connect(blkif_fe_interface_status_t *status)
-{
-    int err = 0;
-
-    blkif_evtchn = status->evtchn;
-    blkif_irq    = bind_evtchn_to_irq(blkif_evtchn);
-
-    err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
-    if(err){
-        printk(KERN_ALERT "[XEN] blkfront request_irq failed (err=%d)\n", err);
-        return;
-    }
-
-    if ( recovery ) {
-        blkif_recover();
-    } else {
-        /* Transition to connected in case we need to do 
-         *  a partition probe on a whole disk. */
-        blkif_state = BLKIF_STATE_CONNECTED;
-        
-        /* Probe for discs attached to the interface. */
-        xlvbd_init();
-    }
-    
-    /* Kick pending requests. */
-    spin_lock_irq(&blkif_io_lock);
-    kick_pending_request_queues();
-    spin_unlock_irq(&blkif_io_lock);
-}
-
-static void unexpected(blkif_fe_interface_status_t *status)
-{
-    WPRINTK(" Unexpected blkif status %s in state %s\n", 
-           blkif_status_name[status->status],
-           blkif_state_name[blkif_state]);
-}
-
-static void blkif_status(blkif_fe_interface_status_t *status)
-{
-    if (status->handle != blkif_handle) {
-        WPRINTK(" Invalid blkif: handle=%u", status->handle);
-        return;
-    }
-
-    switch (status->status) {
-
-    case BLKIF_INTERFACE_STATUS_CLOSED:
-        switch(blkif_state){
-        case BLKIF_STATE_CLOSED:
-            unexpected(status);
-            break;
-        case BLKIF_STATE_DISCONNECTED:
-        case BLKIF_STATE_CONNECTED:
-            unexpected(status);
-            blkif_close();
-            break;
-        }
-        break;
-
-    case BLKIF_INTERFACE_STATUS_DISCONNECTED:
-        switch(blkif_state){
-        case BLKIF_STATE_CLOSED:
-            blkif_disconnect();
-            break;
-        case BLKIF_STATE_DISCONNECTED:
-        case BLKIF_STATE_CONNECTED:
-            unexpected(status);
-            blkif_reset();
-            break;
-        }
-        break;
-
-    case BLKIF_INTERFACE_STATUS_CONNECTED:
-        switch(blkif_state){
-        case BLKIF_STATE_CLOSED:
-            unexpected(status);
-            blkif_disconnect();
-            blkif_connect(status);
-            break;
-        case BLKIF_STATE_DISCONNECTED:
-            blkif_connect(status);
-            break;
-        case BLKIF_STATE_CONNECTED:
-            unexpected(status);
-            blkif_connect(status);
-            break;
-        }
-        break;
-
-   case BLKIF_INTERFACE_STATUS_CHANGED:
-        switch(blkif_state){
-        case BLKIF_STATE_CLOSED:
-        case BLKIF_STATE_DISCONNECTED:
-            unexpected(status);
-            break;
-        case BLKIF_STATE_CONNECTED:
-            vbd_update();
-            break;
-        }
-       break;
-
-    default:
-        WPRINTK(" Invalid blkif status: %d\n", status->status);
-        break;
-    }
-}
-
-
-static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    switch ( msg->subtype )
-    {
-    case CMSG_BLKIF_FE_INTERFACE_STATUS:
-        if ( msg->length != sizeof(blkif_fe_interface_status_t) )
-            goto parse_error;
-        blkif_status((blkif_fe_interface_status_t *)
-                     &msg->msg[0]);
-        break;        
-    default:
-        goto parse_error;
-    }
-
-    ctrl_if_send_response(msg);
-    return;
-
- parse_error:
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-int wait_for_blkif(void){
-    int err = 0;
-    int i;
-    send_driver_status(1);
-
-    /*
-     * We should read 'nr_interfaces' from response message and wait
-     * for notifications before proceeding. For now we assume that we
-     * will be notified of exactly one interface.
-     */
-    for ( i=0; (blkif_state != BLKIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-    }
-
-    if (blkif_state != BLKIF_STATE_CONNECTED){
-        printk(KERN_INFO "[XEN] Timeout connecting block device driver!\n");
-        err = -ENOSYS;
-    }
-    return err;
-}
-
-int __init xlblk_init(void)
-{
-    int i;
-    
-    if ( (xen_start_info.flags & SIF_INITDOMAIN) 
-         || (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
-        return 0;
-
-    printk(KERN_INFO "[XEN] Initialising virtual block device driver\n");
-
-    rec_ring_free = 0;
-    for (i=0; i<BLKIF_RING_SIZE; i++)
-    {
-       rec_ring[i].id = i+1;
-    }
-    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
-
-    (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-
-    wait_for_blkif();
-
-    return 0;
-}
-
-void blkdev_suspend(void)
-{
-}
-
-void blkdev_resume(void)
-{
-    send_driver_status(1);
-}
-
-/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
-
-void blkif_completion(blkif_request_t *req)
-{
-    int i;
-
-    switch ( req->operation )
-    {
-    case BLKIF_OP_READ:
-       for ( i = 0; i < req->nr_segments; i++ )
-       {
-           unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
-           unsigned long mfn = phys_to_machine_mapping[pfn];
-           xen_machphys_update(mfn, pfn);
-       }
-       break;
-    }
-    
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/block.h b/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/block.h
deleted file mode 100644 (file)
index 76255e8..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-/******************************************************************************
- * block.h
- * 
- * Shared definitions between all levels of XenLinux Virtual block devices.
- * 
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004, Christian Limpach
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __XEN_DRIVERS_BLOCK_H__
-#define __XEN_DRIVERS_BLOCK_H__
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/hdreg.h>
-#include <linux/blkdev.h>
-#include <linux/major.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-#include <asm-xen/hypervisor-ifs/io/blkif.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-#if 0
-#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-#endif
-
-struct xlbd_type_info {
-       int partn_shift;
-       int devs_per_major;
-       int hardsect_size;
-       int max_sectors;
-       char *name;
-};
-
-/*
- * We have one of these per vbd, whether ide, scsi or 'other'.  They
- * hang in private_data off the gendisk structure. We may end up
- * putting all kinds of interesting stuff here :-)
- */
-struct xlbd_major_info {
-       int major;
-       int usage;
-       int xd_device;
-       struct xlbd_type_info *type;
-};
-
-struct xlbd_disk_info {
-       int xd_device;
-       struct xlbd_major_info *mi;
-};
-
-typedef struct xen_block {
-       int usage;
-} xen_block_t;
-
-extern struct request_queue *xlbd_blk_queue;
-extern spinlock_t blkif_io_lock;
-
-extern int blkif_open(struct inode *inode, struct file *filep);
-extern int blkif_release(struct inode *inode, struct file *filep);
-extern int blkif_ioctl(struct inode *inode, struct file *filep,
-                           unsigned command, unsigned long argument);
-extern int blkif_check(dev_t dev);
-extern int blkif_revalidate(dev_t dev);
-extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
-extern void do_blkif_request (request_queue_t *rq); 
-
-extern void xlvbd_update_vbds(void);
-
-/* Virtual block-device subsystem. */
-extern int  xlvbd_init(void);
-extern void xlvbd_cleanup(void); 
-
-#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/vbd.c b/linux-2.6.8.1-xen-sparse/drivers/xen/blkfront/vbd.c
deleted file mode 100644 (file)
index d2cfb7c..0000000
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- * vbd.c
- * 
- * XenLinux virtual block-device driver (xvd).
- * 
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004, Christian Limpach
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "block.h"
-#include <linux/blkdev.h>
-
-/*
- * For convenience we distinguish between ide, scsi and 'other' (i.e.
- * potentially combinations of the two) in the naming scheme and in a few 
- * other places (like default readahead, etc).
- */
-
-#define NUM_IDE_MAJORS 10
-#define NUM_SCSI_MAJORS 9
-#define NUM_VBD_MAJORS 1
-
-static struct xlbd_type_info xlbd_ide_type = {
-       .partn_shift = 6,
-       // XXXcl todo blksize_size[major]  = 1024;
-       .hardsect_size = 512,
-       .max_sectors = 128,  /* 'hwif->rqsize' if we knew it */
-       // XXXcl todo read_ahead[major]    = 8; /* from drivers/ide/ide-probe.c */
-       .name = "hd",
-};
-
-static struct xlbd_type_info xlbd_scsi_type = {
-       .partn_shift = 4,
-       // XXXcl todo blksize_size[major]  = 1024; /* XXX 512; */
-       .hardsect_size = 512,
-       .max_sectors = 128*8, /* XXX 128; */
-       // XXXcl todo read_ahead[major]    = 0; /* XXX 8; -- guessing */
-       .name = "sd",
-};
-
-static struct xlbd_type_info xlbd_vbd_type = {
-       .partn_shift = 4,
-       // XXXcl todo blksize_size[major]  = 512;
-       .hardsect_size = 512,
-       .max_sectors = 128,
-       // XXXcl todo read_ahead[major]    = 8;
-       .name = "xvd",
-};
-
-/* XXXcl handle cciss after finding out why it's "hacked" in */
-
-static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-                                        NUM_VBD_MAJORS];
-
-/* Information about our VBDs. */
-#define MAX_VBDS 64
-static int nr_vbds;
-static vdisk_t *vbd_info;
-
-struct request_queue *xlbd_blk_queue = NULL;
-
-#define MAJOR_XEN(dev) ((dev)>>8)
-#define MINOR_XEN(dev) ((dev) & 0xff)
-
-static struct block_device_operations xlvbd_block_fops = 
-{
-       .owner          = THIS_MODULE,
-       .open           = blkif_open,
-       .release        = blkif_release,
-       .ioctl          = blkif_ioctl,
-#if 0
-    check_media_change: blkif_check,
-    revalidate:         blkif_revalidate,
-#endif
-};
-
-spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
-
-static int xlvbd_get_vbd_info(vdisk_t *disk_info)
-{
-    vdisk_t         *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
-    blkif_request_t  req;
-    blkif_response_t rsp;
-    int              nr;
-
-    memset(&req, 0, sizeof(req));
-    req.operation   = BLKIF_OP_PROBE;
-    req.nr_segments = 1;
-    req.frame_and_sects[0] = virt_to_machine(buf) | 7;
-
-    blkif_control_send(&req, &rsp);
-
-    if ( rsp.status <= 0 )
-    {
-        printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
-        return -1;
-    }
-
-    if ( (nr = rsp.status) > MAX_VBDS )
-         nr = MAX_VBDS;
-    memcpy(disk_info, buf, nr * sizeof(vdisk_t));
-
-    free_page((unsigned long)buf);
-
-    return nr;
-}
-
-static struct xlbd_major_info *xlbd_get_major_info(int xd_device, int *minor)
-{
-       int mi_idx, new_major;
-       int xd_major = MAJOR_XEN(xd_device); 
-       int xd_minor = MINOR_XEN(xd_device);
-
-       *minor = xd_minor;
-
-       switch (xd_major) {
-       case IDE0_MAJOR: mi_idx = 0; new_major = IDE0_MAJOR; break;
-       case IDE1_MAJOR: mi_idx = 1; new_major = IDE1_MAJOR; break;
-       case IDE2_MAJOR: mi_idx = 2; new_major = IDE2_MAJOR; break;
-       case IDE3_MAJOR: mi_idx = 3; new_major = IDE3_MAJOR; break;
-       case IDE4_MAJOR: mi_idx = 4; new_major = IDE4_MAJOR; break;
-       case IDE5_MAJOR: mi_idx = 5; new_major = IDE5_MAJOR; break;
-       case IDE6_MAJOR: mi_idx = 6; new_major = IDE6_MAJOR; break;
-       case IDE7_MAJOR: mi_idx = 7; new_major = IDE7_MAJOR; break;
-       case IDE8_MAJOR: mi_idx = 8; new_major = IDE8_MAJOR; break;
-       case IDE9_MAJOR: mi_idx = 9; new_major = IDE9_MAJOR; break;
-       case SCSI_DISK0_MAJOR: mi_idx = 10; new_major = SCSI_DISK0_MAJOR; break;
-       case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-               mi_idx = 11 + xd_major - SCSI_DISK1_MAJOR;
-               new_major = SCSI_DISK1_MAJOR + xd_major - SCSI_DISK1_MAJOR;
-               break;
-       case SCSI_CDROM_MAJOR: mi_idx = 18; new_major = SCSI_CDROM_MAJOR; break;
-       default: mi_idx = 19; new_major = 0;/* XXXcl notyet */ break;
-       }
-
-       if (major_info[mi_idx])
-               return major_info[mi_idx];
-
-       major_info[mi_idx] = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-       if (major_info[mi_idx] == NULL)
-               return NULL;
-
-       memset(major_info[mi_idx], 0, sizeof(struct xlbd_major_info));
-
-       switch (mi_idx) {
-       case 0 ... (NUM_IDE_MAJORS - 1):
-               major_info[mi_idx]->type = &xlbd_ide_type;
-               break;
-       case NUM_IDE_MAJORS ... (NUM_IDE_MAJORS + NUM_SCSI_MAJORS - 1):
-               major_info[mi_idx]->type = &xlbd_scsi_type;
-               break;
-       case (NUM_IDE_MAJORS + NUM_SCSI_MAJORS) ...
-               (NUM_IDE_MAJORS + NUM_SCSI_MAJORS + NUM_VBD_MAJORS - 1):
-               major_info[mi_idx]->type = &xlbd_vbd_type;
-               break;
-       }
-       major_info[mi_idx]->major = new_major;
-
-       if (register_blkdev(major_info[mi_idx]->major, major_info[mi_idx]->type->name)) {
-               printk(KERN_ALERT "XL VBD: can't get major %d with name %s\n",
-                   major_info[mi_idx]->major, major_info[mi_idx]->type->name);
-               goto out;
-       }
-
-       devfs_mk_dir(major_info[mi_idx]->type->name);
-
-       return major_info[mi_idx];
-
- out:
-       kfree(major_info[mi_idx]);
-       major_info[mi_idx] = NULL;
-       return NULL;
-}
-
-static struct gendisk *xlvbd_get_gendisk(struct xlbd_major_info *mi,
-                                        int xd_minor, vdisk_t *xd)
-{
-       struct gendisk *gd;
-       struct xlbd_disk_info *di;
-       int device, partno;
-
-       device = MKDEV(mi->major, xd_minor);
-       gd = get_gendisk(device, &partno);
-       if (gd)
-               return gd;
-
-       di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
-       if (di == NULL)
-               return NULL;
-       di->mi = mi;
-       di->xd_device = xd->device;
-
-       /* Construct an appropriate gendisk structure. */
-       gd = alloc_disk(1);
-       if (gd == NULL)
-               goto out;
-
-       gd->major = mi->major;
-       gd->first_minor = xd_minor;
-       gd->fops = &xlvbd_block_fops;
-       gd->private_data = di;
-       sprintf(gd->disk_name, "%s%c%d", mi->type->name,
-           'a' + (xd_minor >> mi->type->partn_shift),
-           xd_minor & ((1 << mi->type->partn_shift) - 1));
-       /*  sprintf(gd->devfs_name, "%s%s/disc%d", mi->type->name, , ); XXXdevfs */
-
-       set_capacity(gd, xd->capacity);
-
-       if (xlbd_blk_queue == NULL) {
-               xlbd_blk_queue = blk_init_queue(do_blkif_request,
-                                               &blkif_io_lock);
-               if (xlbd_blk_queue == NULL)
-                       goto out;
-               elevator_init(xlbd_blk_queue, &elevator_noop);
-
-               /*
-                * Turn off barking 'headactive' mode. We dequeue
-                * buffer heads as soon as we pass them to back-end
-                * driver.
-                */
-               blk_queue_headactive(xlbd_blk_queue, 0); /* XXXcl: noop according to blkdev.h */
-
-               blk_queue_hardsect_size(xlbd_blk_queue,
-                                       mi->type->hardsect_size);
-               blk_queue_max_sectors(xlbd_blk_queue, mi->type->max_sectors); /* 'hwif->rqsize' if we knew it */
-
-               /* XXXcl: set mask to PAGE_SIZE for now; to improve on this, either use
-                  - blk_queue_merge_bvec to merge requests with adjacent ma's
-                  - the tags infrastructure
-                  - the dma infrastructure
-               */
-               blk_queue_segment_boundary(xlbd_blk_queue, PAGE_SIZE - 1);
-
-               blk_queue_max_phys_segments(xlbd_blk_queue,
-                    BLKIF_MAX_SEGMENTS_PER_REQUEST);
-               blk_queue_max_hw_segments(xlbd_blk_queue,
-                    BLKIF_MAX_SEGMENTS_PER_REQUEST); /* XXXcl not needed? */
-
-
-       }
-       gd->queue = xlbd_blk_queue;
-
-       add_disk(gd);
-
-       return gd;
-
- out:
-       if (gd)
-               del_gendisk(gd);
-       kfree(di);
-       return NULL;
-}
-
-/*
- * xlvbd_init_device - initialise a VBD device
- * @disk:              a vdisk_t describing the VBD
- *
- * Takes a vdisk_t * that describes a VBD the domain has access to.
- * Performs appropriate initialisation and registration of the device.
- *
- * Care needs to be taken when making re-entrant calls to ensure that
- * corruption does not occur.  Also, devices that are in use should not have
- * their details updated.  This is the caller's responsibility.
- */
-static int xlvbd_init_device(vdisk_t *xd)
-{
-       struct block_device *bd;
-       struct gendisk *gd;
-       struct xlbd_major_info *mi;
-       int device;
-       int minor;
-
-       int err = -ENOMEM;
-
-       mi = xlbd_get_major_info(xd->device, &minor);
-       if (mi == NULL)
-               return -EPERM;
-
-       device = MKDEV(mi->major, minor);
-
-       if ((bd = bdget(device)) == NULL)
-               return -EPERM;
-
-       /*
-        * Update of partition info, and check of usage count, is protected
-        * by the per-block-device semaphore.
-        */
-       down(&bd->bd_sem);
-
-       gd = xlvbd_get_gendisk(mi, minor, xd);
-       if (gd == NULL) {
-               err = -EPERM;
-               goto out;
-       }
-
-       if (VDISK_READONLY(xd->info))
-               set_disk_ro(gd, 1); 
-
-       /* Some final fix-ups depending on the device type */
-       switch (VDISK_TYPE(xd->info)) { 
-       case VDISK_TYPE_CDROM:
-               gd->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD; 
-               /* FALLTHROUGH */
-       case VDISK_TYPE_FLOPPY: 
-       case VDISK_TYPE_TAPE:
-               gd->flags |= GENHD_FL_REMOVABLE; 
-               break; 
-
-       case VDISK_TYPE_DISK:
-               break; 
-
-       default:
-               printk(KERN_ALERT "XenLinux: unknown device type %d\n", 
-                   VDISK_TYPE(xd->info)); 
-               break; 
-       }
-
-       err = 0;
- out:
-       up(&bd->bd_sem);
-       bdput(bd);    
-       return err;
-}
-
-#if 0
-/*
- * xlvbd_remove_device - remove a device node if possible
- * @device:       numeric device ID
- *
- * Updates the gendisk structure and invalidates devices.
- *
- * This is OK for now, but in future we should perhaps consider where this should
- * deallocate gendisks / unregister devices.
- */
-static int xlvbd_remove_device(int device)
-{
-    int i, rc = 0, minor = MINOR(device);
-    struct gendisk *gd;
-    struct block_device *bd;
-    xen_block_t *disk = NULL;
-
-    if ( (bd = bdget(device)) == NULL )
-        return -1;
-
-    /*
-     * Update of partition info, and check of usage count, is protected
-     * by the per-block-device semaphore.
-     */
-    down(&bd->bd_sem);
-
-    if ( ((gd = get_gendisk(device)) == NULL) ||
-         ((disk = xldev_to_xldisk(device)) == NULL) )
-        BUG();
-
-    if ( disk->usage != 0 )
-    {
-        printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
-        rc = -1;
-        goto out;
-    }
-    if ( (minor & (gd->max_p-1)) != 0 )
-    {
-        /* 1: The VBD is mapped to a partition rather than a whole unit. */
-        invalidate_device(device, 1);
-       gd->part[minor].start_sect = 0;
-        gd->part[minor].nr_sects   = 0;
-        gd->sizes[minor]           = 0;
-
-        /* Clear the consists-of-virtual-partitions flag if possible. */
-        gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
-        for ( i = 1; i < gd->max_p; i++ )
-            if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
-                gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
-
-        /*
-         * If all virtual partitions are now gone, and a 'whole unit' VBD is
-         * present, then we can try to grok the unit's real partition table.
-         */
-        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
-             (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
-             !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
-        {
-            register_disk(gd,
-                          device&~(gd->max_p-1), 
-                          gd->max_p, 
-                          &xlvbd_block_fops,
-                          gd->part[minor&~(gd->max_p-1)].nr_sects);
-        }
-    }
-    else
-    {
-        /*
-         * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
-         * NB. The partition entries are only cleared if there are no VBDs
-         * mapped to individual partitions on this unit.
-         */
-        i = gd->max_p - 1; /* Default: clear subpartitions as well. */
-        if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
-            i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
-        while ( i >= 0 )
-        {
-            invalidate_device(device+i, 1);
-            gd->part[minor+i].start_sect = 0;
-            gd->part[minor+i].nr_sects   = 0;
-            gd->sizes[minor+i]           = 0;
-            i--;
-        }
-    }
-
- out:
-    up(&bd->bd_sem);
-    bdput(bd);
-    return rc;
-}
-
-/*
- * xlvbd_update_vbds - reprobes the VBDs and performs the corresponding updates
- * to driver state. The VBDs need to be updated in this way when the domain is
- * initialised and also each time we receive an XLBLK_UPDATE event.
- */
-void xlvbd_update_vbds(void)
-{
-    int i, j, k, old_nr, new_nr;
-    vdisk_t *old_info, *new_info, *merged_info;
-
-    old_info = vbd_info;
-    old_nr   = nr_vbds;
-
-    new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
-    if ( unlikely((new_nr = xlvbd_get_vbd_info(new_info)) < 0) )
-    {
-        kfree(new_info);
-        return;
-    }
-
-    /*
-     * Final list maximum size is old list + new list. This occurs only when
-     * old list and new list do not overlap at all, and we cannot yet destroy
-     * VBDs in the old list because the usage counts are busy.
-     */
-    merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
-
-    /* @i tracks old list; @j tracks new list; @k tracks merged list. */
-    i = j = k = 0;
-
-    while ( (i < old_nr) && (j < new_nr) )
-    {
-        if ( old_info[i].device < new_info[j].device )
-        {
-            if ( xlvbd_remove_device(old_info[i].device) != 0 )
-                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
-            i++;
-        }
-        else if ( old_info[i].device > new_info[j].device )
-        {
-            if ( xlvbd_init_device(&new_info[j]) == 0 )
-                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
-            j++;
-        }
-        else
-        {
-            if ( ((old_info[i].capacity == new_info[j].capacity) &&
-                  (old_info[i].info == new_info[j].info)) ||
-                 (xlvbd_remove_device(old_info[i].device) != 0) )
-                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
-            else if ( xlvbd_init_device(&new_info[j]) == 0 )
-                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
-            i++; j++;
-        }
-    }
-
-    for ( ; i < old_nr; i++ )
-    {
-        if ( xlvbd_remove_device(old_info[i].device) != 0 )
-            memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
-    }
-
-    for ( ; j < new_nr; j++ )
-    {
-        if ( xlvbd_init_device(&new_info[j]) == 0 )
-            memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
-    }
-
-    vbd_info = merged_info;
-    nr_vbds  = k;
-
-    kfree(old_info);
-    kfree(new_info);
-}
-#endif
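
xlvbd_update_vbds() above merges two probe results that are both sorted by device number using two cursors: a device present only in the old list is removed (or kept if it is still busy), a device present only in the new list is initialised, and a device present in both is carried over (or replaced if its details changed). A standalone sketch of that merge over plain integer device IDs, with hypothetical remove/init stubs that always succeed:

    #include <stdio.h>

    /* Stand-ins for xlvbd_remove_device()/xlvbd_init_device(); here they
     * always succeed (return 0). */
    static int remove_dev(int dev) { printf("remove %d\n", dev); return 0; }
    static int init_dev(int dev)   { printf("init   %d\n", dev); return 0; }

    /* Merge two sorted device lists into 'merged'; returns its length. */
    static int merge_vbds(const int *old_list, int old_nr,
                          const int *new_list, int new_nr, int *merged)
    {
        int i = 0, j = 0, k = 0;

        while (i < old_nr && j < new_nr) {
            if (old_list[i] < new_list[j]) {        /* gone from new probe */
                if (remove_dev(old_list[i]) != 0)
                    merged[k++] = old_list[i];      /* busy: keep it */
                i++;
            } else if (old_list[i] > new_list[j]) { /* newly appeared */
                if (init_dev(new_list[j]) == 0)
                    merged[k++] = new_list[j];
                j++;
            } else {                                /* present in both */
                merged[k++] = old_list[i];
                i++; j++;
            }
        }
        while (i < old_nr) {                        /* old-list leftovers */
            if (remove_dev(old_list[i]) != 0)
                merged[k++] = old_list[i];
            i++;
        }
        while (j < new_nr) {                        /* new-list leftovers */
            if (init_dev(new_list[j]) == 0)
                merged[k++] = new_list[j];
            j++;
        }
        return k;
    }

    int main(void)
    {
        int old_list[] = { 1, 3, 5 };
        int new_list[] = { 3, 4, 5, 6 };
        int merged[8];
        int n = merge_vbds(old_list, 3, new_list, 4, merged);
        int i;

        for (i = 0; i < n; i++)
            printf("merged[%d] = %d\n", i, merged[i]);
        return 0;
    }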
-
-/*
- * Set up all the linux device goop for the virtual block devices
- * (vbd's) that we know about. Note that although from the backend
- * driver's p.o.v. VBDs are addressed simply as an opaque 16-bit device
- * number, the domain creation tools conventionally allocate these
- * numbers to correspond to those used by 'real' linux -- this is just
- * for convenience as it means e.g. that the same /etc/fstab can be
- * used when booting with or without Xen.
- */
-int xlvbd_init(void)
-{
-       int i;
-
-       /*
-        * If compiled as a module, we don't support unloading yet. We
-        * therefore permanently increment the reference count to
-        * disallow it.
-        */
-       MOD_INC_USE_COUNT;
-
-       memset(major_info, 0, sizeof(major_info));
-
-       for (i = 0; i < sizeof(major_info) / sizeof(major_info[0]); i++) {
-       }
-
-       vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
-       nr_vbds  = xlvbd_get_vbd_info(vbd_info);
-
-       if (nr_vbds < 0) {
-               kfree(vbd_info);
-               vbd_info = NULL;
-               nr_vbds  = 0;
-       } else {
-               for (i = 0; i < nr_vbds; i++)
-                       xlvbd_init_device(&vbd_info[i]);
-       }
-
-       return 0;
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/console/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/console/Makefile
deleted file mode 100644 (file)
index e4ffdef..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := console.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/console/console.c b/linux-2.6.8.1-xen-sparse/drivers/xen/console/console.c
deleted file mode 100644 (file)
index 8309c25..0000000
+++ /dev/null
@@ -1,764 +0,0 @@
-/******************************************************************************
- * console.c
- * 
- * Virtual console driver.
- * 
- * Copyright (c) 2002-2004, K A Fraser.
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/hypervisor-ifs/event_channel.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/evtchn.h>
-#include <asm-xen/ctrl_if.h>
-
-/*
- * Modes:
- *  'xencons=off'  [XC_OFF]:     Console is disabled.
- *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
- *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
- *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
- * 
- * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
- * warnings from standard distro startup scripts.
- */
-static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
-
-static int __init xencons_setup(char *str)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    if (str[0] == '=')
-       str++;
-#endif
-    if ( !strcmp(str, "tty") )
-        xc_mode = XC_TTY;
-    else if ( !strcmp(str, "ttyS") )
-        xc_mode = XC_SERIAL;
-    else if ( !strcmp(str, "off") )
-        xc_mode = XC_OFF;
-    return 1;
-}
-__setup("xencons", xencons_setup);
-
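The mode is normally chosen on the guest kernel command line. For example, appending the option below forces the serial-style console that XC_DEFAULT would otherwise only pick for DOM0, and 'xencons=off' disables the virtual console entirely (these strings are exactly what xencons_setup() parses above):

xencons=ttyS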
-/* The kernel and user-land drivers share a common transmit buffer. */
-#define WBUF_SIZE     4096
-#define WBUF_MASK(_i) ((_i)&(WBUF_SIZE-1))
-static char wbuf[WBUF_SIZE];
-static unsigned int wc, wp; /* write_cons, write_prod */
-
-/* This lock protects accesses to the common transmit buffer. */
-static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
-
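The transmit buffer above is a power-of-two ring driven by free-running indices: wc and wp only ever increase, WBUF_MASK() folds them into the array, and the unsigned difference wp - wc is the number of bytes queued. (kcons_write() below stops one byte short of full because a '\n' may expand into the two bytes '\n' and '\r'.) A minimal sketch of the same idiom, with names invented for this note rather than taken from the driver:

#define RB_SIZE    4096                   /* must be a power of two */
#define RB_MASK(i) ((i) & (RB_SIZE - 1))

static char         rb[RB_SIZE];
static unsigned int prod, cons;           /* free-running indices */

static int rb_put(char c)
{
    if ((prod - cons) >= RB_SIZE)         /* full; unsigned wrap-around is harmless */
        return 0;
    rb[RB_MASK(prod++)] = c;
    return 1;
}

static int rb_get(char *c)
{
    if (prod == cons)                     /* empty */
        return 0;
    *c = rb[RB_MASK(cons++)];
    return 1;
}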
-/* Common transmit-kick routine. */
-static void __xencons_tx_flush(void);
-
-/* This task is used to defer sending console data until there is space. */
-static void xencons_tx_flush_task_routine(void *data);
-
-static DECLARE_TQUEUE(xencons_tx_flush_task, 
-                      xencons_tx_flush_task_routine,
-                      NULL);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_driver *xencons_driver;
-#else
-static struct tty_driver xencons_driver;
-#endif
-
-
-/******************** Kernel console driver ********************************/
-
-static void kcons_write(
-    struct console *c, const char *s, unsigned int count)
-{
-    int           i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    
-    for ( i = 0; i < count; i++ )
-    {
-        if ( (wp - wc) >= (WBUF_SIZE - 1) )
-            break;
-        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
-            wbuf[WBUF_MASK(wp++)] = '\r';
-    }
-
-    __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void kcons_write_dom0(
-    struct console *c, const char *s, unsigned int count)
-{
-    int rc;
-
-    while ( count > 0 )
-    {
-        if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write,
-                                         count, (char *)s)) > 0 )
-        {
-            count -= rc;
-            s += rc;
-        }
-       else
-           break;
-    }
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_driver *kcons_device(struct console *c, int *index)
-{
-    *index = c->index;
-    return xencons_driver;
-}
-#else
-static kdev_t kcons_device(struct console *c)
-{
-    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
-}
-#endif
-
-static struct console kcons_info = {
-    device:  kcons_device,
-    flags:   CON_PRINTBUFFER,
-    index:   -1
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define __RETCODE 0
-static int __init xen_console_init(void)
-#else
-#define __RETCODE
-void xen_console_init(void)
-#endif
-{
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-    {
-        if ( xc_mode == XC_DEFAULT )
-            xc_mode = XC_SERIAL;
-        kcons_info.write = kcons_write_dom0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-       if ( xc_mode == XC_SERIAL )
-           kcons_info.flags |= CON_ENABLED;
-#endif
-    }
-    else
-    {
-        if ( xc_mode == XC_DEFAULT )
-            xc_mode = XC_TTY;
-        kcons_info.write = kcons_write;
-    }
-
-    if ( xc_mode == XC_OFF )
-        return __RETCODE;
-
-    if ( xc_mode == XC_SERIAL )
-        strcpy(kcons_info.name, "ttyS");
-    else
-        strcpy(kcons_info.name, "tty");
-
-    register_console(&kcons_info);
-    return __RETCODE;
-}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-console_initcall(xen_console_init);
-#endif
-
-/*** Useful function for console debugging -- goes straight to Xen. ***/
-asmlinkage int xprintk(const char *fmt, ...)
-{
-    va_list args;
-    int printk_len;
-    static char printk_buf[1024];
-    
-    /* Emit the output into the temporary buffer */
-    va_start(args, fmt);
-    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-    va_end(args);
-
-    /* Send the processed output directly to Xen. */
-    kcons_write_dom0(NULL, printk_buf, printk_len);
-
-    return 0;
-}
-
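Since xprintk() formats into a private buffer and hands the result straight to kcons_write_dom0(), it bypasses the normal console path entirely, which makes it useful very early in boot or when debugging the console code itself. Usage is just like printk; a hypothetical call site:

xprintk("xencons: wc=%u wp=%u\n", wc, wp);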
-/*** Forcibly flush console data before dying. ***/
-void xencons_force_flush(void)
-{
-    ctrl_msg_t msg;
-    int        sz;
-
-    /* Emergency console is synchronous, so there's nothing to flush. */
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-        return;
-
-    /*
-     * We use dangerous control-interface functions that require a quiescent
-     * system and no interrupts. Try to ensure this with a global cli().
-     */
-    cli();
-
-    /* Spin until console data is flushed through to the domain controller. */
-    while ( (wc != wp) && !ctrl_if_transmitter_empty() )
-    {
-        /* Interrupts are disabled -- we must manually reap responses. */
-        ctrl_if_discard_responses();
-
-        if ( (sz = wp - wc) == 0 )
-            continue;
-        if ( sz > sizeof(msg.msg) )
-            sz = sizeof(msg.msg);
-        if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
-            sz = WBUF_SIZE - WBUF_MASK(wc);
-
-        msg.type    = CMSG_CONSOLE;
-        msg.subtype = CMSG_CONSOLE_DATA;
-        msg.length  = sz;
-        memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-            
-        if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
-            wc += sz;
-    }
-}
-
-
-/******************** User-space console driver (/dev/console) ************/
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define DRV(_d)         (_d)
-#define TTY_INDEX(_tty) ((_tty)->index)
-#else
-static int xencons_refcount;
-static struct tty_struct *xencons_table[MAX_NR_CONSOLES];
-#define DRV(_d)         (&(_d))
-#define TTY_INDEX(_tty) (MINOR((_tty)->device) - xencons_driver.minor_start)
-#endif
-
-static struct termios *xencons_termios[MAX_NR_CONSOLES];
-static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
-static struct tty_struct *xencons_tty;
-static int xencons_priv_irq;
-static char x_char;
-
-/* Non-privileged receive callback. */
-static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    int           i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    if ( xencons_tty != NULL )
-    {
-        for ( i = 0; i < msg->length; i++ )
-            tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
-        tty_flip_buffer_push(xencons_tty);
-    }
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-/* Privileged and non-privileged transmit worker. */
-static void __xencons_tx_flush(void)
-{
-    int        sz, work_done = 0;
-    ctrl_msg_t msg;
-
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-    {
-        if ( x_char )
-        {
-            kcons_write_dom0(NULL, &x_char, 1);
-            x_char = 0;
-            work_done = 1;
-        }
-
-        while ( wc != wp )
-        {
-            sz = wp - wc;
-            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
-                sz = WBUF_SIZE - WBUF_MASK(wc);
-            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-            wc += sz;
-            work_done = 1;
-        }
-    }
-    else
-    {
-        while ( x_char )
-        {
-            msg.type    = CMSG_CONSOLE;
-            msg.subtype = CMSG_CONSOLE_DATA;
-            msg.length  = 1;
-            msg.msg[0]  = x_char;
-
-            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
-                x_char = 0;
-            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
-                break;
-
-            work_done = 1;
-        }
-
-        while ( wc != wp )
-        {
-            sz = wp - wc;
-            if ( sz > sizeof(msg.msg) )
-                sz = sizeof(msg.msg);
-            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
-                sz = WBUF_SIZE - WBUF_MASK(wc);
-
-            msg.type    = CMSG_CONSOLE;
-            msg.subtype = CMSG_CONSOLE_DATA;
-            msg.length  = sz;
-            memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-            
-            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
-                wc += sz;
-            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
-                break;
-
-            work_done = 1;
-        }
-    }
-
-    if ( work_done && (xencons_tty != NULL) )
-    {
-        wake_up_interruptible(&xencons_tty->write_wait);
-        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-             (xencons_tty->ldisc.write_wakeup != NULL) )
-            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
-    }
-}
-
-/* Non-privileged transmit kicker. */
-static void xencons_tx_flush_task_routine(void *data)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&xencons_lock, flags);
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-/* Privileged receive callback and transmit kicker. */
-static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
-                                          struct pt_regs *regs)
-{
-    static char   rbuf[16];
-    int           i, l;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-
-    if ( xencons_tty != NULL )
-    {
-        /* Receive work. */
-        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
-            for ( i = 0; i < l; i++ )
-                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
-        if ( xencons_tty->flip.count != 0 )
-            tty_flip_buffer_push(xencons_tty);
-    }
-
-    /* Transmit work. */
-    __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    return IRQ_HANDLED;
-}
-
-static int xencons_write_room(struct tty_struct *tty)
-{
-    return WBUF_SIZE - (wp - wc);
-}
-
-static int xencons_chars_in_buffer(struct tty_struct *tty)
-{
-    return wp - wc;
-}
-
-static void xencons_send_xchar(struct tty_struct *tty, char ch)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    x_char = ch;
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void xencons_throttle(struct tty_struct *tty)
-{
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( I_IXOFF(tty) )
-        xencons_send_xchar(tty, STOP_CHAR(tty));
-}
-
-static void xencons_unthrottle(struct tty_struct *tty)
-{
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( I_IXOFF(tty) )
-    {
-        if ( x_char != 0 )
-            x_char = 0;
-        else
-            xencons_send_xchar(tty, START_CHAR(tty));
-    }
-}
-
-static void xencons_flush_buffer(struct tty_struct *tty)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    wc = wp = 0;
-    spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static inline int __xencons_put_char(int ch)
-{
-    char _ch = (char)ch;
-    if ( (wp - wc) == WBUF_SIZE )
-        return 0;
-    wbuf[WBUF_MASK(wp++)] = _ch;
-    return 1;
-}
-
-static int xencons_write(struct tty_struct *tty, int from_user,
-                       const u_char * buf, int count)
-{
-    int i;
-    unsigned long flags;
-
-    if ( from_user && verify_area(VERIFY_READ, buf, count) )
-        return -EINVAL;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return count;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-
-    for ( i = 0; i < count; i++ )
-    {
-        char ch;
-        if ( from_user )
-            __get_user(ch, buf + i);
-        else
-            ch = buf[i];
-        if ( !__xencons_put_char(ch) )
-            break;
-    }
-
-    if ( i != 0 )
-        __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    return i;
-}
-
-static void xencons_put_char(struct tty_struct *tty, u_char ch)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    (void)__xencons_put_char(ch);
-    spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void xencons_flush_chars(struct tty_struct *tty)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);    
-}
-
-static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-    unsigned long orig_jiffies = jiffies;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    while ( DRV(tty->driver)->chars_in_buffer(tty) )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-        if ( signal_pending(current) )
-            break;
-        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
-            break;
-    }
-    
-    set_current_state(TASK_RUNNING);
-}
-
-static int xencons_open(struct tty_struct *tty, struct file *filp)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return 0;
-
-    MOD_INC_USE_COUNT;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    tty->driver_data = NULL;
-    if ( xencons_tty == NULL )
-        xencons_tty = tty;
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);    
-
-    return 0;
-}
-
-static void xencons_close(struct tty_struct *tty, struct file *filp)
-{
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( tty->count == 1 )
-    {
-        tty->closing = 1;
-        tty_wait_until_sent(tty, 0);
-        if ( DRV(tty->driver)->flush_buffer != NULL )
-            DRV(tty->driver)->flush_buffer(tty);
-        if ( tty->ldisc.flush_buffer != NULL )
-            tty->ldisc.flush_buffer(tty);
-        tty->closing = 0;
-        spin_lock_irqsave(&xencons_lock, flags);
-        xencons_tty = NULL;
-        spin_unlock_irqrestore(&xencons_lock, flags);    
-    }
-
-    MOD_DEC_USE_COUNT;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_operations xencons_ops = {
-    .open = xencons_open,
-    .close = xencons_close,
-    .write = xencons_write,
-    .write_room = xencons_write_room,
-    .put_char = xencons_put_char,
-    .flush_chars = xencons_flush_chars,
-    .chars_in_buffer = xencons_chars_in_buffer,
-    .send_xchar = xencons_send_xchar,
-    .flush_buffer = xencons_flush_buffer,
-    .throttle = xencons_throttle,
-    .unthrottle = xencons_unthrottle,
-    .wait_until_sent = xencons_wait_until_sent,
-};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static const char *xennullcon_startup(void)
-{
-    return NULL;
-}
-
-static int xennullcon_dummy(void)
-{
-    return 0;
-}
-
-#define DUMMY  (void *)xennullcon_dummy
-
-/*
- *  The console `switch' structure for the dummy console
- *
- *  Most of the operations are dummies.
- */
-
-const struct consw xennull_con = {
-    .owner =           THIS_MODULE,
-    .con_startup =     xennullcon_startup,
-    .con_init =                DUMMY,
-    .con_deinit =      DUMMY,
-    .con_clear =       DUMMY,
-    .con_putc =                DUMMY,
-    .con_putcs =       DUMMY,
-    .con_cursor =      DUMMY,
-    .con_scroll =      DUMMY,
-    .con_bmove =       DUMMY,
-    .con_switch =      DUMMY,
-    .con_blank =       DUMMY,
-    .con_font_set =    DUMMY,
-    .con_font_get =    DUMMY,
-    .con_font_default =        DUMMY,
-    .con_font_copy =   DUMMY,
-    .con_set_palette = DUMMY,
-    .con_scrolldelta = DUMMY,
-};
-#endif
-#endif
-
-static int __init xencons_init(void)
-{
-    if ( xc_mode == XC_OFF )
-        return 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
-                                      1 : MAX_NR_CONSOLES);
-    if ( xencons_driver == NULL )
-        return -ENOMEM;
-#else
-    memset(&xencons_driver, 0, sizeof(struct tty_driver));
-    xencons_driver.magic       = TTY_DRIVER_MAGIC;
-    xencons_driver.refcount    = &xencons_refcount;
-    xencons_driver.table       = xencons_table;
-    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
-#endif
-
-    DRV(xencons_driver)->major           = TTY_MAJOR;
-    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
-    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
-    DRV(xencons_driver)->init_termios    = tty_std_termios;
-    DRV(xencons_driver)->flags           = 
-        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
-    DRV(xencons_driver)->termios         = xencons_termios;
-    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
-
-    if ( xc_mode == XC_SERIAL )
-    {
-        DRV(xencons_driver)->name        = "ttyS";
-        DRV(xencons_driver)->minor_start = 64;
-       DRV(xencons_driver)->name_base   = 0;
-    }
-    else
-    {
-        DRV(xencons_driver)->name        = "tty";
-        DRV(xencons_driver)->minor_start = 1;
-       DRV(xencons_driver)->name_base   = 1;
-    }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    tty_set_operations(xencons_driver, &xencons_ops);
-#else
-    xencons_driver.open            = xencons_open;
-    xencons_driver.close           = xencons_close;
-    xencons_driver.write           = xencons_write;
-    xencons_driver.write_room      = xencons_write_room;
-    xencons_driver.put_char        = xencons_put_char;
-    xencons_driver.flush_chars     = xencons_flush_chars;
-    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
-    xencons_driver.send_xchar      = xencons_send_xchar;
-    xencons_driver.flush_buffer    = xencons_flush_buffer;
-    xencons_driver.throttle        = xencons_throttle;
-    xencons_driver.unthrottle      = xencons_unthrottle;
-    xencons_driver.wait_until_sent = xencons_wait_until_sent;
-#endif
-
-    if ( tty_register_driver(DRV(xencons_driver)) )
-        panic("Couldn't register Xen virtual console driver as %s\n",
-              DRV(xencons_driver)->name);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    tty_register_device(xencons_driver, 0, NULL);
-#endif
-
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-    {
-        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
-        (void)request_irq(xencons_priv_irq,
-                          xencons_priv_interrupt, 0, "console", NULL);
-    }
-    else
-    {
-        (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
-    }
-
-    printk("Xen virtual console successfully installed as %s\n",
-           DRV(xencons_driver)->name);
-    
-    return 0;
-}
-
-static void __exit xencons_fini(void)
-{
-    int ret;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    tty_unregister_device(xencons_driver, 0);
-#endif
-
-    if ( (ret = tty_unregister_driver(DRV(xencons_driver))) != 0 )
-        printk(KERN_ERR "Unable to unregister Xen console driver: %d\n", ret);
-
-    if ( xen_start_info.flags & SIF_INITDOMAIN )
-    {
-        free_irq(xencons_priv_irq, NULL);
-        unbind_virq_from_irq(VIRQ_CONSOLE);
-    }
-    else
-    {
-        ctrl_if_unregister_receiver(CMSG_CONSOLE, xencons_rx);
-    }
-}
-
-module_init(xencons_init);
-module_exit(xencons_fini);
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/Makefile
deleted file mode 100644 (file)
index 7b082a0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := evtchn.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/evtchn.c b/linux-2.6.8.1-xen-sparse/drivers/xen/evtchn/evtchn.c
deleted file mode 100644 (file)
index 60170e1..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-/******************************************************************************
- * evtchn.c
- * 
- * Xenolinux driver for receiving and demuxing event-channel signals.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/major.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/poll.h>
-#include <linux/irq.h>
-#include <linux/init.h>
-#include <asm-xen/evtchn.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#include <linux/devfs_fs_kernel.h>
-#define OLD_DEVFS
-#else
-#include <linux/gfp.h>
-#endif
-
-#ifdef OLD_DEVFS
-/* NB. This must be shared amongst drivers if more things go in /dev/xen */
-static devfs_handle_t xen_dev_dir;
-#endif
-
-/* Only one process may open /dev/xen/evtchn at any time. */
-static unsigned long evtchn_dev_inuse;
-
-/* Notification ring, accessed via /dev/xen/evtchn. */
-#define RING_SIZE     2048  /* 2048 16-bit entries */
-#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
-static u16 *ring;
-static unsigned int ring_cons, ring_prod, ring_overflow;
-
-/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
-static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
-static struct fasync_struct *evtchn_async_queue;
-
-/* Which ports is user-space bound to? */
-static u32 bound_ports[32];
-
-static spinlock_t lock;
-
-void evtchn_device_upcall(int port)
-{
-    spin_lock(&lock);
-
-    mask_evtchn(port);
-    clear_evtchn(port);
-
-    if ( ring != NULL )
-    {
-        if ( (ring_prod - ring_cons) < RING_SIZE )
-        {
-            ring[RING_MASK(ring_prod)] = (u16)port;
-            if ( ring_cons == ring_prod++ )
-            {
-                wake_up_interruptible(&evtchn_wait);
-                kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
-            }
-        }
-        else
-        {
-            ring_overflow = 1;
-        }
-    }
-
-    spin_unlock(&lock);
-}
-
-static void __evtchn_reset_buffer_ring(void)
-{
-    /* Initialise the ring to empty. Clear errors. */
-    ring_cons = ring_prod = ring_overflow = 0;
-}
-
-static ssize_t evtchn_read(struct file *file, char *buf,
-                           size_t count, loff_t *ppos)
-{
-    int rc;
-    unsigned int c, p, bytes1 = 0, bytes2 = 0;
-    DECLARE_WAITQUEUE(wait, current);
-
-    add_wait_queue(&evtchn_wait, &wait);
-
-    count &= ~1; /* even number of bytes */
-
-    if ( count == 0 )
-    {
-        rc = 0;
-        goto out;
-    }
-
-    if ( count > PAGE_SIZE )
-        count = PAGE_SIZE;
-
-    for ( ; ; )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-
-        if ( (c = ring_cons) != (p = ring_prod) )
-            break;
-
-        if ( ring_overflow )
-        {
-            rc = -EFBIG;
-            goto out;
-        }
-
-        if ( file->f_flags & O_NONBLOCK )
-        {
-            rc = -EAGAIN;
-            goto out;
-        }
-
-        if ( signal_pending(current) )
-        {
-            rc = -ERESTARTSYS;
-            goto out;
-        }
-
-        schedule();
-    }
-
-    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-    if ( ((c ^ p) & RING_SIZE) != 0 )
-    {
-        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
-        bytes2 = RING_MASK(p) * sizeof(u16);
-    }
-    else
-    {
-        bytes1 = (p - c) * sizeof(u16);
-        bytes2 = 0;
-    }
-
-    /* Truncate chunks according to caller's maximum byte count. */
-    if ( bytes1 > count )
-    {
-        bytes1 = count;
-        bytes2 = 0;
-    }
-    else if ( (bytes1 + bytes2) > count )
-    {
-        bytes2 = count - bytes1;
-    }
-
-    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
-         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-
-    ring_cons += (bytes1 + bytes2) / sizeof(u16);
-
-    rc = bytes1 + bytes2;
-
- out:
-    __set_current_state(TASK_RUNNING);
-    remove_wait_queue(&evtchn_wait, &wait);
-    return rc;
-}
-
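The (c ^ p) & RING_SIZE test above is the usual trick for detecting a wrapped region when both indices are free-running and at most RING_SIZE entries can be outstanding: the readable data can only cross the end of the array when the two indices fall in different RING_SIZE-sized blocks of the index space, i.e. when their RING_SIZE bit differs, and splitting at the array boundary in that case is always correct. As a worked example with invented numbers, c = 2040 and p = 2055 give a non-zero result, so the 15 outstanding entries are returned as two chunks: bytes1 = (2048 - 2040) * 2 = 16 and bytes2 = (2055 & 2047) * 2 = 14.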
-static ssize_t evtchn_write(struct file *file, const char *buf,
-                            size_t count, loff_t *ppos)
-{
-    int  rc, i;
-    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
-
-    if ( kbuf == NULL )
-        return -ENOMEM;
-
-    count &= ~1; /* even number of bytes */
-
-    if ( count == 0 )
-    {
-        rc = 0;
-        goto out;
-    }
-
-    if ( count > PAGE_SIZE )
-        count = PAGE_SIZE;
-
-    if ( copy_from_user(kbuf, buf, count) != 0 )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-
-    spin_lock_irq(&lock);
-    for ( i = 0; i < (count/2); i++ )
-        if ( test_bit(kbuf[i], (unsigned long *)&bound_ports[0]) )
-            unmask_evtchn(kbuf[i]);
-    spin_unlock_irq(&lock);
-
-    rc = count;
-
- out:
-    free_page((unsigned long)kbuf);
-    return rc;
-}
-
-static int evtchn_ioctl(struct inode *inode, struct file *file,
-                        unsigned int cmd, unsigned long arg)
-{
-    int rc = 0;
-    
-    spin_lock_irq(&lock);
-    
-    switch ( cmd )
-    {
-    case EVTCHN_RESET:
-        __evtchn_reset_buffer_ring();
-        break;
-    case EVTCHN_BIND:
-        if ( !test_and_set_bit(arg, (unsigned long *)&bound_ports[0]) )
-            unmask_evtchn(arg);
-        else
-            rc = -EINVAL;
-        break;
-    case EVTCHN_UNBIND:
-        if ( test_and_clear_bit(arg, (unsigned long *)&bound_ports[0]) )
-            mask_evtchn(arg);
-        else
-            rc = -EINVAL;
-        break;
-    default:
-        rc = -ENOSYS;
-        break;
-    }
-
-    spin_unlock_irq(&lock);   
-
-    return rc;
-}
-
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-{
-    unsigned int mask = POLLOUT | POLLWRNORM;
-    poll_wait(file, &evtchn_wait, wait);
-    if ( ring_cons != ring_prod )
-        mask |= POLLIN | POLLRDNORM;
-    if ( ring_overflow )
-        mask = POLLERR;
-    return mask;
-}
-
-static int evtchn_fasync(int fd, struct file *filp, int on)
-{
-    return fasync_helper(fd, filp, on, &evtchn_async_queue);
-}
-
-static int evtchn_open(struct inode *inode, struct file *filp)
-{
-    u16 *_ring;
-
-    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
-        return -EBUSY;
-
-    /* Allocate outside locked region so that we can use GFP_KERNEL. */
-    if ( (_ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
-        return -ENOMEM;
-
-    spin_lock_irq(&lock);
-    ring = _ring;
-    __evtchn_reset_buffer_ring();
-    spin_unlock_irq(&lock);
-
-    MOD_INC_USE_COUNT;
-
-    return 0;
-}
-
-static int evtchn_release(struct inode *inode, struct file *filp)
-{
-    int i;
-
-    spin_lock_irq(&lock);
-    if ( ring != NULL )
-    {
-        free_page((unsigned long)ring);
-        ring = NULL;
-    }
-    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
-        if ( test_and_clear_bit(i, (unsigned long *)&bound_ports[0]) )
-            mask_evtchn(i);
-    spin_unlock_irq(&lock);
-
-    evtchn_dev_inuse = 0;
-
-    MOD_DEC_USE_COUNT;
-
-    return 0;
-}
-
-static struct file_operations evtchn_fops = {
-    owner:    THIS_MODULE,
-    read:     evtchn_read,
-    write:    evtchn_write,
-    ioctl:    evtchn_ioctl,
-    poll:     evtchn_poll,
-    fasync:   evtchn_fasync,
-    open:     evtchn_open,
-    release:  evtchn_release
-};
-
-static struct miscdevice evtchn_miscdev = {
-    .minor        = EVTCHN_MINOR,
-    .name         = "evtchn",
-    .fops         = &evtchn_fops,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    .devfs_name   = "misc/evtchn",
-#endif
-};
-
-static int __init evtchn_init(void)
-{
-#ifdef OLD_DEVFS
-    devfs_handle_t symlink_handle;
-    int            pos;
-    char           link_dest[64];
-#endif
-    int err;
-
-    /* (DEVFS) create '/dev/misc/evtchn'. */
-    err = misc_register(&evtchn_miscdev);
-    if ( err != 0 )
-    {
-        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-        return err;
-    }
-
-#ifdef OLD_DEVFS
-    /* (DEVFS) create directory '/dev/xen'. */
-    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
-
-    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
-    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
-                              &link_dest[3], 
-                              sizeof(link_dest) - 3);
-    if ( pos >= 0 )
-        strncpy(&link_dest[pos], "../", 3);
-
-    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
-    (void)devfs_mk_symlink(xen_dev_dir, 
-                           "evtchn", 
-                           DEVFS_FL_DEFAULT, 
-                           &link_dest[pos],
-                           &symlink_handle, 
-                           NULL);
-
-    /* (DEVFS) automatically destroy the symlink with its destination. */
-    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
-#endif
-
-    printk("Event-channel device installed.\n");
-
-    return 0;
-}
-
-static void evtchn_cleanup(void)
-{
-    misc_deregister(&evtchn_miscdev);
-}
-
-module_init(evtchn_init);
-module_exit(evtchn_cleanup);
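Taken together, the file operations above define a small user-space protocol: bind ports with the EVTCHN_BIND ioctl, read() returns a stream of 16-bit port numbers that have fired, and writing the same 16-bit values back re-unmasks them. A hedged sketch of a consumer loop (error handling elided; EVTCHN_BIND comes from this era's asm-xen/evtchn.h rather than being reproduced here, and the exact device node depends on the devfs/udev setup):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

void evtchn_loop(int port)               /* 'port' as handed out by the control tools */
{
    int fd = open("/dev/xen/evtchn", O_RDWR);
    uint16_t buf[64];

    ioctl(fd, EVTCHN_BIND, port);        /* mark the port as ours and unmask it */

    for (;;) {
        ssize_t n = read(fd, buf, sizeof(buf));  /* blocks until a bound port fires */
        for (ssize_t i = 0; i < n / 2; i++) {
            /* ... handle the notification on buf[i] ... */
        }
        write(fd, buf, n);               /* re-unmask the ports just processed */
    }
}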
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/Makefile
deleted file mode 100644 (file)
index 3279442..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := netback.o control.o interface.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/common.h b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/common.h
deleted file mode 100644 (file)
index 6646f53..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/common.h
- */
-
-#ifndef __NETIF__BACKEND__COMMON_H__
-#define __NETIF__BACKEND__COMMON_H__
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor-ifs/io/netif.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-
-#if 0
-#define ASSERT(_p) \
-    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
-    __LINE__, __FILE__); *(int*)0=0; }
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
-                           __FILE__ , __LINE__ , ## _a )
-#else
-#define ASSERT(_p) ((void)0)
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-typedef struct netif_st {
-    /* Unique identifier for this interface. */
-    domid_t          domid;
-    unsigned int     handle;
-
-    /* Physical parameters of the comms window. */
-    unsigned long    tx_shmem_frame;
-    unsigned long    rx_shmem_frame;
-    unsigned int     evtchn;
-    int              irq;
-
-    /* The shared rings and indexes. */
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
-
-    /* Private indexes into shared ring. */
-    NETIF_RING_IDX rx_req_cons;
-    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-    NETIF_RING_IDX tx_req_cons;
-    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
-
-    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-    unsigned long   credit_bytes;
-    unsigned long   credit_usec;
-    unsigned long   remaining_credit;
-    struct timer_list credit_timeout;
-
-    /* Miscellaneous private stuff. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    /*
-     * DISCONNECT response is deferred until pending requests are ack'ed.
-     * We therefore need to store the id from the original request.
-     */
-    u8               disconnect_rspid;
-    struct netif_st *hash_next;
-    struct list_head list;  /* scheduling list */
-    atomic_t         refcnt;
-    spinlock_t       rx_lock, tx_lock;
-    struct net_device *dev;
-    struct net_device_stats stats;
-
-    struct work_struct work;
-} netif_t;
-
-void netif_create(netif_be_create_t *create);
-void netif_destroy(netif_be_destroy_t *destroy);
-void netif_connect(netif_be_connect_t *connect);
-int  netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
-void netif_disconnect_complete(netif_t *netif);
-netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
-#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define netif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            netif_disconnect_complete(_b);        \
-    } while (0)
-
-void netif_interface_init(void);
-void netif_ctrlif_init(void);
-
-void netif_deschedule(netif_t *netif);
-
-int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-
-#endif /* __NETIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/control.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/control.c
deleted file mode 100644 (file)
index 6ca0ba2..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/control.c
- * 
- * Routines for interfacing with the control plane.
- * 
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    switch ( msg->subtype )
-    {
-    case CMSG_NETIF_BE_CREATE:
-        if ( msg->length != sizeof(netif_be_create_t) )
-            goto parse_error;
-        netif_create((netif_be_create_t *)&msg->msg[0]);
-        break;        
-    case CMSG_NETIF_BE_DESTROY:
-        if ( msg->length != sizeof(netif_be_destroy_t) )
-            goto parse_error;
-        netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
-        break;        
-    case CMSG_NETIF_BE_CONNECT:
-        if ( msg->length != sizeof(netif_be_connect_t) )
-            goto parse_error;
-        netif_connect((netif_be_connect_t *)&msg->msg[0]);
-        break;        
-    case CMSG_NETIF_BE_DISCONNECT:
-        if ( msg->length != sizeof(netif_be_disconnect_t) )
-            goto parse_error;
-        if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
-            return; /* Sending the response is deferred until later. */
-        break;        
-    default:
-        goto parse_error;
-    }
-
-    ctrl_if_send_response(msg);
-    return;
-
- parse_error:
-    DPRINTK("Parse error while reading message subtype %d, len %d\n",
-            msg->subtype, msg->length);
-    msg->length = 0;
-    ctrl_if_send_response(msg);
-}
-
-void netif_ctrlif_init(void)
-{
-    ctrl_msg_t cmsg;
-    netif_be_driver_status_t st;
-
-    (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-
-    /* Send a driver-UP notification to the domain controller. */
-    cmsg.type      = CMSG_NETIF_BE;
-    cmsg.subtype   = CMSG_NETIF_BE_DRIVER_STATUS;
-    cmsg.length    = sizeof(netif_be_driver_status_t);
-    st.status      = NETIF_DRIVER_STATUS_UP;
-    memcpy(cmsg.msg, &st, sizeof(st));
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/interface.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/interface.c
deleted file mode 100644 (file)
index 406d253..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/interface.c
- * 
- * Network-device interface management.
- * 
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-#include <linux/rtnetlink.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#endif
-
-#define NETIF_HASHSZ 1024
-#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
-
-static netif_t *netif_hash[NETIF_HASHSZ];
-
-netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
-{
-    netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
-    while ( (netif != NULL) && 
-            ((netif->domid != domid) || (netif->handle != handle)) )
-        netif = netif->hash_next;
-    return netif;
-}
-
-static void __netif_disconnect_complete(void *arg)
-{
-    netif_t              *netif = (netif_t *)arg;
-    ctrl_msg_t            cmsg;
-    netif_be_disconnect_t disc;
-
-    /*
-     * These can't be done in netif_disconnect() because at that point there
-     * may be outstanding requests in flight whose asynchronous responses
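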
-     * must still be notified to the remote driver.
-     */
-    unbind_evtchn_from_irq(netif->evtchn);
-    vfree(netif->tx); /* Frees netif->rx as well. */
-    rtnl_lock();
-    (void)dev_close(netif->dev);
-    rtnl_unlock();
-
-    /* Construct the deferred response message. */
-    cmsg.type         = CMSG_NETIF_BE;
-    cmsg.subtype      = CMSG_NETIF_BE_DISCONNECT;
-    cmsg.id           = netif->disconnect_rspid;
-    cmsg.length       = sizeof(netif_be_disconnect_t);
-    disc.domid        = netif->domid;
-    disc.netif_handle = netif->handle;
-    disc.status       = NETIF_BE_STATUS_OKAY;
-    memcpy(cmsg.msg, &disc, sizeof(disc));
-
-    /*
-     * Make sure message is constructed /before/ status change, because
-     * after the status change the 'netif' structure could be deallocated at
-     * any time. Also make sure we send the response /after/ status change,
-     * as otherwise a subsequent CONNECT request could spuriously fail if
-     * another CPU doesn't see the status change yet.
-     */
-    mb();
-    if ( netif->status != DISCONNECTING )
-        BUG();
-    netif->status = DISCONNECTED;
-    mb();
-
-    /* Send the successful response. */
-    ctrl_if_send_response(&cmsg);
-}
-
-void netif_disconnect_complete(netif_t *netif)
-{
-    INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
-    schedule_work(&netif->work);
-}
-
-void netif_create(netif_be_create_t *create)
-{
-    int                err = 0;
-    domid_t            domid  = create->domid;
-    unsigned int       handle = create->netif_handle;
-    struct net_device *dev;
-    netif_t          **pnetif, *netif;
-    char               name[IFNAMSIZ] = {};
-
-    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-    if ( dev == NULL )
-    {
-        DPRINTK("Could not create netif: out of memory\n");
-        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    netif = dev->priv;
-    memset(netif, 0, sizeof(*netif));
-    netif->domid  = domid;
-    netif->handle = handle;
-    netif->status = DISCONNECTED;
-    spin_lock_init(&netif->rx_lock);
-    spin_lock_init(&netif->tx_lock);
-    atomic_set(&netif->refcnt, 0);
-    netif->dev = dev;
-
-    netif->credit_bytes = netif->remaining_credit = ~0UL;
-    netif->credit_usec  = 0UL;
-    /*init_ac_timer(&new_vif->credit_timeout);*/
-
-    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
-    while ( *pnetif != NULL )
-    {
-        if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
-        {
-            DPRINTK("Could not create netif: already exists\n");
-            create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
-            kfree(dev);
-            return;
-        }
-        pnetif = &(*pnetif)->hash_next;
-    }
-
-    dev->hard_start_xmit = netif_be_start_xmit;
-    dev->get_stats       = netif_be_get_stats;
-    memcpy(dev->dev_addr, create->mac, ETH_ALEN);
-
-    /* Disable queuing. */
-    dev->tx_queue_len = 0;
-
-    /* Force a different MAC from remote end. */
-    dev->dev_addr[2] ^= 1;
-
-    if ( (err = register_netdev(dev)) != 0 )
-    {
-        DPRINTK("Could not register new net device %s: err=%d\n",
-                dev->name, err);
-        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
-        kfree(dev);
-        return;
-    }
-
-    netif->hash_next = *pnetif;
-    *pnetif = netif;
-
-    DPRINTK("Successfully created netif\n");
-    create->status = NETIF_BE_STATUS_OKAY;
-}
-
-void netif_destroy(netif_be_destroy_t *destroy)
-{
-    domid_t       domid  = destroy->domid;
-    unsigned int  handle = destroy->netif_handle;
-    netif_t     **pnetif, *netif;
-
-    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
-    while ( (netif = *pnetif) != NULL )
-    {
-        if ( (netif->domid == domid) && (netif->handle == handle) )
-        {
-            if ( netif->status != DISCONNECTED )
-                goto still_connected;
-            goto destroy;
-        }
-        pnetif = &netif->hash_next;
-    }
-
-    destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
-    return;
-
- still_connected:
-    destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
-    return;
-
- destroy:
-    *pnetif = netif->hash_next;
-    unregister_netdev(netif->dev);
-    kfree(netif->dev);
-    destroy->status = NETIF_BE_STATUS_OKAY;
-}
-
-void netif_connect(netif_be_connect_t *connect)
-{
-    domid_t       domid  = connect->domid;
-    unsigned int  handle = connect->netif_handle;
-    unsigned int  evtchn = connect->evtchn;
-    unsigned long tx_shmem_frame = connect->tx_shmem_frame;
-    unsigned long rx_shmem_frame = connect->rx_shmem_frame;
-    struct vm_struct *vma;
-    pgprot_t      prot;
-    int           error;
-    netif_t      *netif;
-#if 0
-    struct net_device *eth0_dev;
-#endif
-
-    netif = netif_find_by_handle(domid, handle);
-    if ( unlikely(netif == NULL) )
-    {
-        DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n", 
-                connect->domid, connect->netif_handle); 
-        connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    if ( netif->status != DISCONNECTED )
-    {
-        connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
-        return;
-    }
-
-    if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
-    {
-        connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
-    error  = direct_remap_area_pages(&init_mm, 
-                                     VMALLOC_VMADDR(vma->addr),
-                                     tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
-                                     prot, domid);
-    error |= direct_remap_area_pages(&init_mm, 
-                                     VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
-                                     rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
-                                     prot, domid);
-    if ( error != 0 )
-    {
-        if ( error == -ENOMEM )
-            connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
-        else if ( error == -EFAULT )
-            connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
-        else
-            connect->status = NETIF_BE_STATUS_ERROR;
-        vfree(vma->addr);
-        return;
-    }
-
-    netif->evtchn         = evtchn;
-    netif->irq            = bind_evtchn_to_irq(evtchn);
-    netif->tx_shmem_frame = tx_shmem_frame;
-    netif->rx_shmem_frame = rx_shmem_frame;
-    netif->tx             = 
-        (netif_tx_interface_t *)vma->addr;
-    netif->rx             = 
-        (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
-    netif->status         = CONNECTED;
-    netif_get(netif);
-
-    netif->tx->resp_prod = netif->rx->resp_prod = 0;
-
-    rtnl_lock();
-    (void)dev_open(netif->dev);
-    rtnl_unlock();
-
-    (void)request_irq(netif->irq, netif_be_int, 0, netif->dev->name, netif);
-    netif_start_queue(netif->dev);
-
-    connect->status = NETIF_BE_STATUS_OKAY;
-}
-
-int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
-{
-    domid_t       domid  = disconnect->domid;
-    unsigned int  handle = disconnect->netif_handle;
-    netif_t      *netif;
-
-    netif = netif_find_by_handle(domid, handle);
-    if ( unlikely(netif == NULL) )
-    {
-        DPRINTK("netif_disconnect attempted for non-existent netif"
-                " (%u,%u)\n", disconnect->domid, disconnect->netif_handle); 
-        disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return 1; /* Caller will send response error message. */
-    }
-
-    if ( netif->status == CONNECTED )
-    {
-        netif->status = DISCONNECTING;
-        netif->disconnect_rspid = rsp_id;
-        wmb(); /* Let other CPUs see the status change. */
-        netif_stop_queue(netif->dev);
-        free_irq(netif->irq, netif);
-        netif_deschedule(netif);
-        netif_put(netif);
-        return 0; /* Caller should not send response message. */
-    }
-
-    disconnect->status = NETIF_BE_STATUS_OKAY;
-    return 1;
-}
-
-void netif_interface_init(void)
-{
-    memset(netif_hash, 0, sizeof(netif_hash));
-}
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
deleted file mode 100644 (file)
index 143ccff..0000000
+++ /dev/null
@@ -1,823 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/main.c
- * 
- * Back-end of the driver for virtual network devices. This portion of the
- * driver exports a 'unified' network-device interface that can be accessed
- * by any operating system that implements a compatible front end. A 
- * reference front-end implementation can be found in:
- *  arch/xen/drivers/netif/frontend
- * 
- * Copyright (c) 2002-2004, K A Fraser
- */
-
-#include "common.h"
-
-static void netif_page_release(struct page *page);
-static void netif_skb_release(struct sk_buff *skb);
-static void make_tx_response(netif_t *netif, 
-                             u16      id,
-                             s8       st);
-static int  make_rx_response(netif_t *netif, 
-                             u16      id, 
-                             s8       st,
-                             memory_t addr,
-                             u16      size);
-
-static void net_tx_action(unsigned long unused);
-static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-
-static void net_rx_action(unsigned long unused);
-static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-
-static struct sk_buff_head rx_queue;
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
-static unsigned char rx_notify[NR_EVENT_CHANNELS];
-
-/* Don't currently gate addition of an interface to the tx scheduling list. */
-#define tx_work_exists(_if) (1)
-
-#define MAX_PENDING_REQS 256
-static unsigned long mmap_vstart;
-#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
-
-#define PKT_MIN_LEN (ETH_HLEN + 20)
-#define PKT_PROT_LEN 64
-
-static struct {
-    netif_tx_request_t req;
-    netif_t *netif;
-} pending_tx_info[MAX_PENDING_REQS];
-static u16 pending_ring[MAX_PENDING_REQS];
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-
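Read literally, NR_PENDING_REQS counts the request slots currently in flight: pending_ring[] serves as a ring of free slot ids, so pending_prod - pending_cons is the number of free ids parked in it, and MAX_PENDING_REQS minus that difference is the number handed out and not yet returned. A hedged sketch of the allocate/return pair this implies (helper names invented for this note; the real call sites are open-coded further down the file):

/* Illustration of the assumed idiom, not code from this file. */
static u16 pending_alloc(void)   /* caller first checks NR_PENDING_REQS < MAX_PENDING_REQS */
{
    return pending_ring[MASK_PEND_IDX(pending_cons++)];
}

static void pending_free(u16 idx)
{
    pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
}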
-/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-static u16 dealloc_ring[MAX_PENDING_REQS];
-static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
-static PEND_RING_IDX dealloc_prod, dealloc_cons;
-
-static struct sk_buff_head tx_queue;
-static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
-
-static struct list_head net_schedule_list;
-static spinlock_t net_schedule_list_lock;
-
-#define MAX_MFN_ALLOC 64
-static unsigned long mfn_list[MAX_MFN_ALLOC];
-static unsigned int alloc_index = 0;
-static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
-
-static void __refresh_mfn_list(void)
-{
-    int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
-                                    mfn_list, MAX_MFN_ALLOC, 0);
-    if ( unlikely(ret != MAX_MFN_ALLOC) )
-        BUG();
-    alloc_index = MAX_MFN_ALLOC;
-}
-
-static unsigned long get_new_mfn(void)
-{
-    unsigned long mfn, flags;
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( alloc_index == 0 )
-        __refresh_mfn_list();
-    mfn = mfn_list[--alloc_index];
-    spin_unlock_irqrestore(&mfn_lock, flags);
-    return mfn;
-}
-
-static void dealloc_mfn(unsigned long mfn)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( alloc_index != MAX_MFN_ALLOC )
-        mfn_list[alloc_index++] = mfn;
-    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
-                                    &mfn, 1, 0) != 1 )
-        BUG();
-    spin_unlock_irqrestore(&mfn_lock, flags);
-}
-
-static inline void maybe_schedule_tx_action(void)
-{
-    smp_mb();
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&net_schedule_list) )
-        tasklet_schedule(&net_tx_tasklet);
-}
-
-/*
- * A gross way of confirming the origin of an skb data page. The slab
- * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
- */
-static inline int is_xen_skb(struct sk_buff *skb)
-{
-    extern kmem_cache_t *skbuff_cachep;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-#else
-    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
-#endif
-    return (cp == skbuff_cachep);
-}
-
-int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-    netif_t *netif = (netif_t *)dev->priv;
-
-    ASSERT(skb->dev == dev);
-
-    /* Drop the packet if the target domain has no receive buffers. */
-    if ( (netif->rx_req_cons == netif->rx->req_prod) ||
-         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
-        goto drop;
-
-    /*
-     * We do not copy the packet unless:
-     *  1. The data is shared; or
-     *  2. The data is not allocated from our special cache.
-     * NB. We also couldn't cope with fragmented packets, but we won't get
-     *     any because we do not advertise the NETIF_F_SG feature.
-     */
-    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
-    {
-        int hlen = skb->data - skb->head;
-        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
-        if ( unlikely(nskb == NULL) )
-            goto drop;
-        skb_reserve(nskb, hlen);
-        __skb_put(nskb, skb->len);
-        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
-        nskb->dev = skb->dev;
-        dev_kfree_skb(skb);
-        skb = nskb;
-    }
-
-    netif->rx_req_cons++;
-
-    skb_queue_tail(&rx_queue, skb);
-    tasklet_schedule(&net_rx_tasklet);
-
-    return 0;
-
- drop:
-    netif->stats.tx_dropped++;
-    dev_kfree_skb(skb);
-    return 0;
-}
-
-#if 0
-static void xen_network_done_notify(void)
-{
-    static struct net_device *eth0_dev = NULL;
-    if ( unlikely(eth0_dev == NULL) )
-        eth0_dev = __dev_get_by_name("eth0");
-    netif_rx_schedule(eth0_dev);
-}
-/* 
- * Add following to poll() function in NAPI driver (Tigon3 is example):
- *  if ( xen_network_done() )
- *      tg3_enable_ints(tp); 
- */
-int xen_network_done(void)
-{
-    return skb_queue_empty(&rx_queue);
-}
-#endif
-
-static void net_rx_action(unsigned long unused)
-{
-    netif_t *netif;
-    s8 status;
-    u16 size, id, evtchn;
-    mmu_update_t *mmu;
-    multicall_entry_t *mcl;
-    unsigned long vdata, mdata, new_mfn;
-    struct sk_buff_head rxq;
-    struct sk_buff *skb;
-    u16 notify_list[NETIF_RX_RING_SIZE];
-    int notify_nr = 0;
-
-    skb_queue_head_init(&rxq);
-
-    mcl = rx_mcl;
-    mmu = rx_mmu;
-    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
-    {
-        netif   = (netif_t *)skb->dev->priv;
-        vdata   = (unsigned long)skb->data;
-        mdata   = virt_to_machine(vdata);
-        new_mfn = get_new_mfn();
-        
-        /*
-         * Set the new P2M table entry before reassigning the old data page.
-         * Heed the comment in pgtable-2level.h:pte_page(). :-)
-         */
-        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-        
-        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;  
-        mmu[1].ptr  = MMU_EXTENDED_COMMAND;
-        mmu[1].val  = MMUEXT_SET_FOREIGNDOM;      
-        mmu[1].val |= (unsigned long)netif->domid << 16;
-        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
-        mmu[2].val  = MMUEXT_REASSIGN_PAGE;
-
-        mcl[0].op = __HYPERVISOR_update_va_mapping;
-        mcl[0].args[0] = vdata >> PAGE_SHIFT;
-        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
-        mcl[0].args[2] = 0;
-        mcl[1].op = __HYPERVISOR_mmu_update;
-        mcl[1].args[0] = (unsigned long)mmu;
-        mcl[1].args[1] = 3;
-        mcl[1].args[2] = 0;
-
-        mcl += 2;
-        mmu += 3;
-
-        __skb_queue_tail(&rxq, skb);
-
-        /* Filled the batch queue? */
-        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
-            break;
-    }
-
-    if ( mcl == rx_mcl )
-        return;
-
-    mcl[-2].args[2] = UVMF_FLUSH_TLB;
-    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
-        BUG();
-
-    mcl = rx_mcl;
-    mmu = rx_mmu;
-    while ( (skb = __skb_dequeue(&rxq)) != NULL )
-    {
-        netif   = (netif_t *)skb->dev->priv;
-        size    = skb->tail - skb->data;
-
-        /* Rederive the machine addresses. */
-        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
-        mdata   = ((mmu[2].ptr & PAGE_MASK) |
-                   ((unsigned long)skb->data & ~PAGE_MASK));
-        
-        atomic_set(&(skb_shinfo(skb)->dataref), 1);
-        skb_shinfo(skb)->nr_frags = 0;
-        skb_shinfo(skb)->frag_list = NULL;
-
-        netif->stats.tx_bytes += size;
-        netif->stats.tx_packets++;
-
-        /* The update_va_mapping() must not fail. */
-        if ( unlikely(mcl[0].args[5] != 0) )
-            BUG();
-
-        /* Check the reassignment error code. */
-        status = NETIF_RSP_OKAY;
-        if ( unlikely(mcl[1].args[5] != 0) )
-        {
-            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
-            dealloc_mfn(mdata >> PAGE_SHIFT);
-            status = NETIF_RSP_ERROR;
-        }
-
-        evtchn = netif->evtchn;
-        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
-        if ( make_rx_response(netif, id, status, mdata, size) &&
-             (rx_notify[evtchn] == 0) )
-        {
-            rx_notify[evtchn] = 1;
-            notify_list[notify_nr++] = evtchn;
-        }
-
-        dev_kfree_skb(skb);
-
-        mcl += 2;
-        mmu += 3;
-    }
-
-    while ( notify_nr != 0 )
-    {
-        evtchn = notify_list[--notify_nr];
-        rx_notify[evtchn] = 0;
-        notify_via_evtchn(evtchn);
-    }
-
-    /* More work to do? */
-    if ( !skb_queue_empty(&rx_queue) )
-        tasklet_schedule(&net_rx_tasklet);
-#if 0
-    else
-        xen_network_done_notify();
-#endif
-}
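
net_rx_action() above is the batched half of the receive path: the first loop converts every queued skb into a multicall entry plus three MMU updates and parks the skb on a local list, one HYPERVISOR_multicall() submits the whole batch, and the second loop walks the parked skbs and inspects each entry's status word. A minimal userspace sketch of that two-pass shape, with a made-up submit_batch() standing in for the hypercall (none of these names are Xen APIs):

    #include <stdio.h>

    #define BATCH_SIZE 8

    struct op { int arg; int result; };

    /* Stand-in for the single expensive submission (e.g. one multicall). */
    static void submit_batch(struct op *ops, int n)
    {
        for (int i = 0; i < n; i++)
            ops[i].result = (ops[i].arg % 5 == 0) ? -1 : 0; /* fake per-entry status */
    }

    int main(void)
    {
        struct op batch[BATCH_SIZE];
        int item_of[BATCH_SIZE];
        int n = 0;

        /* Pass 1: build the batch, remembering which item each entry belongs to. */
        for (int item = 0; item < 20 && n < BATCH_SIZE; item++) {
            batch[n].arg = item;
            item_of[n] = item;
            n++;
        }
        if (n == 0)
            return 0;

        submit_batch(batch, n);          /* one submission covers the whole batch */

        /* Pass 2: walk the items again and inspect each entry's result slot. */
        for (int i = 0; i < n; i++)
            printf("item %d -> %s\n", item_of[i], batch[i].result ? "failed" : "ok");
        return 0;
    }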
-
-struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-{
-    netif_t *netif = dev->priv;
-    return &netif->stats;
-}
-
-static int __on_net_schedule_list(netif_t *netif)
-{
-    return netif->list.next != NULL;
-}
-
-static void remove_from_net_schedule_list(netif_t *netif)
-{
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( likely(__on_net_schedule_list(netif)) )
-    {
-        list_del(&netif->list);
-        netif->list.next = NULL;
-        netif_put(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
-}
-
-static void add_to_net_schedule_list_tail(netif_t *netif)
-{
-    if ( __on_net_schedule_list(netif) )
-        return;
-
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
-    {
-        list_add_tail(&netif->list, &net_schedule_list);
-        netif_get(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
-}
-
-static inline void netif_schedule_work(netif_t *netif)
-{
-    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
-}
-
-void netif_deschedule(netif_t *netif)
-{
-    remove_from_net_schedule_list(netif);
-}
-
-#if 0
-static void tx_credit_callback(unsigned long data)
-{
-    netif_t *netif = (netif_t *)data;
-    netif->remaining_credit = netif->credit_bytes;
-    netif_schedule_work(netif);
-}
-#endif
-
-static void net_tx_action(unsigned long unused)
-{
-    struct list_head *ent;
-    struct sk_buff *skb;
-    netif_t *netif;
-    netif_tx_request_t txreq;
-    u16 pending_idx;
-    NETIF_RING_IDX i;
-    multicall_entry_t *mcl;
-    PEND_RING_IDX dc, dp;
-    unsigned int data_len;
-
-    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
-        goto skip_dealloc;
-
-    mcl = tx_mcl;
-    while ( dc != dp )
-    {
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-        mcl[0].op = __HYPERVISOR_update_va_mapping;
-        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
-        mcl[0].args[1] = 0;
-        mcl[0].args[2] = 0;
-        mcl++;        
-    }
-
-    mcl[-1].args[2] = UVMF_FLUSH_TLB;
-    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
-        BUG();
-
-    mcl = tx_mcl;
-    while ( dealloc_cons != dp )
-    {
-        /* The update_va_mapping() must not fail. */
-        if ( unlikely(mcl[0].args[5] != 0) )
-            BUG();
-
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-
-        netif = pending_tx_info[pending_idx].netif;
-
-        spin_lock(&netif->tx_lock);
-        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
-                         NETIF_RSP_OKAY);
-        spin_unlock(&netif->tx_lock);
-        
-        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-
-        /*
-         * Scheduling checks must happen after the above response is posted.
-         * This avoids a possible race with a guest OS on another CPU if that
-         * guest is testing against 'resp_prod' when deciding whether to notify
-         * us when it queues additional packets.
-         */
-        mb();
-        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-            add_to_net_schedule_list_tail(netif);
-        
-        netif_put(netif);
-
-        mcl++;
-    }
-
- skip_dealloc:
-    mcl = tx_mcl;
-    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-            !list_empty(&net_schedule_list) )
-    {
-        /* Get a netif from the list with work to do. */
-        ent = net_schedule_list.next;
-        netif = list_entry(ent, netif_t, list);
-        netif_get(netif);
-        remove_from_net_schedule_list(netif);
-
-        /* Work to do? */
-        i = netif->tx_req_cons;
-        if ( (i == netif->tx->req_prod) ||
-             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
-        {
-            netif_put(netif);
-            continue;
-        }
-
-        netif->tx->req_cons = ++netif->tx_req_cons;
-
-        /*
-         * 1. Ensure that we see the request when we copy it.
-         * 2. Ensure that frontend sees updated req_cons before we check
-         *    for more work to schedule.
-         */
-        mb();
-
-        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
-               sizeof(txreq));
-
-#if 0
-        /* Credit-based scheduling. */
-        if ( tx.size > netif->remaining_credit )
-        {
-            s_time_t now = NOW(), next_credit = 
-                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
-            if ( next_credit <= now )
-            {
-                netif->credit_timeout.expires = now;
-                netif->remaining_credit = netif->credit_bytes;
-            }
-            else
-            {
-                netif->remaining_credit = 0;
-                netif->credit_timeout.expires  = next_credit;
-                netif->credit_timeout.data     = (unsigned long)netif;
-                netif->credit_timeout.function = tx_credit_callback;
-                netif->credit_timeout.cpu      = smp_processor_id();
-                add_ac_timer(&netif->credit_timeout);
-                break;
-            }
-        }
-        netif->remaining_credit -= tx.size;
-#endif
-
-        netif_schedule_work(netif);
-
-        if ( unlikely(txreq.size <= PKT_MIN_LEN) || 
-             unlikely(txreq.size > ETH_FRAME_LEN) )
-        {
-            DPRINTK("Bad packet size: %d\n", txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue; 
-        }
-
-        /* No crossing a page boundary as the payload mustn't fragment. */
-        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
-        {
-            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
-                    txreq.addr, txreq.size, 
-                    (txreq.addr &~PAGE_MASK) + txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue;
-        }
-
-        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
-        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
-
-        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
-        {
-            DPRINTK("Can't allocate a skb in start_xmit.\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            break;
-        }
-
-        /* Packets passed to netif_rx() must have some headroom. */
-        skb_reserve(skb, 16);
-
-        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
-        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
-        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
-        mcl[0].args[2] = 0;
-        mcl[0].args[3] = netif->domid;
-        mcl++;
-
-        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
-        pending_tx_info[pending_idx].netif = netif;
-        *((u16 *)skb->data) = pending_idx;
-
-        __skb_queue_tail(&tx_queue, skb);
-
-        pending_cons++;
-
-        /* Filled the batch queue? */
-        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
-            break;
-    }
-
-    if ( mcl == tx_mcl )
-        return;
-
-    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
-        BUG();
-
-    mcl = tx_mcl;
-    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
-    {
-        pending_idx = *((u16 *)skb->data);
-        netif       = pending_tx_info[pending_idx].netif;
-        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
-
-        /* Check the remap error code. */
-        if ( unlikely(mcl[0].args[5] != 0) )
-        {
-            DPRINTK("Bad page frame\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            kfree_skb(skb);
-            mcl++;
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            continue;
-        }
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-
-        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
-
-        __skb_put(skb, data_len);
-        memcpy(skb->data, 
-               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
-               data_len);
-
-        if (data_len < txreq.size) {
-            /* Append the packet payload as a fragment. */
-            skb_shinfo(skb)->frags[0].page        = 
-                virt_to_page(MMAP_VADDR(pending_idx));
-            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
-            skb_shinfo(skb)->frags[0].page_offset = 
-                (txreq.addr + data_len) & ~PAGE_MASK;
-            skb_shinfo(skb)->nr_frags = 1;
-        } else {
-            skb_shinfo(skb)->frags[0].page        = 
-                virt_to_page(MMAP_VADDR(pending_idx));
-            skb->destructor = netif_skb_release;
-        }
-
-        skb->data_len  = txreq.size - data_len;
-        skb->len      += skb->data_len;
-
-        skb->dev      = netif->dev;
-        skb->protocol = eth_type_trans(skb, skb->dev);
-
-        netif->stats.rx_bytes += txreq.size;
-        netif->stats.rx_packets++;
-
-        netif_rx(skb);
-        netif->dev->last_rx = jiffies;
-
-        mcl++;
-    }
-}
-
-static void netif_idx_release(u16 pending_idx)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&dealloc_lock, flags);
-    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
-    spin_unlock_irqrestore(&dealloc_lock, flags);
-
-    tasklet_schedule(&net_tx_tasklet);
-}
-
-static void netif_page_release(struct page *page)
-{
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    /* Ready for next use. */
-    set_page_count(page, 1);
-
-    netif_idx_release(pending_idx);
-}
-
-static void netif_skb_release(struct sk_buff *skb)
-{
-    struct page *page = skb_shinfo(skb)->frags[0].page;
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    netif_idx_release(pending_idx);
-}
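
Both release hooks above defer the real work: they only record the pending index and schedule the TX tasklet, which later unmaps the page and posts the response from net_tx_action(). A stripped-down sketch of that hand-off, with the spinlock and tasklet replaced by plain variables (illustrative names only):

    #include <stdio.h>

    #define MAX_DEFERRED 16

    static unsigned short deferred[MAX_DEFERRED];
    static int n_deferred;
    static int work_scheduled;

    /* Called from "destructor" context: record the slot, ask for a callback. */
    static void idx_release(unsigned short idx)
    {
        deferred[n_deferred++] = idx;     /* real code: under dealloc_lock   */
        work_scheduled = 1;               /* real code: tasklet_schedule()   */
    }

    /* Later, in "tasklet" context: do the heavy cleanup in one batch. */
    static void run_deferred(void)
    {
        for (int i = 0; i < n_deferred; i++)
            printf("unmapping and completing slot %u\n", deferred[i]);
        n_deferred = 0;
        work_scheduled = 0;
    }

    int main(void)
    {
        idx_release(3);
        idx_release(7);
        if (work_scheduled)
            run_deferred();
        return 0;
    }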
-
-#if 0
-long flush_bufs_for_netif(netif_t *netif)
-{
-    NETIF_RING_IDX i;
-
-    /* Return any outstanding receive buffers to the guest OS. */
-    spin_lock(&netif->rx_lock);
-    for ( i = netif->rx_req_cons; 
-          (i != netif->rx->req_prod) &&
-              ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
-          i++ )
-    {
-        make_rx_response(netif,
-                         netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
-                         NETIF_RSP_DROPPED, 0, 0);
-    }
-    netif->rx_req_cons = i;
-    spin_unlock(&netif->rx_lock);
-
-    /*
-     * Flush pending transmit buffers. The guest may still have to wait for
-     * buffers that are queued at a physical NIC.
-     */
-    spin_lock(&netif->tx_lock);
-    for ( i = netif->tx_req_cons; 
-          (i != netif->tx->req_prod) &&
-              ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
-          i++ )
-    {
-        make_tx_response(netif,
-                         netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
-                         NETIF_RSP_DROPPED);
-    }
-    netif->tx_req_cons = i;
-    spin_unlock(&netif->tx_lock);
-
-    return 0;
-}
-#endif
-
-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
-    netif_t *netif = dev_id;
-    if ( tx_work_exists(netif) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
-    return IRQ_HANDLED;
-}
-
-static void make_tx_response(netif_t *netif, 
-                             u16      id,
-                             s8       st)
-{
-    NETIF_RING_IDX i = netif->tx_resp_prod;
-    netif_tx_response_t *resp;
-
-    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
-    resp->id     = id;
-    resp->status = st;
-    wmb();
-    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    if ( i == netif->tx->event )
-        notify_via_evtchn(netif->evtchn);
-}
-
-static int make_rx_response(netif_t *netif, 
-                            u16      id, 
-                            s8       st,
-                            memory_t addr,
-                            u16      size)
-{
-    NETIF_RING_IDX i = netif->rx_resp_prod;
-    netif_rx_response_t *resp;
-
-    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-    resp->addr   = addr;
-    resp->id     = id;
-    resp->status = (s16)size;
-    if ( st < 0 )
-        resp->status = (s16)st;
-    wmb();
-    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    return (i == netif->rx->event);
-}
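
make_tx_response() and make_rx_response() both implement the ring's notification throttle: a response is written, the producer index is published behind a write barrier, and an event-channel notification is sent only when the new producer value matches the 'event' index the frontend asked to be woken at. A userspace analogue, without the real wmb()/mb() barriers (names are illustrative):

    #include <stdio.h>

    struct ring {
        unsigned int resp_prod;   /* response producer, written by the backend */
        unsigned int event;       /* "notify me when resp_prod reaches this"   */
    };

    /* Post one response; returns 1 if the consumer asked to be woken here. */
    static int post_response(struct ring *r)
    {
        /* ...fill in the response slot, then publish it (real code: wmb()/mb())... */
        unsigned int i = ++r->resp_prod;
        return i == r->event;
    }

    int main(void)
    {
        struct ring r = { .resp_prod = 0, .event = 3 };
        for (int n = 0; n < 5; n++)
            if (post_response(&r))
                printf("notify consumer at resp_prod=%u\n", r.resp_prod);
        return 0;
    }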
-
-static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
-{
-    struct list_head *ent;
-    netif_t *netif;
-    int i = 0;
-
-    printk(KERN_ALERT "netif_schedule_list:\n");
-    spin_lock_irq(&net_schedule_list_lock);
-
-    list_for_each ( ent, &net_schedule_list )
-    {
-        netif = list_entry(ent, netif_t, list);
-        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
-               i, netif->rx_req_cons, netif->rx_resp_prod);               
-        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-               netif->tx_req_cons, netif->tx_resp_prod);
-        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
-               netif->rx->req_prod, netif->rx->resp_prod);
-        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-               netif->rx->event, netif->tx->req_prod);
-        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-               netif->tx->resp_prod, netif->tx->event);
-        i++;
-    }
-
-    spin_unlock_irq(&net_schedule_list_lock);
-    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-
-    return IRQ_HANDLED;
-}
-
-static int __init netback_init(void)
-{
-    int i;
-    struct page *page;
-
-    if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
-        !(xen_start_info.flags & SIF_INITDOMAIN) )
-        return 0;
-
-    printk("Initialising Xen netif backend\n");
-
-    skb_queue_head_init(&rx_queue);
-    skb_queue_head_init(&tx_queue);
-
-    netif_interface_init();
-
-    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
-        BUG();
-
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-    {
-        page = virt_to_page(MMAP_VADDR(i));
-        set_page_count(page, 1);
-        SetPageForeign(page, netif_page_release);
-    }
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
-
-    spin_lock_init(&net_schedule_list_lock);
-    INIT_LIST_HEAD(&net_schedule_list);
-
-    netif_ctrlif_init();
-
-    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
-                      netif_be_dbg, SA_SHIRQ, 
-                      "net-be-dbg", &netif_be_dbg);
-
-    return 0;
-}
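
netback_init() pre-fills pending_ring with every slot index and sets pending_prod to MAX_PENDING_REQS, so the ring doubles as a pool of free request slots: consuming an entry allocates a slot and producing one returns it, with the in-use count derived from the two free-running counters. A tiny sketch of that pool discipline (sizes and names here are illustrative):

    #include <stdio.h>

    #define MAX_PENDING 8                          /* must be a power of two */
    #define MASK(i) ((i) & (MAX_PENDING - 1))

    static unsigned short pool[MAX_PENDING];
    static unsigned int prod, cons;

    static unsigned short get_slot(void)   { return pool[MASK(cons++)]; }
    static void put_slot(unsigned short s) { pool[MASK(prod++)] = s; }
    static unsigned int slots_in_use(void) { return MAX_PENDING - (prod - cons); }

    int main(void)
    {
        for (unsigned short i = 0; i < MAX_PENDING; i++)   /* every slot starts free */
            pool[i] = i;
        prod = MAX_PENDING;
        cons = 0;

        unsigned short a = get_slot();
        unsigned short b = get_slot();
        printf("got slots %u and %u, %u in use\n", a, b, slots_in_use());
        put_slot(a);
        printf("after returning one: %u in use\n", slots_in_use());
        return 0;
    }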
-
-static void netback_cleanup(void)
-{
-    BUG();
-}
-
-module_init(netback_init);
-module_exit(netback_cleanup);
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Kconfig b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Kconfig
deleted file mode 100644 (file)
index 334e6c3..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-
-config XENNET
-       tristate "Xen network driver"
-       depends on NETDEVICES && ARCH_XEN
-       help
-         Network driver for Xen
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/Makefile
deleted file mode 100644 (file)
index 7eb07a2..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := netfront.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c
deleted file mode 100644 (file)
index 831572b..0000000
+++ /dev/null
@@ -1,1341 +0,0 @@
-/******************************************************************************
- * Virtual network driver for conversing with remote driver backends.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <net/sock.h>
-#include <net/pkt_sched.h>
-#include <asm/io.h>
-#include <asm-xen/evtchn.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor-ifs/io/netif.h>
-#include <asm/page.h>
-
-#include <net/arp.h>
-#include <net/route.h>
-
-#define DEBUG 0
-
-#ifndef __GFP_NOWARN
-#define __GFP_NOWARN 0
-#endif
-#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
-
-#define init_skb_shinfo(_skb)                         \
-    do {                                              \
-        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
-        skb_shinfo(_skb)->nr_frags = 0;               \
-        skb_shinfo(_skb)->frag_list = NULL;           \
-    } while ( 0 )
-
-/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
-#define RX_HEADROOM 200
-
-/*
- * If the backend driver is pipelining transmit requests then we can be very
- * aggressive in avoiding new-packet notifications -- only need to send a
- * notification if there are no outstanding unreceived responses.
- * If the backend may be buffering our transmit buffers for any reason then we
- * are rather more conservative.
- */
-#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
-#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
-#else
-#define TX_TEST_IDX req_cons  /* conservative: not seen all our requests? */
-#endif
-
-static void network_tx_buf_gc(struct net_device *dev);
-static void network_alloc_rx_buffers(struct net_device *dev);
-
-static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
-
-static struct list_head dev_list;
-
-struct net_private
-{
-    struct list_head list;
-    struct net_device *dev;
-
-    struct net_device_stats stats;
-    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
-    unsigned int tx_full;
-    
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
-
-    spinlock_t   tx_lock;
-    spinlock_t   rx_lock;
-
-    unsigned int handle;
-    unsigned int evtchn;
-    unsigned int irq;
-
-    /* What is the status of our connection to the remote backend? */
-#define BEST_CLOSED       0
-#define BEST_DISCONNECTED 1
-#define BEST_CONNECTED    2
-    unsigned int backend_state;
-
-    /* Is this interface open or closed (down or up)? */
-#define UST_CLOSED        0
-#define UST_OPEN          1
-    unsigned int user_state;
-
-    /* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_MAX_TARGET NETIF_RX_RING_SIZE
-    int rx_target;
-    struct sk_buff_head rx_batch;
-
-    /*
-     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
-     * array is an index into a chain of free entries.
-     */
-    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
-    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
-};
-
-/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
-#define ADD_ID_TO_FREELIST(_list, _id)             \
-    (_list)[(_id)] = (_list)[0];                   \
-    (_list)[0]     = (void *)(unsigned long)(_id);
-#define GET_ID_FROM_FREELIST(_list)                \
- ({ unsigned long _id = (unsigned long)(_list)[0]; \
-    (_list)[0]  = (_list)[_id];                    \
-    (unsigned short)_id; })
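
The two macros above keep a freelist inside the skb-pointer arrays themselves: entry 0 holds the id of the first free slot, and each free slot holds the id of the next, so no separate bookkeeping structure is needed. A self-contained sketch of the same trick (array size and names are illustrative):

    #include <stdio.h>

    #define N 4

    /* list[0] heads a chain of free ids; each free slot stores the next id. */
    static void *list[N + 1];

    static void add_id(unsigned long id)
    {
        list[id] = list[0];
        list[0]  = (void *)id;
    }

    static unsigned long get_id(void)
    {
        unsigned long id = (unsigned long)list[0];
        list[0] = list[id];
        return id;
    }

    int main(void)
    {
        for (unsigned long i = 0; i <= N; i++)    /* chain 1, 2, ..., N */
            list[i] = (void *)(i + 1);

        unsigned long a = get_id();
        unsigned long b = get_id();
        printf("allocated ids %lu and %lu\n", a, b);
        add_id(a);                                /* return id a to the freelist */
        printf("next id is %lu\n", get_id());     /* reuses a */
        return 0;
    }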
-
-static char *status_name[] = {
-    [NETIF_INTERFACE_STATUS_CLOSED]       = "closed",
-    [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
-    [NETIF_INTERFACE_STATUS_CONNECTED]    = "connected",
-    [NETIF_INTERFACE_STATUS_CHANGED]      = "changed",
-};
-
-static char *be_state_name[] = {
-    [BEST_CLOSED]       = "closed",
-    [BEST_DISCONNECTED] = "disconnected",
-    [BEST_CONNECTED]    = "connected",
-};
-
-#if DEBUG
-#define DPRINTK(fmt, args...) \
-    printk(KERN_ALERT "[XEN] (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-#else
-#define DPRINTK(fmt, args...) ((void)0)
-#endif
-#define IPRINTK(fmt, args...) \
-    printk(KERN_INFO "[XEN] " fmt, ##args)
-#define WPRINTK(fmt, args...) \
-    printk(KERN_WARNING "[XEN] " fmt, ##args)
-
-static struct net_device *find_dev_by_handle(unsigned int handle)
-{
-    struct list_head *ent;
-    struct net_private *np;
-    list_for_each ( ent, &dev_list )
-    {
-        np = list_entry(ent, struct net_private, list);
-        if ( np->handle == handle )
-            return np->dev;
-    }
-    return NULL;
-}
-
-/** Network interface info. */
-struct netif_ctrl {
-    /** Number of interfaces. */
-    int interface_n;
-    /** Number of connected interfaces. */
-    int connected_n;
-    /** Error code. */
-    int err;
-    int up;
-};
-
-static struct netif_ctrl netctrl;
-
-static void netctrl_init(void)
-{
-    memset(&netctrl, 0, sizeof(netctrl));
-    netctrl.up = NETIF_DRIVER_STATUS_DOWN;
-}
-
-/** Get or set a network interface error.
- */
-static int netctrl_err(int err)
-{
-    if ( (err < 0) && !netctrl.err )
-        netctrl.err = err;
-    return netctrl.err;
-}
-
-/** Test if all network interfaces are connected.
- *
- * @return 1 if all connected, 0 if not, negative error code otherwise
- */
-static int netctrl_connected(void)
-{
-    int ok;
-
-    if ( netctrl.err )
-        ok = netctrl.err;
-    else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
-        ok = (netctrl.connected_n == netctrl.interface_n);
-    else
-        ok = 0;
-
-    return ok;
-}
-
-/** Count the connected network interfaces.
- *
- * @return connected count
- */
-static int netctrl_connected_count(void)
-{
-    
-    struct list_head *ent;
-    struct net_private *np;
-    unsigned int connected;
-
-    connected = 0;
-    
-    list_for_each(ent, &dev_list) {
-        np = list_entry(ent, struct net_private, list);
-        if (np->backend_state == BEST_CONNECTED)
-            connected++;
-    }
-
-    netctrl.connected_n = connected;
-    DPRINTK("> connected_n=%d interface_n=%d\n",
-            netctrl.connected_n, netctrl.interface_n);
-    return connected;
-}
-
-/** Send a packet on a net device to encourage switches to learn the
- * MAC. We send a fake ARP request.
- *
- * @param dev device
- * @return 0 on success, error code otherwise
- */
-static int vif_wake(struct net_device *dev)
-{
-    struct sk_buff *skb;
-    u32             src_ip, dst_ip;
-
-    dst_ip = INADDR_BROADCAST;
-    src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-
-    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-                     dst_ip, dev, src_ip,
-                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
-                     /*target_hw*/ dev->dev_addr);
-    if ( skb == NULL )
-        return -ENOMEM;
-
-    return dev_queue_xmit(skb);
-}
-
-static int network_open(struct net_device *dev)
-{
-    struct net_private *np = dev->priv;
-
-    memset(&np->stats, 0, sizeof(np->stats));
-
-    np->user_state = UST_OPEN;
-
-    network_alloc_rx_buffers(dev);
-    np->rx->event = np->rx_resp_cons + 1;
-
-    netif_start_queue(dev);
-
-    return 0;
-}
-
-static void network_tx_buf_gc(struct net_device *dev)
-{
-    NETIF_RING_IDX i, prod;
-    unsigned short id;
-    struct net_private *np = dev->priv;
-    struct sk_buff *skb;
-
-    if ( np->backend_state != BEST_CONNECTED )
-        return;
-
-    do {
-        prod = np->tx->resp_prod;
-        rmb(); /* Ensure we see responses up to 'rp'. */
-
-        for ( i = np->tx_resp_cons; i != prod; i++ )
-        {
-            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
-            skb = np->tx_skbs[id];
-            ADD_ID_TO_FREELIST(np->tx_skbs, id);
-            dev_kfree_skb_irq(skb);
-        }
-        
-        np->tx_resp_cons = prod;
-        
-        /*
-         * Set a new event, then check for race with update of tx_cons. Note
-         * that it is essential to schedule a callback, no matter how few
-         * buffers are pending. Even if there is space in the transmit ring,
-         * higher layers may be blocked because too much data is outstanding:
-         * in such cases notification from Xen is likely to be the only kick
-         * that we'll get.
-         */
-        np->tx->event = 
-            prod + ((np->tx->req_prod - prod) >> 1) + 1;
-        mb();
-    }
-    while ( prod != np->tx->resp_prod );
-
-    if ( np->tx_full && 
-         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
-    {
-        np->tx_full = 0;
-        if ( np->user_state == UST_OPEN )
-            netif_wake_queue(dev);
-    }
-}
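
The interesting line in network_tx_buf_gc() is the event update: rather than asking for an interrupt per packet, the frontend asks to be notified again roughly halfway through the requests still outstanding. A one-function sketch of that calculation (illustrative, not a kernel API):

    #include <stdio.h>

    /* Ask to be re-notified about halfway through the still-outstanding requests. */
    static unsigned int next_tx_event(unsigned int resp_prod, unsigned int req_prod)
    {
        return resp_prod + ((req_prod - resp_prod) >> 1) + 1;
    }

    int main(void)
    {
        /* 10 requests posted, 4 responses consumed so far. */
        printf("next event at %u\n", next_tx_event(4, 10));   /* prints 8 */
        return 0;
    }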
-
-
-static void network_alloc_rx_buffers(struct net_device *dev)
-{
-    unsigned short id;
-    struct net_private *np = dev->priv;
-    struct sk_buff *skb;
-    int i, batch_target;
-    NETIF_RING_IDX req_prod = np->rx->req_prod;
-
-    if ( unlikely(np->backend_state != BEST_CONNECTED) )
-        return;
-
-    /*
-     * Allocate skbuffs greedily, even though we batch updates to the
-     * receive ring. This creates a less bursty demand on the memory allocator,
-     * so should reduce the chance of failed allocation requests both for
-     * ourselves and for other kernel subsystems.
-     */
-    batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
-    for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
-    {
-        if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
-            break;
-        __skb_queue_tail(&np->rx_batch, skb);
-    }
-
-    /* Is the batch large enough to be worthwhile? */
-    if ( i < (np->rx_target/2)  )
-        return;
-
-    for ( i = 0; ; i++ )
-    {
-        if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
-            break;
-
-        skb->dev = dev;
-
-        id = GET_ID_FROM_FREELIST(np->rx_skbs);
-
-        np->rx_skbs[id] = skb;
-        
-        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
-        
-        rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
-
-       /* Remove this page from pseudo phys map before passing back to Xen. */
-       phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] 
-           = INVALID_P2M_ENTRY;
-
-        rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
-        rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
-        rx_mcl[i].args[1] = 0;
-        rx_mcl[i].args[2] = 0;
-    }
-
-    /*
-     * We may have allocated buffers which have entries outstanding in the page
-     * update queue -- make sure we flush those first!
-     */
-    flush_page_update_queue();
-
-    /* After all PTEs have been zapped we blow away stale TLB entries. */
-    rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
-
-    /* Give away a batch of pages. */
-    rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
-    rx_mcl[i].args[0] = MEMOP_decrease_reservation;
-    rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
-    rx_mcl[i].args[2] = (unsigned long)i;
-    rx_mcl[i].args[3] = 0;
-    rx_mcl[i].args[4] = DOMID_SELF;
-
-    /* Zap PTEs and give away pages in one big multicall. */
-    (void)HYPERVISOR_multicall(rx_mcl, i+1);
-
-    /* Check return status of HYPERVISOR_dom_mem_op(). */
-    if ( unlikely(rx_mcl[i].args[5] != i) )
-        panic("Unable to reduce memory reservation\n");
-
-    /* Above is a suitable barrier to ensure backend will see requests. */
-    np->rx->req_prod = req_prod + i;
-
-    /* Adjust our floating fill target if we risked running out of buffers. */
-    if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
-         ((np->rx_target *= 2) > RX_MAX_TARGET) )
-        np->rx_target = RX_MAX_TARGET;
-}
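
The refill path grows its fill target multiplicatively when the ring nearly ran dry, while netif_poll() below shrinks it linearly when responses keep arriving with room to spare, clamping both ways to [RX_MIN_TARGET, RX_MAX_TARGET]. A small sketch of that exponential-increase / linear-decrease control (values are illustrative):

    #include <stdio.h>

    #define RX_MIN_TARGET 8
    #define RX_MAX_TARGET 256

    static int rx_target = RX_MIN_TARGET;

    /* Exponential increase when the ring nearly ran dry... */
    static void pressure_up(void)
    {
        if ((rx_target *= 2) > RX_MAX_TARGET)
            rx_target = RX_MAX_TARGET;
    }

    /* ...linear decrease when refills keep arriving faster than needed. */
    static void pressure_down(void)
    {
        if (--rx_target < RX_MIN_TARGET)
            rx_target = RX_MIN_TARGET;
    }

    int main(void)
    {
        pressure_up();
        pressure_up();
        printf("after two shortfalls: target=%d\n", rx_target);   /* 32 */
        pressure_down();
        printf("after one quiet poll: target=%d\n", rx_target);   /* 31 */
        return 0;
    }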
-
-
-static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-    unsigned short id;
-    struct net_private *np = (struct net_private *)dev->priv;
-    netif_tx_request_t *tx;
-    NETIF_RING_IDX i;
-
-    if ( unlikely(np->tx_full) )
-    {
-        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
-        netif_stop_queue(dev);
-        goto drop;
-    }
-
-    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
-                  PAGE_SIZE) )
-    {
-        struct sk_buff *nskb;
-        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
-            goto drop;
-        skb_put(nskb, skb->len);
-        memcpy(nskb->data, skb->data, skb->len);
-        nskb->dev = skb->dev;
-        dev_kfree_skb(skb);
-        skb = nskb;
-    }
-    
-    spin_lock_irq(&np->tx_lock);
-
-    if ( np->backend_state != BEST_CONNECTED )
-    {
-        spin_unlock_irq(&np->tx_lock);
-        goto drop;
-    }
-
-    i = np->tx->req_prod;
-
-    id = GET_ID_FROM_FREELIST(np->tx_skbs);
-    np->tx_skbs[id] = skb;
-
-    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
-
-    tx->id   = id;
-    tx->addr = virt_to_machine(skb->data);
-    tx->size = skb->len;
-
-    wmb(); /* Ensure that backend will see the request. */
-    np->tx->req_prod = i + 1;
-
-    network_tx_buf_gc(dev);
-
-    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
-    {
-        np->tx_full = 1;
-        netif_stop_queue(dev);
-    }
-
-    spin_unlock_irq(&np->tx_lock);
-
-    np->stats.tx_bytes += skb->len;
-    np->stats.tx_packets++;
-
-    /* Only notify Xen if we really have to. */
-    mb();
-    if ( np->tx->TX_TEST_IDX == i )
-        notify_via_evtchn(np->evtchn);
-
-    return 0;
-
- drop:
-    np->stats.tx_dropped++;
-    dev_kfree_skb(skb);
-    return 0;
-}
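
A transmit request describes a single machine page, so network_start_xmit() copies the packet into a fresh buffer whenever the linear data would straddle a page boundary. The check itself is just address arithmetic; a standalone sketch (PAGE_SIZE hard-coded for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* True if [data, data+len) spills past the end of the page containing 'data'. */
    static int crosses_page(unsigned long data, unsigned long len)
    {
        return ((data & ~PAGE_MASK) + len) >= PAGE_SIZE;
    }

    int main(void)
    {
        printf("%d\n", crosses_page(0x1000, 100));    /* 0: fits in one page */
        printf("%d\n", crosses_page(0x1f80, 200));    /* 1: spills into next */
        return 0;
    }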
-
-
-static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
-    struct net_device *dev = dev_id;
-    struct net_private *np = dev->priv;
-    unsigned long flags;
-
-    spin_lock_irqsave(&np->tx_lock, flags);
-    network_tx_buf_gc(dev);
-    spin_unlock_irqrestore(&np->tx_lock, flags);
-
-    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
-         (np->user_state == UST_OPEN) )
-        netif_rx_schedule(dev);
-
-    return IRQ_HANDLED;
-}
-
-
-static int netif_poll(struct net_device *dev, int *pbudget)
-{
-    struct net_private *np = dev->priv;
-    struct sk_buff *skb, *nskb;
-    netif_rx_response_t *rx;
-    NETIF_RING_IDX i, rp;
-    mmu_update_t *mmu = rx_mmu;
-    multicall_entry_t *mcl = rx_mcl;
-    int work_done, budget, more_to_do = 1;
-    struct sk_buff_head rxq;
-    unsigned long flags;
-
-    spin_lock(&np->rx_lock);
-
-    if ( np->backend_state != BEST_CONNECTED )
-    {
-        spin_unlock(&np->rx_lock);
-        return 0;
-    }
-
-    skb_queue_head_init(&rxq);
-
-    if ( (budget = *pbudget) > dev->quota )
-        budget = dev->quota;
-
-    rp = np->rx->resp_prod;
-    rmb(); /* Ensure we see queued responses up to 'rp'. */
-
-    for ( i = np->rx_resp_cons, work_done = 0; 
-          (i != rp) && (work_done < budget); 
-          i++, work_done++ )
-    {
-        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-
-        /*
-         * An error here is very odd. Usually indicates a backend bug,
-         * low-memory condition, or that we didn't have reservation headroom.
-         * Whatever - print an error and queue the id again straight away.
-         */
-        if ( unlikely(rx->status <= 0) )
-        {
-           printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
-            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
-            wmb();
-            np->rx->req_prod++;
-            continue;
-        }
-
-        skb = np->rx_skbs[rx->id];
-        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
-
-        /* NB. We handle skb overflow later. */
-        skb->data = skb->head + (rx->addr & ~PAGE_MASK);
-        skb->len  = rx->status;
-        skb->tail = skb->data + skb->len;
-
-        np->stats.rx_packets++;
-        np->stats.rx_bytes += rx->status;
-
-        /* Remap the page. */
-        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
-        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
-        mmu++;
-        mcl->op = __HYPERVISOR_update_va_mapping;
-        mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
-        mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
-        mcl->args[2] = 0;
-        mcl++;
-
-        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
-            rx->addr >> PAGE_SHIFT;
-
-        __skb_queue_tail(&rxq, skb);
-    }
-
-    /* Do all the remapping work, and M->P updates, in one big hypercall. */
-    if ( likely((mcl - rx_mcl) != 0) )
-    {
-        mcl->op = __HYPERVISOR_mmu_update;
-        mcl->args[0] = (unsigned long)rx_mmu;
-        mcl->args[1] = mmu - rx_mmu;
-        mcl->args[2] = 0;
-        mcl++;
-        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-    }
-
-    while ( (skb = __skb_dequeue(&rxq)) != NULL )
-    {
-        /*
-         * Enough room in skbuff for the data we were passed? Also, Linux 
-         * expects at least 16 bytes headroom in each receive buffer.
-         */
-        if ( unlikely(skb->tail > skb->end) ||
-             unlikely((skb->data - skb->head) < 16) )
-        {
-            nskb = NULL;
-
-            /* Only copy the packet if it fits in the current MTU. */
-            if ( skb->len <= (dev->mtu + ETH_HLEN) )
-            {
-                if ( (skb->tail > skb->end) && net_ratelimit() )
-                    printk(KERN_INFO "Received packet needs %d bytes more "
-                           "headroom.\n", skb->tail - skb->end);
-
-                if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
-                {
-                    skb_reserve(nskb, 2);
-                    skb_put(nskb, skb->len);
-                    memcpy(nskb->data, skb->data, skb->len);
-                    nskb->dev = skb->dev;
-                }
-            }
-            else if ( net_ratelimit() )
-                printk(KERN_INFO "Received packet too big for MTU "
-                       "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
-
-            /* Reinitialise and then destroy the old skbuff. */
-            skb->len  = 0;
-            skb->tail = skb->data;
-            init_skb_shinfo(skb);
-            dev_kfree_skb(skb);
-
-            /* Switch old for new, if we copied the buffer. */
-            if ( (skb = nskb) == NULL )
-                continue;
-        }
-        
-        /* Set the shared-info area, which is hidden behind the real data. */
-        init_skb_shinfo(skb);
-
-        /* Ethernet-specific work. Delayed to here as it peeks the header. */
-        skb->protocol = eth_type_trans(skb, dev);
-
-        /* Pass it up. */
-        netif_receive_skb(skb);
-        dev->last_rx = jiffies;
-    }
-
-    np->rx_resp_cons = i;
-
-    /* If we get a callback with very few responses, reduce fill target. */
-    /* NB. Note exponential increase, linear decrease. */
-    if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
-         (--np->rx_target < RX_MIN_TARGET) )
-        np->rx_target = RX_MIN_TARGET;
-
-    network_alloc_rx_buffers(dev);
-
-    *pbudget   -= work_done;
-    dev->quota -= work_done;
-
-    if ( work_done < budget )
-    {
-        local_irq_save(flags);
-
-        np->rx->event = i + 1;
-    
-        /* Deal with hypervisor racing our resetting of rx_event. */
-        mb();
-        if ( np->rx->resp_prod == i )
-        {
-            __netif_rx_complete(dev);
-            more_to_do = 0;
-        }
-
-        local_irq_restore(flags);
-    }
-
-    spin_unlock(&np->rx_lock);
-
-    return more_to_do;
-}
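
The tail of netif_poll() re-arms the event index and then re-reads resp_prod before completing the poll, so a response that raced in just before the threshold was raised is not lost. A compact sketch of that re-check (the real code separates the two steps with mb()):

    #include <stdio.h>

    struct ring { unsigned int resp_prod; unsigned int event; };

    /*
     * Returns 1 if polling must continue: after re-arming the event we re-check
     * the producer, because a response may have slipped in just before the
     * threshold was raised -- the race netif_poll() closes with mb().
     */
    static int finish_poll(struct ring *r, unsigned int cons)
    {
        r->event = cons + 1;              /* re-arm: notify on the next response */
        return r->resp_prod != cons;
    }

    int main(void)
    {
        struct ring r = { .resp_prod = 7, .event = 0 };
        printf("more work: %d\n", finish_poll(&r, 7));   /* 0: nothing pending */
        r.resp_prod = 8;                                 /* a response raced in */
        printf("more work: %d\n", finish_poll(&r, 7));   /* 1: keep polling     */
        return 0;
    }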
-
-
-static int network_close(struct net_device *dev)
-{
-    struct net_private *np = dev->priv;
-    np->user_state = UST_CLOSED;
-    netif_stop_queue(np->dev);
-    return 0;
-}
-
-
-static struct net_device_stats *network_get_stats(struct net_device *dev)
-{
-    struct net_private *np = (struct net_private *)dev->priv;
-    return &np->stats;
-}
-
-
-static void network_connect(struct net_device *dev,
-                            netif_fe_interface_status_t *status)
-{
-    struct net_private *np;
-    int i, requeue_idx;
-    netif_tx_request_t *tx;
-
-    np = dev->priv;
-    spin_lock_irq(&np->tx_lock);
-    spin_lock(&np->rx_lock);
-
-    /* Recovery procedure: */
-
-    /* Step 1: Reinitialise variables. */
-    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
-    np->rx->event = np->tx->event = 1;
-
-    /* Step 2: Rebuild the RX and TX ring contents.
-     * NB. We could just free the queued TX packets now but we hope
-     * that sending them out might do some good.  We have to rebuild
-     * the RX ring because some of our pages are currently flipped out
-     * so we can't just free the RX skbs.
-     * NB2. Freelist index entries are always going to be less than
-     *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
-     * greater than __PAGE_OFFSET: we use this property to distinguish
-     * them.
-     */
-
-    /* Rebuild the TX buffer freelist and the TX ring itself.
-     * NB. This reorders packets.  We could keep more private state
-     * to avoid this but maybe it doesn't matter so much given the
-     * interface has been down.
-     */
-    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
-    {
-            if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
-            {
-                struct sk_buff *skb = np->tx_skbs[i];
-                
-                tx = &np->tx->ring[requeue_idx++].req;
-                
-                tx->id   = i;
-                tx->addr = virt_to_machine(skb->data);
-                tx->size = skb->len;
-                
-                np->stats.tx_bytes += skb->len;
-                np->stats.tx_packets++;
-            }
-    }
-    wmb();
-    np->tx->req_prod = requeue_idx;
-
-    /* Rebuild the RX buffer freelist and the RX ring itself. */
-    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
-        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
-            np->rx->ring[requeue_idx++].req.id = i;
-    wmb();                
-    np->rx->req_prod = requeue_idx;
-
-    printk(KERN_ALERT "[XEN] Netfront recovered tx=%d rxfree=%d\n",
-           np->tx->req_prod,np->rx->req_prod);
-
-
-    /* Step 3: All public and private state should now be sane.  Get
-     * ready to start sending and receiving packets and give the driver
-     * domain a kick because we've probably just requeued some
-     * packets.
-     */
-    np->backend_state = BEST_CONNECTED;
-    wmb();
-    notify_via_evtchn(status->evtchn);  
-    network_tx_buf_gc(dev);
-
-    if ( np->user_state == UST_OPEN )
-        netif_start_queue(dev);
-
-    spin_unlock(&np->rx_lock);
-    spin_unlock_irq(&np->tx_lock);
-}
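
The recovery code relies on the NB2 trick documented above: freelist entries in tx_skbs[]/rx_skbs[] are small chain indices, whereas live entries are kernel pointers at or above __PAGE_OFFSET, so a single comparison tells queued packets apart from free slots. A sketch of that discrimination, using the traditional i386 __PAGE_OFFSET value purely for illustration:

    #include <stdio.h>

    /* The traditional i386 __PAGE_OFFSET, used here only as an example value. */
    #define PAGE_OFFSET_SKETCH 0xC0000000UL

    /* Freelist links are small chain indices; live entries are kernel pointers,
     * which on i386 all sit at or above __PAGE_OFFSET. */
    static int slot_is_in_use(unsigned long entry)
    {
        return entry >= PAGE_OFFSET_SKETCH;
    }

    int main(void)
    {
        unsigned long slots[3] = {
            2,                              /* freelist link to id 2           */
            PAGE_OFFSET_SKETCH + 0x1000UL,  /* simulated pointer to a live skb */
            4,                              /* freelist link to id 4           */
        };

        for (int i = 0; i < 3; i++)
            printf("slot %d: %s\n", i, slot_is_in_use(slots[i]) ? "in use" : "free");
        return 0;
    }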
-
-static void vif_show(struct net_private *np)
-{
-#if DEBUG
-    if (np) {
-        IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
-               np->handle,
-               be_state_name[np->backend_state],
-               np->user_state ? "open" : "closed",
-               np->evtchn,
-               np->irq,
-               np->tx,
-               np->rx);
-    } else {
-        IPRINTK("<vif NULL>\n");
-    }
-#endif
-}
-
-/* Send a connect message to xend to tell it to bring up the interface. */
-static void send_interface_connect(struct net_private *np)
-{
-    ctrl_msg_t cmsg = {
-        .type    = CMSG_NETIF_FE,
-        .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
-        .length  = sizeof(netif_fe_interface_connect_t),
-    };
-    netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
-
-    DPRINTK(">\n"); vif_show(np); 
-    msg->handle = np->handle;
-    msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
-    msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
-        
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-    DPRINTK("<\n");
-}
-
-/* Send a driver status notification to the domain controller. */
-static int send_driver_status(int ok)
-{
-    int err = 0;
-    ctrl_msg_t cmsg = {
-        .type    = CMSG_NETIF_FE,
-        .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
-        .length  = sizeof(netif_fe_driver_status_t),
-    };
-    netif_fe_driver_status_t *msg = (void*)cmsg.msg;
-
-    msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
-    err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-    return err;
-}
-
-/* Stop network device and free tx/rx queues and irq.
- */
-static void vif_release(struct net_private *np)
-{
-    /* Stop old i/f to prevent errors whilst we rebuild the state. */
-    spin_lock_irq(&np->tx_lock);
-    spin_lock(&np->rx_lock);
-    netif_stop_queue(np->dev);
-    /* np->backend_state = BEST_DISCONNECTED; */
-    spin_unlock(&np->rx_lock);
-    spin_unlock_irq(&np->tx_lock);
-    
-    /* Free resources. */
-    if(np->tx != NULL){
-        free_irq(np->irq, np->dev);
-        unbind_evtchn_from_irq(np->evtchn);
-        free_page((unsigned long)np->tx);
-        free_page((unsigned long)np->rx);
-        np->irq = 0;
-        np->evtchn = 0;
-        np->tx = NULL;
-        np->rx = NULL;
-    }
-}
-
-/* Release vif resources and close it down completely.
- */
-static void vif_close(struct net_private *np)
-{
-    DPRINTK(">\n"); vif_show(np);
-    WPRINTK("Unexpected netif-CLOSED message in state %s\n",
-            be_state_name[np->backend_state]);
-    vif_release(np);
-    np->backend_state = BEST_CLOSED;
-    /* todo: take dev down and free. */
-    vif_show(np); DPRINTK("<\n");
-}
-
-/* Move the vif into disconnected state.
- * Allocates tx/rx pages.
- * Sends connect message to xend.
- */
-static void vif_disconnect(struct net_private *np){
-    DPRINTK(">\n");
-    if(np->tx) free_page((unsigned long)np->tx);
-    if(np->rx) free_page((unsigned long)np->rx);
-    // Before this np->tx and np->rx had better be null.
-    np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
-    np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
-    memset(np->tx, 0, PAGE_SIZE);
-    memset(np->rx, 0, PAGE_SIZE);
-    np->backend_state = BEST_DISCONNECTED;
-    send_interface_connect(np);
-    vif_show(np); DPRINTK("<\n");
-}
-
-/* Begin interface recovery.
- *
- * NB. Whilst we're recovering, we turn the carrier state off.  We
- * take measures to ensure that this device isn't used for
- * anything.  We also stop the queue for this device.  Various
- * different approaches (e.g. continuing to buffer packets) have
- * been tested but don't appear to improve the overall impact on
- * TCP connections.
- *
- * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
- * is initiated by a special "RESET" message - disconnect could
- * just mean we're not allowed to use this interface any more.
- */
-static void 
-vif_reset(
-    struct net_private *np)
-{
-    DPRINTK(">\n");
-    IPRINTK("Attempting to reconnect network interface: handle=%u\n",
-            np->handle);    
-    vif_release(np);
-    vif_disconnect(np);
-    vif_show(np); DPRINTK("<\n");
-}
-
-/* Move the vif into connected state.
- * Sets the mac and event channel from the message.
- * Binds the irq to the event channel.
- */
-static void
-vif_connect(
-    struct net_private *np, netif_fe_interface_status_t *status)
-{
-    struct net_device *dev = np->dev;
-    DPRINTK(">\n");
-    memcpy(dev->dev_addr, status->mac, ETH_ALEN);
-    network_connect(dev, status);
-    np->evtchn = status->evtchn;
-    np->irq = bind_evtchn_to_irq(np->evtchn);
-    (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
-    netctrl_connected_count();
-    vif_wake(dev);
-    vif_show(np); DPRINTK("<\n");
-}
-
-
-/** Create a network device.
- * @param handle device handle
- * @param val return parameter for created device
- * @return 0 on success, error code otherwise
- */
-static int create_netdev(int handle, struct net_device **val)
-{
-    int i, err = 0;
-    struct net_device *dev = NULL;
-    struct net_private *np = NULL;
-
-    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
-    {
-        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
-        err = -ENOMEM;
-        goto exit;
-    }
-
-    np                = dev->priv;
-    np->backend_state = BEST_CLOSED;
-    np->user_state    = UST_CLOSED;
-    np->handle        = handle;
-    
-    spin_lock_init(&np->tx_lock);
-    spin_lock_init(&np->rx_lock);
-
-    skb_queue_head_init(&np->rx_batch);
-    np->rx_target = RX_MIN_TARGET;
-
-    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
-    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
-        np->tx_skbs[i] = (void *)(i+1);
-    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
-        np->rx_skbs[i] = (void *)(i+1);
-
-    dev->open            = network_open;
-    dev->hard_start_xmit = network_start_xmit;
-    dev->stop            = network_close;
-    dev->get_stats       = network_get_stats;
-    dev->poll            = netif_poll;
-    dev->weight          = 64;
-    
-    if ( (err = register_netdev(dev)) != 0 )
-    {
-        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
-        goto exit;
-    }
-    np->dev = dev;
-    list_add(&np->list, &dev_list);
-
-  exit:
-    if ( (err != 0) && (dev != NULL ) )
-        kfree(dev);
-    else if ( val != NULL )
-        *val = dev;
-    return err;
-}
-
-/* Get the target interface for a status message.
- * Creates the interface when it makes sense.
- * The returned interface may be null when there is no error.
- *
- * @param status status message
- * @param np return parameter for interface state
- * @return 0 on success, error code otherwise
- */
-static int 
-target_vif(
-    netif_fe_interface_status_t *status, struct net_private **np)
-{
-    int err = 0;
-    struct net_device *dev;
-
-    DPRINTK("> handle=%d\n", status->handle);
-    if ( status->handle < 0 )
-    {
-        err = -EINVAL;
-        goto exit;
-    }
-
-    if ( (dev = find_dev_by_handle(status->handle)) != NULL )
-        goto exit;
-
-    if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
-        goto exit;
-    if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
-        goto exit;
-
-    /* It's a new interface in a good state - create it. */
-    DPRINTK("> create device...\n");
-    if ( (err = create_netdev(status->handle, &dev)) != 0 )
-        goto exit;
-
-    netctrl.interface_n++;
-
-  exit:
-    if ( np != NULL )
-        *np = ((dev && !err) ? dev->priv : NULL);
-    DPRINTK("< err=%d\n", err);
-    return err;
-}
-
-/* Handle an interface status message. */
-static void netif_interface_status(netif_fe_interface_status_t *status)
-{
-    int err = 0;
-    struct net_private *np = NULL;
-    
-    DPRINTK(">\n");
-    DPRINTK("> status=%s handle=%d\n",
-            status_name[status->status], status->handle);
-
-    if ( (err = target_vif(status, &np)) != 0 )
-    {
-        WPRINTK("Invalid netif: handle=%u\n", status->handle);
-        return;
-    }
-
-    if ( np == NULL )
-    {
-        DPRINTK("> no vif\n");
-        return;
-    }
-
-    DPRINTK(">\n"); vif_show(np);
-
-    switch ( status->status )
-    {
-    case NETIF_INTERFACE_STATUS_CLOSED:
-        switch ( np->backend_state )
-        {
-        case BEST_CLOSED:
-        case BEST_DISCONNECTED:
-        case BEST_CONNECTED:
-            vif_close(np);
-            break;
-        }
-        break;
-
-    case NETIF_INTERFACE_STATUS_DISCONNECTED:
-        switch ( np->backend_state )
-        {
-        case BEST_CLOSED:
-            vif_disconnect(np);
-            break;
-        case BEST_DISCONNECTED:
-        case BEST_CONNECTED:
-            vif_reset(np);
-            break;
-        }
-        break;
-
-    case NETIF_INTERFACE_STATUS_CONNECTED:
-        switch ( np->backend_state )
-        {
-        case BEST_CLOSED:
-            WPRINTK("Unexpected netif status %s in state %s\n",
-                    status_name[status->status],
-                    be_state_name[np->backend_state]);
-            vif_disconnect(np);
-            vif_connect(np, status);
-            break;
-        case BEST_DISCONNECTED:
-            vif_connect(np, status);
-            break;
-        }
-        break;
-
-    case NETIF_INTERFACE_STATUS_CHANGED:
-        /*
-         * The domain controller is notifying us that a device has been
-         * added or removed.
-         */
-        break;
-
-    default:
-        WPRINTK("Invalid netif status code %d\n", status->status);
-        break;
-    }
-    vif_show(np);
-    DPRINTK("<\n");
-}
-
-/*
- * Initialize the network control interface. 
- */
-static void netif_driver_status(netif_fe_driver_status_t *status)
-{
-    DPRINTK("> status=%d\n", status->status);
-    netctrl.up = status->status;
-    //netctrl.interface_n = status->max_handle;
-    //netctrl.connected_n = 0;
-    netctrl_connected_count();
-}
-
-/* Receive handler for control messages. */
-static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-
-    switch ( msg->subtype )
-    {
-    case CMSG_NETIF_FE_INTERFACE_STATUS:
-        if ( msg->length != sizeof(netif_fe_interface_status_t) )
-            goto error;
-        netif_interface_status((netif_fe_interface_status_t *)
-                               &msg->msg[0]);
-        break;
-
-    case CMSG_NETIF_FE_DRIVER_STATUS:
-        if ( msg->length != sizeof(netif_fe_driver_status_t) )
-            goto error;
-        netif_driver_status((netif_fe_driver_status_t *)
-                            &msg->msg[0]);
-        break;
-
-    error:
-    default:
-        msg->length = 0;
-        break;
-    }
-
-    ctrl_if_send_response(msg);
-}
-
-
-#if 1
-/* Wait for all interfaces to be connected.
- *
- * This works OK, but we'd like to use the probing mode (see below).
- */
-static int probe_interfaces(void)
-{
-    int err = 0, conn = 0;
-    int wait_i, wait_n = 100;
-
-    DPRINTK(">\n");
-
-    for ( wait_i = 0; wait_i < wait_n; wait_i++)
-    { 
-        DPRINTK("> wait_i=%d\n", wait_i);
-        conn = netctrl_connected();
-        if(conn) break;
-        DPRINTK("> schedule_timeout...\n");
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(10);
-    }
-
-    DPRINTK("> wait finished...\n");
-    if ( conn <= 0 )
-    {
-        err = netctrl_err(-ENETDOWN);
-        WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
-    }
-
-    DPRINTK("< err=%d\n", err);
-
-    return err;
-}
-#else
-/* Probe for interfaces until no more are found.
- *
- * This is the mode we'd like to use, but at the moment it panics the kernel.
-*/
-static int probe_interfaces(void)
-{
-    int err = 0;
-    int wait_i, wait_n = 100;
-    ctrl_msg_t cmsg = {
-        .type    = CMSG_NETIF_FE,
-        .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
-        .length  = sizeof(netif_fe_interface_status_t),
-    };
-    netif_fe_interface_status_t msg = {};
-    ctrl_msg_t rmsg = {};
-    netif_fe_interface_status_t *reply = (void*)rmsg.msg;
-    int state = TASK_UNINTERRUPTIBLE;
-    u32 query = -1;
-
-    DPRINTK(">\n");
-
-    netctrl.interface_n = 0;
-    for ( wait_i = 0; wait_i < wait_n; wait_i++ )
-    { 
-        DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
-        msg.handle = query;
-        memcpy(cmsg.msg, &msg, sizeof(msg));
-        DPRINTK("> set_current_state...\n");
-        set_current_state(state);
-        DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
-        DPRINTK("> sending...\n");
-        err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
-        DPRINTK("> err=%d\n", err);
-        if(err) goto exit;
-        DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
-        if((int)reply->handle < 0){
-            // No more interfaces.
-            break;
-        }
-        query = -reply->handle - 2;
-        DPRINTK(">netif_interface_status ...\n");
-        netif_interface_status(reply);
-    }
-
-  exit:
-    if ( err )
-    {
-        err = netctrl_err(-ENETDOWN);
-        WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
-    }
-
-    DPRINTK("< err=%d\n", err);
-    return err;
-}
-
-#endif
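
Both variants of probe_interfaces() boil down to a bounded retry loop: check whether every interface has connected, and if not, sleep briefly and try again, giving up after wait_n attempts. A userspace analogue of the variant actually compiled in (the 100-attempt bound is taken from the code; the sleep interval and connected() stub are illustrative):

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for netctrl_connected(): pretend we connect on the 3rd check. */
    static int connected(int attempt) { return attempt >= 3; }

    int main(void)
    {
        int ok = 0;
        for (int i = 0; i < 100 && !ok; i++) {   /* bounded wait, as in probe_interfaces() */
            ok = connected(i);
            if (!ok)
                usleep(100 * 1000);              /* back off before re-checking */
        }
        printf(ok ? "all interfaces connected\n" : "timed out\n");
        return 0;
    }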
-
-static int __init netif_init(void)
-{
-    int err = 0;
-
-    if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
-         (xen_start_info.flags & SIF_NET_BE_DOMAIN) )
-        return 0;
-
-    IPRINTK("Initialising virtual ethernet driver.\n");
-    INIT_LIST_HEAD(&dev_list);
-    netctrl_init();
-    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-    send_driver_status(1);
-    err = probe_interfaces();
-    if ( err )
-        ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
-
-    DPRINTK("< err=%d\n", err);
-    return err;
-}
-
-static void vif_suspend(struct net_private *np)
-{
-    // Avoid having tx/rx stuff happen until we're ready.
-    DPRINTK(">\n");
-    free_irq(np->irq, np->dev);
-    unbind_evtchn_from_irq(np->evtchn);
-    DPRINTK("<\n");
-}
-
-static void vif_resume(struct net_private *np)
-{
-    // Connect regardless of whether IFF_UP flag set.
-    // Stop bad things from happening until we're back up.
-    DPRINTK(">\n");
-    np->backend_state = BEST_DISCONNECTED;
-    memset(np->tx, 0, PAGE_SIZE);
-    memset(np->rx, 0, PAGE_SIZE);
-    
-    send_interface_connect(np);
-    DPRINTK("<\n");
-}
-
-void netif_suspend(void)
-{
-#if 1 /* XXX THIS IS TEMPORARY */
-    struct list_head *ent;
-    struct net_private *np;
-    
-    DPRINTK(">\n");
-    list_for_each(ent, &dev_list){
-        np = list_entry(ent, struct net_private, list);
-        vif_suspend(np);
-    }
-    DPRINTK("<\n");
-#endif
-}
-
-void netif_resume(void)
-{
-#if 1
-    /* XXX THIS IS TEMPORARY */
-    struct list_head *ent;
-    struct net_private *np;
-
-    DPRINTK(">\n");
-    list_for_each ( ent, &dev_list )
-    {
-        np = list_entry(ent, struct net_private, list);
-        vif_resume(np);
-    }
-    DPRINTK("<\n");
-#endif     
-}
-
-
-__initcall(netif_init);
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/Makefile b/linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/Makefile
deleted file mode 100644 (file)
index e218695..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-y  := privcmd.o
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/privcmd.c b/linux-2.6.8.1-xen-sparse/drivers/xen/privcmd/privcmd.c
deleted file mode 100644 (file)
index 42c5b13..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-/******************************************************************************
- * privcmd.c
- * 
- * Interface to privileged domain-0 commands.
- * 
- * Copyright (c) 2002-2004, K A Fraser, B Dragovic
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/smp_lock.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <asm-xen/proc_cmd.h>
-#include <asm/hypervisor-ifs/dom0_ops.h>
-#include <asm-xen/xen_proc.h>
-
-static struct proc_dir_entry *privcmd_intf;
-
-static int privcmd_ioctl(struct inode *inode, struct file *file,
-                         unsigned int cmd, unsigned long data)
-{
-    int ret = -ENOSYS;
-
-    switch ( cmd )
-    {
-    case IOCTL_PRIVCMD_HYPERCALL:
-    {
-        privcmd_hypercall_t hypercall;
-  
-        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
-            return -EFAULT;
-
-        __asm__ __volatile__ (
-            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
-            "movl  4(%%eax),%%ebx ;"
-            "movl  8(%%eax),%%ecx ;"
-            "movl 12(%%eax),%%edx ;"
-            "movl 16(%%eax),%%esi ;"
-            "movl 20(%%eax),%%edi ;"
-            "movl   (%%eax),%%eax ;"
-            TRAP_INSTR "; "
-            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
-            : "=a" (ret) : "0" (&hypercall) : "memory" );
-
-    }
-    break;
-
-    case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
-    {
-        extern int initdom_ctrlif_domcontroller_port;
-        ret = initdom_ctrlif_domcontroller_port;
-    }
-    break;
-    
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-    case IOCTL_PRIVCMD_MMAP:
-    {
-#define PRIVCMD_MMAP_SZ 32
-        privcmd_mmap_t mmapcmd;
-        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
-        int i, rc;
-
-        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
-            return -EFAULT;
-
-        p = mmapcmd.entry;
-
-        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
-        {
-            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
-                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
-            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
-                return -EFAULT;
-     
-            for ( j = 0; j < n; j++ )
-            {
-                struct vm_area_struct *vma = 
-                    find_vma( current->mm, msg[j].va );
-
-                if ( !vma )
-                    return -EINVAL;
-
-                if ( msg[j].va > PAGE_OFFSET )
-                    return -EINVAL;
-
-                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
-                    return -EINVAL;
-
-                if ( (rc = direct_remap_area_pages(vma->vm_mm, 
-                                                   msg[j].va&PAGE_MASK, 
-                                                   msg[j].mfn<<PAGE_SHIFT, 
-                                                   msg[j].npages<<PAGE_SHIFT, 
-                                                   vma->vm_page_prot,
-                                                   mmapcmd.dom)) < 0 )
-                    return rc;
-            }
-        }
-        ret = 0;
-    }
-    break;
-
-    case IOCTL_PRIVCMD_MMAPBATCH:
-    {
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
-        privcmd_mmapbatch_t m;
-        struct vm_area_struct *vma = NULL;
-        unsigned long *p, addr;
-        unsigned long mfn;
-        int i;
-
-        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
-        { ret = -EFAULT; goto batch_err; }
-
-        vma = find_vma( current->mm, m.addr );
-
-        if ( !vma )
-        { ret = -EINVAL; goto batch_err; }
-
-        if ( m.addr > PAGE_OFFSET )
-        { ret = -EFAULT; goto batch_err; }
-
-        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
-        { ret = -EFAULT; goto batch_err; }
-
-        u[0].ptr  = MMU_EXTENDED_COMMAND;
-        u[0].val  = MMUEXT_SET_FOREIGNDOM;
-        u[0].val |= (unsigned long)m.dom << 16;
-        v = w = &u[1];
-
-        p = m.arr;
-        addr = m.addr;
-        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
-        {
-            if ( get_user(mfn, p) )
-                return -EFAULT;
-
-            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
-
-            __direct_remap_area_pages(vma->vm_mm,
-                                      addr, 
-                                      PAGE_SIZE, 
-                                      v);
-
-            if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
-                put_user( 0xF0000000 | mfn, p );
-
-            v = w;
-        }
-        ret = 0;
-        break;
-
-    batch_err:
-        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
-               ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
-        break;
-    }
-    break;
-#endif
-
-    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
-    {
-       unsigned long m2p_start_mfn = 
-           HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
-
-       if( put_user( m2p_start_mfn, (unsigned long *) data ) )
-           ret = -EFAULT;
-       else
-           ret = 0;
-    }
-    break;
-
-    default:
-        ret = -EINVAL;
-        break;
-    }
-    return ret;
-}
-
-static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-{
-    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
-    return 0;
-}
-
-static struct file_operations privcmd_file_ops = {
-    ioctl : privcmd_ioctl,
-    mmap:   privcmd_mmap
-};
-
-
-static int __init privcmd_init(void)
-{
-    if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
-        return 0;
-
-    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-    if ( privcmd_intf != NULL )
-    {
-        privcmd_intf->owner      = THIS_MODULE;
-        privcmd_intf->nlink      = 1;
-        privcmd_intf->proc_fops  = &privcmd_file_ops;
-    }
-
-    return 0;
-}
-
-
-static void __exit privcmd_cleanup(void)
-{
-    if ( privcmd_intf == NULL ) return;
-    remove_xen_proc_entry("privcmd");
-    privcmd_intf = NULL;
-}
-
-
-module_init(privcmd_init);
-module_exit(privcmd_cleanup);
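
The deleted privcmd driver above exposes dom0-privileged operations to userspace through an ioctl on the proc entry created in privcmd_init(). As an illustrative sketch only (not part of the deleted tree): a dom0 tool of this era could have issued a raw hypercall roughly as follows. The entry path /proc/xen/privcmd, the ioctl number and the privcmd_hypercall_t layout come from headers not shown in this diff (xen_proc.c, asm-xen/proc_cmd.h), so treat them as assumptions.

    /* sketch.c -- hypothetical userspace caller; run as root inside dom0 */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <asm-xen/proc_cmd.h>   /* assumed to provide IOCTL_PRIVCMD_HYPERCALL
                                       and privcmd_hypercall_t (op + arg[5])      */

    int main(void)
    {
        privcmd_hypercall_t call = { .op = 0 /* hypercall number of interest */ };
        int fd = open("/proc/xen/privcmd", O_RDWR);

        if (fd < 0) {
            perror("open /proc/xen/privcmd");
            return 1;
        }
        /* The driver copies the struct in, loads op into %eax and arg[0..4]
         * into %ebx..%edi, then executes TRAP_INSTR, as in privcmd_ioctl(). */
        long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        printf("hypercall returned %ld\n", rc);
        return 0;
    }
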
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-generic/pgtable.h b/linux-2.6.8.1-xen-sparse/include/asm-generic/pgtable.h
deleted file mode 100644 (file)
index e7d6c37..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- *  - flush the old one
- *  - update the page tables
- *  - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry)              \
-do {                                                                   \
-       set_pte(__ptep, __entry);                                       \
-       flush_tlb_page(__vma, __address);                               \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this.
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do {                                                                     \
-       set_pte(__ptep, __entry);                                         \
-       flush_tlb_page(__vma, __address);                                 \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH_NEW
-#define ptep_establish_new(__vma, __address, __ptep, __entry)          \
-do {                                                                   \
-       set_pte(__ptep, __entry);                                       \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       if (!pte_young(pte))
-               return 0;
-       set_pte(ptep, pte_mkold(pte));
-       return 1;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep)               \
-({                                                                     \
-       int __young = ptep_test_and_clear_young(__ptep);                \
-       if (__young)                                                    \
-               flush_tlb_page(__vma, __address);                       \
-       __young;                                                        \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       if (!pte_dirty(pte))
-               return 0;
-       set_pte(ptep, pte_mkclean(pte));
-       return 1;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep)               \
-({                                                                     \
-       int __dirty = ptep_test_and_clear_dirty(__ptep);                \
-       if (__dirty)                                                    \
-               flush_tlb_page(__vma, __address);                       \
-       __dirty;                                                        \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       pte_clear(ptep);
-       return pte;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep)                     \
-({                                                                     \
-       pte_t __pte = ptep_get_and_clear(__ptep);                       \
-       flush_tlb_page(__vma, __address);                               \
-       __pte;                                                          \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
-       pte_t old_pte = *ptep;
-       set_pte(ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_MKDIRTY
-static inline void ptep_mkdirty(pte_t *ptep)
-{
-       pte_t old_pte = *ptep;
-       set_pte(ptep, pte_mkdirty(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)  (pte_val(A) == pte_val(B))
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
-#endif
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr)      pgd_offset(mm, addr)
-#endif
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
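
The generic header above supplies fallback pte helpers, each wrapped in a __HAVE_ARCH_* guard so an architecture can replace individual ones. A rough sketch of the override pattern, under stated assumptions: the guard and helper names are the real ones from the file above, while the xchg-based body and the pte_low field name are illustrative (roughly what non-PAE i386 does), not code from this tree.

    /* In the arch's pgtable.h, ahead of the generic include: */
    #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
    static inline pte_t ptep_get_and_clear(pte_t *ptep)
    {
            /* atomically fetch-and-clear instead of the generic read-then-clear */
            return __pte(xchg(&ptep->pte_low, 0));   /* xchg from asm/system.h */
    }

    #include <asm-generic/pgtable.h>   /* fills in every helper not overridden */
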
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/desc.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/desc.h
deleted file mode 100644 (file)
index 3cebc41..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#include <asm/ldt.h>
-#include <asm/segment.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/preempt.h>
-#include <linux/smp.h>
-
-#include <asm/mmu.h>
-
-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
-
-struct Xgt_desc_struct {
-       unsigned short size;
-       unsigned long address __attribute__((packed));
-       unsigned short pad;
-} __attribute__ ((packed));
-
-extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
-
-#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
-
-#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
-
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
-
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-       "movw %%ax,2(%2)\n\t" \
-       "rorl $16,%%eax\n\t" \
-       "movb %%al,4(%2)\n\t" \
-       "movb %4,5(%2)\n\t" \
-       "movb $0,6(%2)\n\t" \
-       "movb %%ah,7(%2)\n\t" \
-       "rorl $16,%%eax" \
-       : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
-{
-       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-               offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
-}
-
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-{
-       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
-           (int)addr, ((size << 3)-1), 0x82);
-}
-
-#define LDT_entry_a(info) \
-       ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-#define LDT_entry_b(info) \
-       (((info)->base_addr & 0xff000000) | \
-       (((info)->base_addr & 0x00ff0000) >> 16) | \
-       ((info)->limit & 0xf0000) | \
-       (((info)->read_exec_only ^ 1) << 9) | \
-       ((info)->contents << 10) | \
-       (((info)->seg_not_present ^ 1) << 15) | \
-       ((info)->seg_32bit << 22) | \
-       ((info)->limit_in_pages << 23) | \
-       ((info)->useable << 20) | \
-       0x7000)
-
-#define LDT_empty(info) (\
-       (info)->base_addr       == 0    && \
-       (info)->limit           == 0    && \
-       (info)->contents        == 0    && \
-       (info)->read_exec_only  == 1    && \
-       (info)->seg_32bit       == 0    && \
-       (info)->limit_in_pages  == 0    && \
-       (info)->seg_not_present == 1    && \
-       (info)->useable         == 0    )
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
-       C(0); C(1); C(2);
-#undef C
-}
-
-static inline void clear_LDT(void)
-{
-       int cpu = get_cpu();
-
-       /*
-        * NB. We load the default_ldt for lcall7/27 handling on demand, as
-        * it slows down context switching. No one uses it anyway.
-        */
-       cpu = cpu;              /* XXX avoid compiler warning */
-       queue_set_ldt(0UL, 0);
-       put_cpu();
-}
-
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
-{
-       void *segments = pc->ldt;
-       int count = pc->size;
-
-       if (likely(!count))
-               segments = NULL;
-
-       queue_set_ldt((unsigned long)segments, count);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-       int cpu = get_cpu();
-       load_LDT_nolock(pc, cpu);
-       put_cpu();
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
deleted file mode 100644 (file)
index a865309..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-#include <linux/device.h>
-#include <linux/mm.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       flush_write_buffers();
-       return virt_to_bus(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-          enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       for (i = 0; i < nents; i++ ) {
-               BUG_ON(!sg[i].page);
-
-               sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
-       }
-
-       flush_write_buffers();
-       return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-            size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                   enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                   enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-        /*
-         * we fall back to GFP_DMA when the mask isn't all 1s,
-         * so we can't guarantee allocations that must be
-         * within a tighter range than GFP_DMA..
-         */
-        if(mask < 0x00ffffff)
-                return 0;
-
-       return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       if(!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
-       /* no easy way to get cache size on all x86, so return the
-        * maximum possible, to be safe */
-       return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-#define dma_is_consistent(d)   (1)
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
-              enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/fixmap.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/fixmap.h
deleted file mode 100644 (file)
index 89b3ba6..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <asm/acpi.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-enum fixed_addresses {
-       FIX_HOLE,
-       FIX_VSYSCALL,
-#ifdef CONFIG_X86_LOCAL_APIC
-       FIX_APIC_BASE,  /* local (CPU) APIC -- required for SMP or not */
-#endif
-#ifdef CONFIG_X86_IO_APIC
-       FIX_IO_APIC_BASE_0,
-       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-#endif
-#ifdef CONFIG_X86_VISWS_APIC
-       FIX_CO_CPU,     /* Cobalt timer */
-       FIX_CO_APIC,    /* Cobalt APIC Redirection Table */ 
-       FIX_LI_PCIA,    /* Lithium PCI Bridge A */
-       FIX_LI_PCIB,    /* Lithium PCI Bridge B */
-#endif
-#ifdef CONFIG_X86_F00F_BUG
-       FIX_F00F_IDT,   /* Virtual mapping for IDT */
-#endif
-#ifdef CONFIG_X86_CYCLONE_TIMER
-       FIX_CYCLONE_TIMER, /*cyclone timer register*/
-#endif 
-#ifdef CONFIG_HIGHMEM
-       FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
-       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
-#ifdef CONFIG_ACPI_BOOT
-       FIX_ACPI_BEGIN,
-       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
-#ifdef CONFIG_PCI_MMCONFIG
-       FIX_PCIE_MCFG,
-#endif
-       FIX_SHARED_INFO,
-       FIX_GNTTAB,
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define NR_FIX_ISAMAPS 256
-       FIX_ISAMAP_END,
-       FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-#endif
-       __end_of_permanent_fixed_addresses,
-       /* temporary boot-time mappings, used before ioremap() is functional */
-#define NR_FIX_BTMAPS  16
-       FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-       FIX_WP_TEST,
-       __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
-                                       unsigned long phys, pgprot_t flags);
-extern void __set_fixmap_ma (enum fixed_addresses idx,
-                                       unsigned long mach, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
-               __set_fixmap(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma(idx, phys) \
-               __set_fixmap_ma(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma_ro(idx, phys) \
-               __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
-               __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define clear_fixmap(idx) \
-               __set_fixmap(idx, 0, __pgprot(0))
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap.
- */
-#define FIXADDR_TOP    (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
-#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START  (FIXADDR_TOP - __FIXADDR_SIZE)
-
-#define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START     (__fix_to_virt(FIX_VSYSCALL))
-#define FIXADDR_USER_END       (FIXADDR_USER_START + PAGE_SIZE)
-
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-       /*
-        * this branch gets completely eliminated after inlining,
-        * except when someone tries to use fixaddr indices in an
-        * illegal way. (such as mixing up address types or using
-        * out-of-range indices).
-        *
-        * If it doesn't get removed, the linker will complain
-        * loudly with a reasonably clear error message..
-        */
-       if (idx >= __end_of_fixed_addresses)
-               __this_fixmap_does_not_exist();
-
-        return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
-       BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-       return __virt_to_fix(vaddr);
-}
-
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/highmem.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/highmem.h
deleted file mode 100644 (file)
index f608a13..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- *                   Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with 
- * up to 16 Terabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
-extern void kmap_init(void);
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#if NR_CPUS <= 32
-#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23))
-#else
-#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23) - 0x200000UL)
-#endif
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-extern void * FASTCALL(kmap_high(struct page *page));
-extern void FASTCALL(kunmap_high(struct page *page));
-
-void *kmap(struct page *page);
-void kunmap(struct page *page);
-void *kmap_atomic(struct page *page, enum km_type type);
-void *kmap_atomic_pte(struct page *page, enum km_type type);
-void kunmap_atomic(void *kvaddr, enum km_type type);
-void kunmap_atomic_force(void *kvaddr, enum km_type type);
-struct page *kmap_atomic_to_page(void *ptr);
-
-#define flush_cache_kmaps()    do { } while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_HIGHMEM_H */
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/io.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/io.h
deleted file mode 100644 (file)
index db076b4..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- *             Linus
- */
-
- /*
-  *  Bit simplified and optimized by Jan Hubicka
-  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-  *
-  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-  *  isa_read[wl] and isa_write[wl] fixed
-  *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-  */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
-
-#ifdef __KERNEL__
-
-#include <linux/vmalloc.h>
-#include <asm/fixmap.h>
-
-/**
- *     virt_to_phys    -       map virtual addresses to physical
- *     @address: address to remap
- *
- *     The returned physical address is the physical (CPU) mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses directly mapped or allocated via kmalloc. 
- *
- *     This function does not give bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-       return __pa(address);
-}
-
-/**
- *     phys_to_virt    -       map physical address to virtual
- *     @address: address to remap
- *
- *     The returned virtual address is a current CPU mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses that have a kernel mapping
- *
- *     This function does not handle bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
-       return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page)       (phys_to_machine(page_to_pseudophys(page)))
-
-#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
-       (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-        ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == bvec_to_pseudophys((vec2))))
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/**
- * ioremap     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address. 
- */
-
-static inline void * ioremap (unsigned long offset, unsigned long size)
-{
-       return __ioremap(offset, size, 0);
-}
-
-extern void * ioremap_nocache (unsigned long offset, unsigned long size);
-extern void iounmap(void *addr);
-
-/*
- * bt_ioremap() and bt_iounmap() are for temporary early boot-time
- * mappings, before the real ioremap() is functional.
- * A boot-time mapping is currently limited to at most 16 pages.
- */
-extern void *bt_ioremap(unsigned long offset, unsigned long size);
-extern void bt_iounmap(void *addr, unsigned long size);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - ((_x) >> PAGE_SHIFT))
-#else
-#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
-#endif
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
-#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) (addr))
-#define readw(addr) (*(volatile unsigned short *) (addr))
-#define readl(addr) (*(volatile unsigned int *) (addr))
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c)   __memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c)     __memcpy((void *)(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c)           memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c)       memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c)         memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d)           eth_copy_and_sum((a),(void *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d)       eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
-
-/**
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check 
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the mmio address io_addr. This
- *     address should have been obtained by ioremap.
- *     Returns 1 on a match.
- */
-static inline int check_signature(unsigned long io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
-/**
- *     isa_check_signature             -       find BIOS signatures
- *     @io_addr: mmio address to check 
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the ISA mmio address io_addr.
- *     Returns 1 on a match.
- *
- *     This function is deprecated. New drivers should use ioremap and
- *     check_signature.
- */
-
-static inline int isa_check_signature(unsigned long io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (isa_readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
-/*
- *     Cache management
- *
- *     This needed for two cases
- *     1. Out of order aware processors
- *     2. Accidentally out of order processors (PPro errata #51)
- */
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
-       __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size)            flush_write_buffers()
-#define dma_cache_wback(_start,_size)          flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size)      flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size)            do { } while (0)
-#define dma_cache_wback(_start,_size)          do { } while (0)
-#define dma_cache_wback_inv(_start,_size)      do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
-#elif defined(__UNSAFE_IO__)
-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#else
-#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n"                \
-                      "2:\n"                           \
-                      ".section __ex_table,\"a\"\n\t"  \
-                      ".align 4\n\t"                   \
-                      ".long 1b,2b\n"                  \
-                      ".previous"
-#endif
-
-static inline void slow_down_io(void) {
-       __asm__ __volatile__(
-               __SLOW_DOWN_IO
-#ifdef REALLY_SLOW_IO
-               __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-               : : );
-}
-
-#ifdef CONFIG_X86_NUMAQ
-extern void *xquad_portio;    /* Where the IO area was mapped */
-#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
-       if (xquad_portio) \
-               write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
-       else \
-               out##bwl##_local(value, port); \
-} \
-static inline void out##bwl(unsigned type value, int port) { \
-       out##bwl##_quad(value, port, 0); \
-} \
-static inline unsigned type in##bwl##_quad(int port, int quad) { \
-       if (xquad_portio) \
-               return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
-       else \
-               return in##bwl##_local(port); \
-} \
-static inline unsigned type in##bwl(int port) { \
-       return in##bwl##_quad(port, 0); \
-}
-#else 
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl(unsigned type value, int port) { \
-       out##bwl##_local(value, port); \
-} \
-static inline unsigned type in##bwl(int port) { \
-       return in##bwl##_local(port); \
-}
-#endif
-
-
-#if __UNSAFE_IO__
-#define ____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
-       __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
-       unsigned type value; \
-       __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
-       return value; \
-}
-#else
-#define ____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
-       __asm__ __volatile__("1: out" #bwl " %" #bw "0, %w1\n"          \
-                            "2:\n"                                     \
-                            ".section __ex_table,\"a\"\n\t"            \
-                            ".align 4\n\t"                             \
-                            ".long 1b,2b\n"                            \
-                            ".previous" : : "a"(value), "Nd"(port));   \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
-       unsigned type value; \
-       __asm__ __volatile__("1:in" #bwl " %w1, %" #bw "0\n"            \
-                            "2:\n"                                     \
-                            ".section .fixup,\"ax\"\n"                 \
-                            "3: mov" #bwl " $~0,%" #bw "0\n\t"         \
-                            "jmp 2b\n"                                 \
-                            ".previous\n"                              \
-                            ".section __ex_table,\"a\"\n\t"            \
-                            ".align 4\n\t"                             \
-                            ".long 1b,3b\n"                            \
-                            ".previous" : "=a"(value) : "Nd"(port));   \
-       return value; \
-}
-#endif
-
-#define BUILDIO(bwl,bw,type) \
-____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local_p(unsigned type value, int port) { \
-       out##bwl##_local(value, port); \
-       slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_local_p(int port) { \
-       unsigned type value = in##bwl##_local(port); \
-       slow_down_io(); \
-       return value; \
-} \
-__BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_p(unsigned type value, int port) { \
-       out##bwl(value, port); \
-       slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_p(int port) { \
-       unsigned type value = in##bwl(port); \
-       slow_down_io(); \
-       return value; \
-} \
-static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
-       __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-} \
-static inline void ins##bwl(int port, void *addr, unsigned long count) { \
-       __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
-}
-
-BUILDIO(b,b,char)
-BUILDIO(w,w,short)
-BUILDIO(l,,int)
-
-#endif
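
The ____BUILDIO/__BUILDIO/BUILDIO macro stack above generates all of the port accessors (outb/inb, their pausing _p variants, and the rep string forms). As a reading aid, here is an approximate sketch of what BUILDIO(b,b,char) expands to for the byte output path, assuming __UNSAFE_IO__ is set and CONFIG_X86_NUMAQ is not:

    /* Approximate expansion of BUILDIO(b,b,char), output side only. */
    static inline void outb_local(unsigned char value, int port) {
            __asm__ __volatile__("outb %b0, %w1" : : "a"(value), "Nd"(port));
    }
    static inline void outb(unsigned char value, int port) {
            outb_local(value, port);            /* non-NUMAQ __BUILDIO case */
    }
    static inline void outb_p(unsigned char value, int port) {
            outb(value, port);
            slow_down_io();                     /* the port-0x80 delay defined above */
    }
    static inline void outsb(int port, const void *addr, unsigned long count) {
            __asm__ __volatile__("rep; outsb" : "+S"(addr), "+c"(count) : "d"(port));
    }
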
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
deleted file mode 100644 (file)
index 565ba96..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/* defines for inline arch setup functions */
-
-#include <asm/apic.h>
-
-/**
- * do_timer_interrupt_hook - hook into timer tick
- * @regs:      standard registers from interrupt
- *
- * Description:
- *     This hook is called immediately after the timer interrupt is ack'd.
- *     Its primary purpose is to allow architectures that don't possess
- *     individual per CPU clocks (like the CPU APICs supply) to broadcast the
- *     timer interrupt as a means of triggering reschedules etc.
- **/
-
-static inline void do_timer_interrupt_hook(struct pt_regs *regs)
-{
-       do_timer(regs);
-/*
- * In the SMP case we use the local APIC timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifndef CONFIG_X86_LOCAL_APIC
-       if (regs)
-               x86_do_profile(regs);
-#else
-       if (regs && !using_apic_timer)
-               smp_local_timer_interrupt(regs);
-#endif
-}
-
-
-/* you can safely undefine this if you don't have the Neptune chipset */
-
-#define BUGGY_NEPTUN_TIMER
-
-/**
- * do_timer_overflow - process a detected timer overflow condition
- * @count:     hardware timer interrupt count on overflow
- *
- * Description:
- *     This call is invoked when the jiffies count has not incremented but
- *     the hardware timer interrupt has.  It means that a timer tick interrupt
- *     came along while the previous one was pending, thus a tick was missed
- **/
-static inline int do_timer_overflow(int count)
-{
-       int i;
-
-       spin_lock(&i8259A_lock);
-       /*
-        * This is tricky when I/O APICs are used;
-        * see do_timer_interrupt().
-        */
-       i = inb(0x20);
-       spin_unlock(&i8259A_lock);
-       
-       /* assumption about timer being IRQ0 */
-       if (i & 0x01) {
-               /*
-                * We cannot detect lost timer interrupts ... 
-                * well, that's why we call them lost, don't we? :)
-                * [hmm, on the Pentium and Alpha we can ... sort of]
-                */
-               count -= LATCH;
-       } else {
-#ifdef BUGGY_NEPTUN_TIMER
-               /*
-                * for the Neptune bug we know that the 'latch'
-                * command doesn't latch the high and low value
-                * of the counter atomically. Thus we have to
-                * subtract 256 from the counter
-                * ... funny, isn't it? :)
-                */
-               
-               count -= 256;
-#else
-               printk("do_slow_gettimeoffset(): hardware timer problem?\n");
-#endif
-       }
-       return count;
-}
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
deleted file mode 100644 (file)
index a96d9f6..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *  arch/i386/mach-generic/io_ports.h
- *
- *  Machine specific IO port address definition for generic.
- *  Written by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_IO_PORTS_H
-#define _MACH_IO_PORTS_H
-
-/* i8253A PIT registers */
-#define PIT_MODE               0x43
-#define PIT_CH0                        0x40
-#define PIT_CH2                        0x42
-
-/* i8259A PIC registers */
-#define PIC_MASTER_CMD         0x20
-#define PIC_MASTER_IMR         0x21
-#define PIC_MASTER_ISR         PIC_MASTER_CMD
-#define PIC_MASTER_POLL                PIC_MASTER_ISR
-#define PIC_MASTER_OCW3                PIC_MASTER_ISR
-#define PIC_SLAVE_CMD          0xa0
-#define PIC_SLAVE_IMR          0xa1
-
-/* i8259A PIC related value */
-#define PIC_CASCADE_IR         2
-#define MASTER_ICW4_DEFAULT    0x01
-#define SLAVE_ICW4_DEFAULT     0x01
-#define PIC_ICW4_AEOI          2
-
-#endif /* !_MACH_IO_PORTS_H */
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
deleted file mode 100644 (file)
index b8384ef..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * This file should contain #defines for all of the interrupt vector
- * numbers used by this architecture.
- *
- * In addition, there are some standard defines:
- *
- *     FIRST_EXTERNAL_VECTOR:
- *             The first free place for external interrupts
- *
- *     SYSCALL_VECTOR:
- *             The IRQ vector a syscall makes the user to kernel transition
- *             under.
- *
- *     TIMER_IRQ:
- *             The IRQ number the timer interrupt comes in at.
- *
- *     NR_IRQS:
- *             The total number of interrupt vectors (including all the
- *             architecture specific interrupts) needed.
- *
- */                    
-#ifndef _ASM_IRQ_VECTORS_H
-#define _ASM_IRQ_VECTORS_H
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR  0x20
-
-#define SYSCALL_VECTOR         0x80
-
-/*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- *  some of the following vectors are 'rare', they are merged
- *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
- */
-#define SPURIOUS_APIC_VECTOR   0xff
-#define ERROR_APIC_VECTOR      0xfe
-#define INVALIDATE_TLB_VECTOR  0xfd
-#define RESCHEDULE_VECTOR      0xfc
-#define CALL_FUNCTION_VECTOR   0xfb
-
-#define THERMAL_APIC_VECTOR    0xf0
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR     0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x31 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR    0x31
-#define FIRST_SYSTEM_VECTOR    0xef
-
-/*  #define TIMER_IRQ _EVENT_TIMER */
-
-/*
- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
- * Right now the APIC is mostly only used for SMP.
- * 256 vectors is an architectural limit. (we can have
- * more than 256 devices theoretically, but they will
- * have to use shared interrupts)
- * Since vectors 0x00-0x1f are used/reserved for the CPU,
- * the usable vector space is 0x20-0xff (224 vectors)
- */
-
-#if 0
-/*
- * The maximum number of vectors supported by i386 processors
- * is limited to 256. For processors other than i386, NR_VECTORS
- * should be changed accordingly.
- */
-#define NR_VECTORS 256
-
-#ifdef CONFIG_PCI_USE_VECTOR
-#define NR_IRQS FIRST_SYSTEM_VECTOR
-#define NR_IRQ_VECTORS NR_IRQS
-#else
-#ifdef CONFIG_X86_IO_APIC
-#define NR_IRQS 224
-# if (224 >= 32 * NR_CPUS)
-# define NR_IRQ_VECTORS NR_IRQS
-# else
-# define NR_IRQ_VECTORS (32 * NR_CPUS)
-# endif
-#else
-#define NR_IRQS 16
-#define NR_IRQ_VECTORS NR_IRQS
-#endif
-#endif
-#endif
-
-#define FPU_IRQ                        13
-
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
-
-/*
- * The flat IRQ space is divided into two regions:
- *  1. A one-to-one mapping of real physical IRQs. This space is only used
- *     if we have physical device-access privilege. This region is at the 
- *     start of the IRQ space so that existing device drivers do not need
- *     to be modified to translate physical IRQ numbers into our IRQ space.
- *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- *     are bound using the provided bind/unbind functions.
- */
-
-#define PIRQ_BASE   0
-#define NR_PIRQS  128
-
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS  128
-
-#define NR_IRQS   (NR_PIRQS + NR_DYNIRQS)
-#define NR_IRQ_VECTORS NR_IRQS
-
-#define pirq_to_irq(_x)   ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)   ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-
-#ifndef __ASSEMBLY__
-/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
-extern int  bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
-extern int  bind_evtchn_to_irq(int evtchn);
-extern void unbind_evtchn_from_irq(int evtchn);
-
-extern void irq_suspend(void);
-extern void irq_resume(void);
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IRQ_VECTORS_H */
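
To make the layout above concrete: with PIRQ_BASE 0 and NR_PIRQS 128, physical IRQs occupy numbers 0..127 and dynamically bound IRQs occupy 128..255. A minimal self-check of the mapping arithmetic; the constants and macros are copied from the header, the test harness itself is illustrative.

    #include <assert.h>

    #define PIRQ_BASE     0
    #define NR_PIRQS    128
    #define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)

    #define pirq_to_irq(x)    ((x) + PIRQ_BASE)
    #define dynirq_to_irq(x)  ((x) + DYNIRQ_BASE)
    #define irq_to_dynirq(x)  ((x) - DYNIRQ_BASE)

    int main(void)
    {
            assert(pirq_to_irq(14) == 14);     /* physical IRQs keep their numbers */
            assert(dynirq_to_irq(5) == 133);   /* dynamic IRQs start after the pirqs */
            assert(irq_to_dynirq(133) == 5);
            return 0;
    }
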
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
deleted file mode 100644 (file)
index 6b5dadc..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ASM_MACH_MPSPEC_H
-#define __ASM_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-#define MAX_MP_BUSSES 32
-
-#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
deleted file mode 100644 (file)
index 521e227..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *  arch/i386/mach-generic/mach_reboot.h
- *
- *  Machine specific reboot functions for generic.
- *  Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_REBOOT_H
-#define _MACH_REBOOT_H
-
-static inline void kb_wait(void)
-{
-       int i;
-
-       for (i = 0; i < 0x10000; i++)
-               if ((inb_p(0x64) & 0x02) == 0)
-                       break;
-}
-
-static inline void mach_reboot(void)
-{
-       int i;
-       for (i = 0; i < 100; i++) {
-               kb_wait();
-               udelay(50);
-               outb(0xfe, 0x64);         /* pulse reset low */
-               udelay(50);
-       }
-}
-
-#endif /* !_MACH_REBOOT_H */
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h
deleted file mode 100644 (file)
index b37858d..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- *  include/asm-i386/mach-default/mach_resources.h
- *
- *  Machine specific resource allocation for generic.
- *  Split out from setup.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_RESOURCES_H
-#define _MACH_RESOURCES_H
-
-struct resource standard_io_resources[] = {
-       { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
-       { "pic1", 0x20, 0x21, IORESOURCE_BUSY },
-       { "timer", 0x40, 0x5f, IORESOURCE_BUSY },
-       { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
-       { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
-       { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY },
-       { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
-       { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
-};
-
-#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
-
-static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
-
-/* System ROM resources */
-#define MAXROMS 6
-static struct resource rom_resources[MAXROMS] = {
-       { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
-       { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
-};
-
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static inline void probe_video_rom(int roms)
-{
-       unsigned long base;
-       unsigned char *romstart;
-
-
-       /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
-       for (base = 0xC0000; base < 0xE0000; base += 2048) {
-               romstart = isa_bus_to_virt(base);
-               if (!romsignature(romstart))
-                       continue;
-               request_resource(&iomem_resource, rom_resources + roms);
-               roms++;
-               break;
-       }
-}
-
-static inline void probe_extension_roms(int roms)
-{
-       unsigned long base;
-       unsigned char *romstart;
-
-       /* Extension roms at C800:0000 - DFFF:0000 */
-       for (base = 0xC8000; base < 0xE0000; base += 2048) {
-               unsigned long length;
-
-               romstart = isa_bus_to_virt(base);
-               if (!romsignature(romstart))
-                       continue;
-               length = romstart[2] * 512;
-               if (length) {
-                       unsigned int i;
-                       unsigned char chksum;
-
-                       chksum = 0;
-                       for (i = 0; i < length; i++)
-                               chksum += romstart[i];
-
-                       /* Good checksum? */
-                       if (!chksum) {
-                               rom_resources[roms].start = base;
-                               rom_resources[roms].end = base + length - 1;
-                               rom_resources[roms].name = "Extension ROM";
-                               rom_resources[roms].flags = IORESOURCE_BUSY;
-
-                               request_resource(&iomem_resource, rom_resources + roms);
-                               roms++;
-                               if (roms >= MAXROMS)
-                                       return;
-                       }
-               }
-       }
-
-       /* Final check for motherboard extension rom at E000:0000 */
-       base = 0xE0000;
-       romstart = isa_bus_to_virt(base);
-
-       if (romsignature(romstart)) {
-               rom_resources[roms].start = base;
-               rom_resources[roms].end = base + 65535;
-               rom_resources[roms].name = "Extension ROM";
-               rom_resources[roms].flags = IORESOURCE_BUSY;
-
-               request_resource(&iomem_resource, rom_resources + roms);
-       }
-}
-
-static inline void request_graphics_resource(void)
-{
-       request_resource(&iomem_resource, &vram_resource);
-}
-
-#endif /* !_MACH_RESOURCES_H */
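
For reference, a standalone sketch (not part of the patch) of the extension-ROM validity test used in probe_extension_roms() above: the 0x55 0xAA signature, a length byte at offset 2 in 512-byte units, and a byte sum over that length of zero mod 256. The ROM contents below are made up.

#include <stdio.h>

static int rom_valid(const unsigned char *rom)
{
	unsigned long length = rom[2] * 512;	/* length byte, 512-byte units */
	unsigned char chksum = 0;
	unsigned long i;

	if (rom[0] != 0x55 || rom[1] != 0xaa)	/* romsignature() check */
		return 0;
	if (!length)
		return 0;
	for (i = 0; i < length; i++)		/* byte sum must be 0 mod 256 */
		chksum += rom[i];
	return chksum == 0;
}

int main(void)
{
	unsigned char rom[512] = { 0x55, 0xaa, 0x01, 0x00, 0x12, 0x34 };
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 512; i++)
		sum += rom[i];
	rom[3] = (unsigned char)(0 - sum);	/* fix-up byte so the sum is zero */

	printf("extension ROM valid: %s\n", rom_valid(rom) ? "yes" : "no");
	return 0;
}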
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
deleted file mode 100644 (file)
index b749aa4..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *  include/asm-i386/mach-default/mach_time.h
- *
- *  Machine specific set RTC function for generic.
- *  Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_TIME_H
-#define _MACH_TIME_H
-
-#include <linux/mc146818rtc.h>
-
-/* for check timing call set_rtc_mmss() 500ms     */
-/* used in arch/i386/time.c::do_timer_interrupt() */
-#define USEC_AFTER     500000
-#define USEC_BEFORE    500000
-
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you'll only notice that after reboot!
- */
-static inline int mach_set_rtc_mmss(unsigned long nowtime)
-{
-       int retval = 0;
-       int real_seconds, real_minutes, cmos_minutes;
-       unsigned char save_control, save_freq_select;
-
-       save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
-       CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
-       save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
-       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-       cmos_minutes = CMOS_READ(RTC_MINUTES);
-       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-               BCD_TO_BIN(cmos_minutes);
-
-       /*
-        * since we're only adjusting minutes and seconds,
-        * don't interfere with hour overflow. This avoids
-        * messing with unknown time zones but requires your
-        * RTC not to be off by more than 15 minutes
-        */
-       real_seconds = nowtime % 60;
-       real_minutes = nowtime / 60;
-       if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-               real_minutes += 30;             /* correct for half hour time zone */
-       real_minutes %= 60;
-
-       if (abs(real_minutes - cmos_minutes) < 30) {
-               if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-                       BIN_TO_BCD(real_seconds);
-                       BIN_TO_BCD(real_minutes);
-               }
-               CMOS_WRITE(real_seconds,RTC_SECONDS);
-               CMOS_WRITE(real_minutes,RTC_MINUTES);
-       } else {
-               printk(KERN_WARNING
-                      "set_rtc_mmss: can't update from %d to %d\n",
-                      cmos_minutes, real_minutes);
-               retval = -1;
-       }
-
-       /* The following flags have to be released exactly in this order,
-        * otherwise the DS12887 (popular MC146818A clone with integrated
-        * battery and quartz) will not reset the oscillator and will not
-        * update precisely 500 ms later. You won't find this mentioned in
-        * the Dallas Semiconductor data sheets, but who believes data
-        * sheets anyway ...                           -- Markus Kuhn
-        */
-       CMOS_WRITE(save_control, RTC_CONTROL);
-       CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-
-       return retval;
-}
-
-static inline unsigned long mach_get_cmos_time(void)
-{
-       unsigned int year, mon, day, hour, min, sec;
-       int i;
-
-       /* The Linux interpretation of the CMOS clock register contents:
-        * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
-        * RTC registers show the second which has precisely just started.
-        * Let's hope other operating systems interpret the RTC the same way.
-        */
-       /* read RTC exactly on falling edge of update flag */
-       for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
-               if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
-                       break;
-       for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
-               if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
-                       break;
-       do { /* Isn't this overkill ? UIP above should guarantee consistency */
-               sec = CMOS_READ(RTC_SECONDS);
-               min = CMOS_READ(RTC_MINUTES);
-               hour = CMOS_READ(RTC_HOURS);
-               day = CMOS_READ(RTC_DAY_OF_MONTH);
-               mon = CMOS_READ(RTC_MONTH);
-               year = CMOS_READ(RTC_YEAR);
-       } while (sec != CMOS_READ(RTC_SECONDS));
-       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-         {
-           BCD_TO_BIN(sec);
-           BCD_TO_BIN(min);
-           BCD_TO_BIN(hour);
-           BCD_TO_BIN(day);
-           BCD_TO_BIN(mon);
-           BCD_TO_BIN(year);
-         }
-       if ((year += 1900) < 1970)
-               year += 100;
-
-       return mktime(year, mon, day, hour, min, sec);
-}
-
-#endif /* !_MACH_TIME_H */
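
A standalone sketch (not part of the patch) of two details in mach_set_rtc_mmss() above: the BCD helpers from <linux/mc146818rtc.h> that convert the raw RTC registers, and the rounding that tolerates RTCs kept in half-hour-offset time zones. The minute values below are made up.

#include <stdio.h>
#include <stdlib.h>

#define BCD_TO_BIN(val) ((val) = ((val) & 15) + ((val) >> 4) * 10)
#define BIN_TO_BCD(val) ((val) = (((val) / 10) << 4) + (val) % 10)

int main(void)
{
	int cmos_minutes = 0x50;	/* raw BCD value read from the RTC */
	int real_minutes = 20;		/* minute value from the system clock */

	BCD_TO_BIN(cmos_minutes);	/* 0x50 -> 50 */

	/* RTC kept in a half-hour-offset zone: round the difference to the
	 * nearest multiple of 30 minutes before deciding whether to write. */
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;
	real_minutes %= 60;

	printf("cmos=%d adjusted real=%d -> %s\n", cmos_minutes, real_minutes,
	       abs(real_minutes - cmos_minutes) < 30 ? "write" : "refuse");
	return 0;
}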
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
deleted file mode 100644 (file)
index 4b9703b..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  include/asm-i386/mach-default/mach_timer.h
- *
- *  Machine specific calibrate_tsc() for generic.
- *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-/* ------ Calibrate the TSC ------- 
- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
- * Too much 64-bit arithmetic here to do this cleanly in C, and for
- * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
- * output busy loop as low as possible. We avoid reading the CTC registers
- * directly because of the awkward 8-bit access mechanism of the 82C54
- * device.
- */
-#ifndef _MACH_TIMER_H
-#define _MACH_TIMER_H
-
-#define CALIBRATE_LATCH        (5 * LATCH)
-
-static inline void mach_prepare_counter(void)
-{
-       /* Set the Gate high, disable speaker */
-       outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-
-       /*
-        * Now let's take care of CTC channel 2
-        *
-        * Set the Gate high, program CTC channel 2 for mode 0,
-        * (interrupt on terminal count mode), binary count,
-        * load 5 * LATCH count, (LSB and MSB) to begin countdown.
-        *
-        * Some devices need a delay here.
-        */
-       outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
-       outb_p(CALIBRATE_LATCH & 0xff, 0x42);   /* LSB of count */
-       outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
-}
-
-static inline void mach_countup(unsigned long *count_p)
-{
-       unsigned long count = 0;
-       do {
-               count++;
-       } while ((inb_p(0x61) & 0x20) == 0);
-       *count_p = count;
-}
-
-#endif /* !_MACH_TIMER_H */
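
As a rough standalone sketch (not part of the patch): mach_countup() above spins until PIT channel 2 finishes counting down CALIBRATE_LATCH ticks, and calibrate_tsc() divides the TSC delta seen over that window to get the CPU clock. The CLOCK_TICK_RATE and HZ values are the i386 defaults of that era and the TSC delta is made up.

#include <stdio.h>

#define CLOCK_TICK_RATE	1193182UL	/* PIT input clock, Hz */
#define HZ		100
#define LATCH		((CLOCK_TICK_RATE + HZ / 2) / HZ)
#define CALIBRATE_LATCH	(5 * LATCH)

int main(void)
{
	/* Channel 2 counts down CALIBRATE_LATCH ticks of the 1.193182 MHz
	 * clock before OUT2 (port 0x61 bit 0x20) goes high: ~50 ms. */
	double window_us = CALIBRATE_LATCH * 1e6 / CLOCK_TICK_RATE;

	unsigned long long tsc_delta = 100000000ULL;	/* hypothetical cycles */
	double mhz = tsc_delta / window_us;

	printf("calibration window: %.0f us, TSC ~ %.0f MHz\n", window_us, mhz);
	return 0;
}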
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
deleted file mode 100644 (file)
index c98c288..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- *  include/asm-i386/mach-default/mach_traps.h
- *
- *  Machine specific NMI handling for generic.
- *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _MACH_TRAPS_H
-#define _MACH_TRAPS_H
-
-static inline void clear_mem_error(unsigned char reason)
-{
-       reason = (reason & 0xf) | 4;
-       outb(reason, 0x61);
-}
-
-static inline unsigned char get_nmi_reason(void)
-{
-       return inb(0x61);
-}
-
-static inline void reassert_nmi(void)
-{
-       outb(0x8f, 0x70);
-       inb(0x71);              /* dummy */
-       outb(0x0f, 0x70);
-       inb(0x71);              /* dummy */
-}
-
-#endif /* !_MACH_TRAPS_H */
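
For context, a standalone sketch (not part of the patch) of how the reason byte returned by get_nmi_reason() above is conventionally decoded by the generic NMI handler; the bit meanings are the standard PC/AT ones for port 0x61, not something defined in this header, and the sample value is made up.

#include <stdio.h>

int main(void)
{
	unsigned char reason = 0x80;	/* pretend value read from port 0x61 */

	if (reason & 0x80)
		printf("NMI: memory parity error (SERR#)\n");
	if (reason & 0x40)
		printf("NMI: I/O check error (IOCHK#)\n");
	if (!(reason & 0xc0))
		printf("NMI: unknown reason %#x\n", reason);
	return 0;
}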
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h
deleted file mode 100644 (file)
index ed0bab4..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *     PCI BIOS function numbering for conventional PCI BIOS 
- *     systems
- */
-
-#define PCIBIOS_PCI_FUNCTION_ID        0xb1XX
-#define PCIBIOS_PCI_BIOS_PRESENT       0xb101
-#define PCIBIOS_FIND_PCI_DEVICE                0xb102
-#define PCIBIOS_FIND_PCI_CLASS_CODE    0xb103
-#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
-#define PCIBIOS_READ_CONFIG_BYTE       0xb108
-#define PCIBIOS_READ_CONFIG_WORD       0xb109
-#define PCIBIOS_READ_CONFIG_DWORD      0xb10a
-#define PCIBIOS_WRITE_CONFIG_BYTE      0xb10b
-#define PCIBIOS_WRITE_CONFIG_WORD      0xb10c
-#define PCIBIOS_WRITE_CONFIG_DWORD     0xb10d
-#define PCIBIOS_GET_ROUTING_OPTIONS    0xb10e
-#define PCIBIOS_SET_PCI_HW_INT         0xb10f
-
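As a hedged aside (not stated in the header itself): each constant above is the 16-bit AX value loaded before an INT 1Ah PCI BIOS call, with AH = 0xb1 selecting the PCI BIOS service and AL the sub-function. A trivial standalone illustration:

#include <stdio.h>

#define PCIBIOS_READ_CONFIG_WORD	0xb109

int main(void)
{
	unsigned int ax = PCIBIOS_READ_CONFIG_WORD;

	printf("AH=%#04x (PCI BIOS), AL=%#04x (read config word)\n",
	       (ax >> 8) & 0xff, ax & 0xff);
	return 0;
}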
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
deleted file mode 100644 (file)
index 8f6c7b2..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * machine_specific_memory_setup - Hook for machine specific memory setup.
- *
- * Description:
- *     This is included late in kernel/setup.c so that it can make
- *     use of all of the static functions.
- **/
-
-static char * __init machine_specific_memory_setup(void)
-{
-       char *who;
-       unsigned long start_pfn, max_pfn;
-
-       who = "Xen";
-
-       start_pfn = 0;
-       max_pfn = xen_start_info.nr_pages;
-
-       e820.nr_map = 0;
-       add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
-
-       return who;
-}
-
-void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
-{
-       clear_bit(X86_FEATURE_VME, c->x86_capability);
-       clear_bit(X86_FEATURE_DE, c->x86_capability);
-       clear_bit(X86_FEATURE_PSE, c->x86_capability);
-       clear_bit(X86_FEATURE_PGE, c->x86_capability);
-       clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-       clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-}
-
-extern void hypervisor_callback(void);
-extern void failsafe_callback(void);
-
-static void __init machine_specific_arch_setup(void)
-{
-       HYPERVISOR_set_callbacks(
-           __KERNEL_CS, (unsigned long)hypervisor_callback,
-           __KERNEL_CS, (unsigned long)failsafe_callback);
-
-       machine_specific_modify_cpu_capabilities(&boot_cpu_data);
-}
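For reference, a standalone sketch (not part of the patch) of the single E820 region that machine_specific_memory_setup() above reports: Xen hands the guest a pseudo-physical page count in xen_start_info.nr_pages, and the kernel claims exactly that range as RAM. The page count below is made up.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_PHYS(x)	((unsigned long long)(x) << PAGE_SHIFT)

int main(void)
{
	unsigned long nr_pages = 65536;		/* e.g. a 256 MB domain */

	printf("e820: 0x%09llx - 0x%09llx (usable RAM)\n",
	       PFN_PHYS(0), PFN_PHYS(nr_pages));
	return 0;
}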
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
deleted file mode 100644 (file)
index b18df68..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/* Hook to call BIOS initialisation function */
-
-#define ARCH_SETUP machine_specific_arch_setup();
-
-static void __init machine_specific_arch_setup(void);
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mmu_context.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
deleted file mode 100644 (file)
index 223c719..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef __I386_SCHED_H
-#define __I386_SCHED_H
-
-#include <linux/config.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       unsigned cpu = smp_processor_id();
-       if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
-               cpu_tlbstate[cpu].state = TLBSTATE_LAZY;        
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-                            struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               cpu_tlbstate[cpu].state = TLBSTATE_OK;
-               cpu_tlbstate[cpu].active_mm = next;
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-
-               /* Re-load page tables */
-               load_cr3(next->pgd);
-
-               /*
-                * load the LDT, if the LDT is different:
-                */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context, cpu);
-       }
-#ifdef CONFIG_SMP
-       else {
-               cpu_tlbstate[cpu].state = TLBSTATE_OK;
-               BUG_ON(cpu_tlbstate[cpu].active_mm != next);
-
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled 
-                        * tlb flush IPI delivery. We must reload %cr3.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context, cpu);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm) \
-       asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-
-#define activate_mm(prev, next) \
-do { \
-       switch_mm((prev),(next),NULL); \
-       flush_page_update_queue();                      \
-} while ( 0 )
-
-#endif
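A toy standalone model (not part of the patch) of the lazy-TLB decision in switch_mm() above, with the per-CPU cpu_tlbstate bookkeeping omitted and cpu_vm_mask reduced to a plain bitmask: cr3 is only reloaded when the mm really changes, or when a lazy CPU was dropped from the mask by leave_mm() and therefore missed a flush IPI.

#include <stdio.h>

struct mm { const char *name; unsigned long cpu_vm_mask; };

static void toy_switch_mm(struct mm *prev, struct mm *next, int cpu)
{
	if (prev != next) {
		prev->cpu_vm_mask &= ~(1UL << cpu);	/* stop flush IPIs for prev */
		next->cpu_vm_mask |= 1UL << cpu;
		printf("cpu%d: load_cr3(%s)\n", cpu, next->name);
	} else if (!(next->cpu_vm_mask & (1UL << cpu))) {
		/* we were lazy and got dropped from the mask, so a flush IPI
		 * was skipped on our behalf: reload cr3 now */
		next->cpu_vm_mask |= 1UL << cpu;
		printf("cpu%d: lazy cr3 reload (%s)\n", cpu, next->name);
	}
}

int main(void)
{
	struct mm a = { "mm A", 0 }, b = { "mm B", 0 };

	toy_switch_mm(&a, &b, 0);	/* real switch: cr3 reloaded */
	toy_switch_mm(&b, &b, 0);	/* same mm, still in the mask: no-op */
	b.cpu_vm_mask = 0;		/* pretend leave_mm() ran while lazy */
	toy_switch_mm(&b, &b, 0);	/* missed-IPI case: cr3 reloaded again */
	return 0;
}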
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/msr.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/msr.h
deleted file mode 100644 (file)
index 4cc4f31..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <linux/smp.h>
-#include <asm-xen/hypervisor.h>
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(_msr,_val1,_val2) do { \
-       dom0_op_t op; \
-       op.cmd = DOM0_MSR; \
-       op.u.msr.write = 0; \
-       op.u.msr.msr = (_msr); \
-       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
-       HYPERVISOR_dom0_op(&op); \
-       (_val1) = op.u.msr.out1; \
-       (_val2) = op.u.msr.out2; \
-} while(0)
-
-#define wrmsr(_msr,_val1,_val2) do { \
-       dom0_op_t op; \
-       op.cmd = DOM0_MSR; \
-       op.u.msr.write = 1; \
-       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
-       op.u.msr.msr = (_msr); \
-       op.u.msr.in1 = (_val1); \
-       op.u.msr.in2 = (_val2); \
-       HYPERVISOR_dom0_op(&op); \
-} while(0)
-
-#define rdmsrl(msr,val) do { \
-       unsigned long l__,h__; \
-       rdmsr (msr, l__, h__);  \
-       val = l__;  \
-       val |= ((u64)h__<<32);  \
-} while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
-{
-       unsigned long lo, hi;
-       lo = (unsigned long) val;
-       hi = val >> 32;
-       wrmsr (msr, lo, hi);
-}
-
-#define rdtsc(low,high) \
-     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
-     __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-     __asm__ __volatile__("rdpmc" \
-                         : "=a" (low), "=d" (high) \
-                         : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR            0
-#define MSR_IA32_P5_MC_TYPE            1
-#define MSR_IA32_PLATFORM_ID           0x17
-#define MSR_IA32_EBL_CR_POWERON                0x2a
-
-#define MSR_IA32_APICBASE              0x1b
-#define MSR_IA32_APICBASE_BSP          (1<<8)
-#define MSR_IA32_APICBASE_ENABLE       (1<<11)
-#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE           0x79
-#define MSR_IA32_UCODE_REV             0x8b
-
-#define MSR_P6_PERFCTR0                0xc1
-#define MSR_P6_PERFCTR1                0xc2
-
-#define MSR_IA32_BBL_CR_CTL            0x119
-
-#define MSR_IA32_SYSENTER_CS           0x174
-#define MSR_IA32_SYSENTER_ESP          0x175
-#define MSR_IA32_SYSENTER_EIP          0x176
-
-#define MSR_IA32_MCG_CAP               0x179
-#define MSR_IA32_MCG_STATUS            0x17a
-#define MSR_IA32_MCG_CTL               0x17b
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX               0x180
-#define MSR_IA32_MCG_EBX               0x181
-#define MSR_IA32_MCG_ECX               0x182
-#define MSR_IA32_MCG_EDX               0x183
-#define MSR_IA32_MCG_ESI               0x184
-#define MSR_IA32_MCG_EDI               0x185
-#define MSR_IA32_MCG_EBP               0x186
-#define MSR_IA32_MCG_ESP               0x187
-#define MSR_IA32_MCG_EFLAGS            0x188
-#define MSR_IA32_MCG_EIP               0x189
-#define MSR_IA32_MCG_RESERVED          0x18A
-
-#define MSR_P6_EVNTSEL0                        0x186
-#define MSR_P6_EVNTSEL1                        0x187
-
-#define MSR_IA32_PERF_STATUS           0x198
-#define MSR_IA32_PERF_CTL              0x199
-
-#define MSR_IA32_THERM_CONTROL         0x19a
-#define MSR_IA32_THERM_INTERRUPT       0x19b
-#define MSR_IA32_THERM_STATUS          0x19c
-#define MSR_IA32_MISC_ENABLE           0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR           0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP      0x1db
-#define MSR_IA32_LASTBRANCHTOIP                0x1dc
-#define MSR_IA32_LASTINTFROMIP         0x1dd
-#define MSR_IA32_LASTINTTOIP           0x1de
-
-#define MSR_IA32_MC0_CTL               0x400
-#define MSR_IA32_MC0_STATUS            0x401
-#define MSR_IA32_MC0_ADDR              0x402
-#define MSR_IA32_MC0_MISC              0x403
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0            0x300
-#define MSR_P4_BPU_PERFCTR1            0x301
-#define MSR_P4_BPU_PERFCTR2            0x302
-#define MSR_P4_BPU_PERFCTR3            0x303
-#define MSR_P4_MS_PERFCTR0             0x304
-#define MSR_P4_MS_PERFCTR1             0x305
-#define MSR_P4_MS_PERFCTR2             0x306
-#define MSR_P4_MS_PERFCTR3             0x307
-#define MSR_P4_FLAME_PERFCTR0          0x308
-#define MSR_P4_FLAME_PERFCTR1          0x309
-#define MSR_P4_FLAME_PERFCTR2          0x30a
-#define MSR_P4_FLAME_PERFCTR3          0x30b
-#define MSR_P4_IQ_PERFCTR0             0x30c
-#define MSR_P4_IQ_PERFCTR1             0x30d
-#define MSR_P4_IQ_PERFCTR2             0x30e
-#define MSR_P4_IQ_PERFCTR3             0x30f
-#define MSR_P4_IQ_PERFCTR4             0x310
-#define MSR_P4_IQ_PERFCTR5             0x311
-#define MSR_P4_BPU_CCCR0               0x360
-#define MSR_P4_BPU_CCCR1               0x361
-#define MSR_P4_BPU_CCCR2               0x362
-#define MSR_P4_BPU_CCCR3               0x363
-#define MSR_P4_MS_CCCR0                0x364
-#define MSR_P4_MS_CCCR1                0x365
-#define MSR_P4_MS_CCCR2                0x366
-#define MSR_P4_MS_CCCR3                0x367
-#define MSR_P4_FLAME_CCCR0             0x368
-#define MSR_P4_FLAME_CCCR1             0x369
-#define MSR_P4_FLAME_CCCR2             0x36a
-#define MSR_P4_FLAME_CCCR3             0x36b
-#define MSR_P4_IQ_CCCR0                0x36c
-#define MSR_P4_IQ_CCCR1                0x36d
-#define MSR_P4_IQ_CCCR2                0x36e
-#define MSR_P4_IQ_CCCR3                0x36f
-#define MSR_P4_IQ_CCCR4                0x370
-#define MSR_P4_IQ_CCCR5                0x371
-#define MSR_P4_ALF_ESCR0               0x3ca
-#define MSR_P4_ALF_ESCR1               0x3cb
-#define MSR_P4_BPU_ESCR0               0x3b2
-#define MSR_P4_BPU_ESCR1               0x3b3
-#define MSR_P4_BSU_ESCR0               0x3a0
-#define MSR_P4_BSU_ESCR1               0x3a1
-#define MSR_P4_CRU_ESCR0               0x3b8
-#define MSR_P4_CRU_ESCR1               0x3b9
-#define MSR_P4_CRU_ESCR2               0x3cc
-#define MSR_P4_CRU_ESCR3               0x3cd
-#define MSR_P4_CRU_ESCR4               0x3e0
-#define MSR_P4_CRU_ESCR5               0x3e1
-#define MSR_P4_DAC_ESCR0               0x3a8
-#define MSR_P4_DAC_ESCR1               0x3a9
-#define MSR_P4_FIRM_ESCR0              0x3a4
-#define MSR_P4_FIRM_ESCR1              0x3a5
-#define MSR_P4_FLAME_ESCR0             0x3a6
-#define MSR_P4_FLAME_ESCR1             0x3a7
-#define MSR_P4_FSB_ESCR0               0x3a2
-#define MSR_P4_FSB_ESCR1               0x3a3
-#define MSR_P4_IQ_ESCR0                0x3ba
-#define MSR_P4_IQ_ESCR1                0x3bb
-#define MSR_P4_IS_ESCR0                0x3b4
-#define MSR_P4_IS_ESCR1                0x3b5
-#define MSR_P4_ITLB_ESCR0              0x3b6
-#define MSR_P4_ITLB_ESCR1              0x3b7
-#define MSR_P4_IX_ESCR0                0x3c8
-#define MSR_P4_IX_ESCR1                0x3c9
-#define MSR_P4_MOB_ESCR0               0x3aa
-#define MSR_P4_MOB_ESCR1               0x3ab
-#define MSR_P4_MS_ESCR0                0x3c0
-#define MSR_P4_MS_ESCR1                0x3c1
-#define MSR_P4_PMH_ESCR0               0x3ac
-#define MSR_P4_PMH_ESCR1               0x3ad
-#define MSR_P4_RAT_ESCR0               0x3bc
-#define MSR_P4_RAT_ESCR1               0x3bd
-#define MSR_P4_SAAT_ESCR0              0x3ae
-#define MSR_P4_SAAT_ESCR1              0x3af
-#define MSR_P4_SSU_ESCR0               0x3be
-#define MSR_P4_SSU_ESCR1               0x3bf    /* guess: not defined in manual */
-#define MSR_P4_TBPU_ESCR0              0x3c2
-#define MSR_P4_TBPU_ESCR1              0x3c3
-#define MSR_P4_TC_ESCR0                0x3c4
-#define MSR_P4_TC_ESCR1                0x3c5
-#define MSR_P4_U2L_ESCR0               0x3b0
-#define MSR_P4_U2L_ESCR1               0x3b1
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER                    0xC0000080
-#define MSR_K6_STAR                    0xC0000081
-#define MSR_K6_WHCR                    0xC0000082
-#define MSR_K6_UWCCR                   0xC0000085
-#define MSR_K6_EPMR                    0xC0000086
-#define MSR_K6_PSOR                    0xC0000087
-#define MSR_K6_PFIR                    0xC0000088
-
-#define MSR_K7_EVNTSEL0                        0xC0010000
-#define MSR_K7_EVNTSEL1                        0xC0010001
-#define MSR_K7_EVNTSEL2                        0xC0010002
-#define MSR_K7_EVNTSEL3                        0xC0010003
-#define MSR_K7_PERFCTR0                        0xC0010004
-#define MSR_K7_PERFCTR1                        0xC0010005
-#define MSR_K7_PERFCTR2                        0xC0010006
-#define MSR_K7_PERFCTR3                        0xC0010007
-#define MSR_K7_HWCR                    0xC0010015
-#define MSR_K7_CLK_CTL                 0xC001001b
-#define MSR_K7_FID_VID_CTL             0xC0010041
-#define MSR_K7_FID_VID_STATUS          0xC0010042
-
-/* extended feature register */
-#define MSR_EFER                       0xc0000080
-
-/* EFER bits: */
-
-/* Execute Disable enable */
-#define _EFER_NX                       11
-#define EFER_NX                                (1<<_EFER_NX)
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1                   0x107
-#define MSR_IDT_FCR2                   0x108
-#define MSR_IDT_FCR3                   0x109
-#define MSR_IDT_FCR4                   0x10a
-
-#define MSR_IDT_MCR0                   0x110
-#define MSR_IDT_MCR1                   0x111
-#define MSR_IDT_MCR2                   0x112
-#define MSR_IDT_MCR3                   0x113
-#define MSR_IDT_MCR4                   0x114
-#define MSR_IDT_MCR5                   0x115
-#define MSR_IDT_MCR6                   0x116
-#define MSR_IDT_MCR7                   0x117
-#define MSR_IDT_MCR_CTRL               0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR                    0x1107
-#define MSR_VIA_LONGHAUL               0x110a
-#define MSR_VIA_RNG                    0x110b
-#define MSR_VIA_BCR2                   0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL          0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS         0x80868011
-#define MSR_TMTA_LRTI_READOUT          0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
-
-#endif /* __ASM_MSR_H */
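
Note that in this Xen variant rdmsr()/wrmsr() are routed through a DOM0_MSR hypercall rather than the privileged instruction, but the 32/64-bit split is the same as native: the MSR arrives as two 32-bit halves and rdmsrl() reassembles them. A trivial standalone illustration with made-up halves:

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	unsigned long lo = 0xfee00900UL, hi = 0x00000000UL;	/* made-up halves */
	u64 val = lo | ((u64)hi << 32);		/* what rdmsrl() computes */

	printf("MSR value: %#llx (low=%#lx high=%#lx)\n", val, lo, hi);
	return 0;
}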
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/page.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/page.h
deleted file mode 100644 (file)
index ce9a1c6..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-#ifndef _I386_PAGE_H
-#define _I386_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT     12
-#define PAGE_SIZE      (1UL << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE-1))
-
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p,_n) ((void)0)
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-#define clear_page(page)       mmx_clear_page((void *)(page))
-#define copy_page(to,from)     mmx_copy_page(to,from)
-
-#else
-
-/*
- *     On older X86 processors it's not a win to use MMX here it seems.
- *     Maybe the K6-III ?
- */
-#define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from)     memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
-
-#define clear_user_page(page, vaddr, pg)       clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static inline unsigned long phys_to_machine(unsigned long phys)
-{
-    unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-    return machine;
-}
-static inline unsigned long machine_to_phys(unsigned long machine)
-{
-    unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-    return phys;
-}
-
-/*
- * These are used to make use of C type-checking..
- */
-#ifdef CONFIG_X86_PAE
-extern unsigned long long __supported_pte_mask;
-extern int nx_enabled;
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-#define pte_val(x)     ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-#define HPAGE_SHIFT    21
-#else
-#define nx_enabled 0
-typedef struct { unsigned long pte_low; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define boot_pte_t pte_t /* or would you rather have a typedef */
-#if 0                          /* XXXcl for MMU_UPDATE_DEBUG */
-static inline unsigned long pte_val(pte_t x)
-{
-       unsigned long ret = x.pte_low;
-       if ( (ret & 1) ) ret = machine_to_phys(ret);
-       return ret;
-}
-#else
-#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : (x).pte_low)
-#endif
-#define pte_val_ma(x)  ((x).pte_low)
-#define HPAGE_SHIFT    22
-#endif
-#define PTE_MASK       PAGE_MASK
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE     ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK     (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
-#endif
-
-
-static inline unsigned long pmd_val(pmd_t x)
-{
-    unsigned long ret = x.pmd;
-    if ( (ret) ) ret = machine_to_phys(ret);
-    return ret;
-}
-#define pgd_val(x)     ({ BUG(); (unsigned long)0; })
-#define pgprot_val(x)  ((x).pgprot)
-
-static inline pte_t __pte(unsigned long x)
-{
-       if ( (x & 1) ) x = phys_to_machine(x);
-       return ((pte_t) { (x) });
-}
-#define __pte_ma(x) ((pte_t) { (x) } )
-static inline pmd_t __pmd(unsigned long x)
-{
-       if ( (x & 1) ) x = phys_to_machine(x);
-       return ((pmd_t) { (x) });
-}
-#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
-#define __pgprot(x)    ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/*
- * This handles the memory map.. We could make this a config
- * option, but too many people screw it up, and too few need
- * it.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB. 
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-#define __VMALLOC_RESERVE      (128 << 20)
-
-#ifndef __ASSEMBLY__
-
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
-       int order;
-
-       size = (size-1) >> (PAGE_SHIFT-1);
-       order = -1;
-       do {
-               size >>= 1;
-               order++;
-       } while (size);
-       return order;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* 
- * XXXcl two options for PAGE_OFFSET
- * - 0xC0000000:
- *   change text offset in arch/xen/i386/kernel/vmlinux.lds.S
- *   change __pa/__va macros
- * - 0xC0100000:
- *   change TASK_SIZE 
- */
-#ifdef __ASSEMBLY__
-#define __PAGE_OFFSET          (0xC0000000)
-#else
-#define __PAGE_OFFSET          (0xC0000000UL)
-#endif
-
-
-#define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
-#define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM                 (-__PAGE_OFFSET-__VMALLOC_RESERVE)
-#define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-#ifndef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn)       (mem_map + (pfn))
-#define page_to_pfn(page)      ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn)         ((pfn) < max_mapnr)
-#endif /* !CONFIG_DISCONTIGMEM */
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-#define VM_DATA_DEFAULT_FLAGS \
-       (VM_READ | VM_WRITE | \
-       ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a)    (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m)    (__va(machine_to_phys(_m)))
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_PAGE_H */
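
A standalone sketch (not part of the patch) of the pseudo-physical to machine conversion above, with the real phys_to_machine_mapping array replaced by a toy four-entry table; the frame numbers are made up. The page offset is preserved and only the frame number is translated through the p2m table.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long p2m[4] = { 0x1234, 0x0042, 0x0777, 0x0100 };	/* pfn -> mfn */

static unsigned long toy_phys_to_machine(unsigned long phys)
{
	unsigned long machine = p2m[phys >> PAGE_SHIFT];
	return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
}

int main(void)
{
	unsigned long phys = (2UL << PAGE_SHIFT) | 0x123;	/* pfn 2, offset 0x123 */

	printf("pseudo-phys %#lx -> machine %#lx\n", phys, toy_phys_to_machine(phys));
	return 0;
}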
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/param.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/param.h
deleted file mode 100644 (file)
index 658b29e..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASMi386_PARAM_H
-#define _ASMi386_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ            100/*  0 */             /* Internal kernel timer frequency */
-# define USER_HZ       100             /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC                (USER_HZ)       /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE  4096
-
-#ifndef NOGROUP
-#define NOGROUP                (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64      /* max length of hostname */
-#define COMMAND_LINE_SIZE 256
-
-#endif
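As a quick standalone illustration (not part of the patch) of what HZ = 100 means in practice: the timer tick is 10 ms, so a count in jiffies converts to milliseconds by multiplying by 1000/HZ.

#include <stdio.h>

#define HZ 100

int main(void)
{
	unsigned long timeout_jiffies = 250;

	printf("tick = %d ms, 250 jiffies = %lu ms\n",
	       1000 / HZ, timeout_jiffies * (1000 / HZ));
	return 0;
}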
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pci.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pci.h
deleted file mode 100644 (file)
index 32d08ad..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
-
-#include <linux/config.h>
-
-#ifdef __KERNEL__
-#include <linux/mm.h>          /* for struct page */
-
-/* Can be used to override the logic in pci_scan_bus for skipping
-   already-configured bus numbers - to be used for buggy BIOSes
-   or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses()    0
-#endif
-#define pcibios_scan_all_fns(a, b)     0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO         0x1000
-#define PCIBIOS_MIN_MEM                (pci_mem_start)
-
-#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-/* Dynamic DMA mapping stuff.
- * i386 has everything mapped statically.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
-
-struct pci_dev;
-
-/* The PCI address space does equal the physical memory
- * address space.  The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS    (1)
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)         (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)        do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)           (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)  do { } while (0)
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask)   (1)
-
-static inline dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
-       return ((dma64_addr_t) page_to_phys(page) +
-               (dma64_addr_t) offset);
-}
-
-static inline struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
-       return pfn_to_page(dma_addr >> PAGE_SHIFT);
-}
-
-static inline unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
-       return (dma_addr & ~PAGE_MASK);
-}
-
-static inline void
-pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-}
-
-static inline void
-pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-       flush_write_buffers();
-}
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state, int write_combine);
-
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-#endif /* __KERNEL__ */
-
-/* implement the pci_ DMA API in terms of the generic device dma_ one */
-#include <asm-generic/pci-dma-compat.h>
-
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-
-/* On Xen we have to scan all functions since Xen hides bridges from
- * us.  If a bridge is at fn=0 and that slot has a multifunction
- * device, we won't find the additional devices without scanning all
- * functions. */
-#undef pcibios_scan_all_fns
-#define pcibios_scan_all_fns(a, b)     1
-
-#endif /* __i386_PCI_H */
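
A standalone sketch (not part of the patch) of why the pcibios_scan_all_fns override at the end of this header matters: the generic probe loop normally stops after function 0 of a slot unless that function advertises the multifunction bit, but Xen hides bridges, so a device can sit at a non-zero function with nothing at function 0. The toy device layout below is made up.

#include <stdio.h>

#define SCAN_ALL_FNS	1	/* the Xen override above */

static int is_multifunction(int devfn) { return 0; }		/* toy: fn 0 says "single" */
static int device_present(int devfn)  { return devfn == 3; }	/* toy: device at fn 3 */

int main(void)
{
	int fn, found = 0;

	for (fn = 0; fn < 8; fn++) {
		if (fn && !SCAN_ALL_FNS && !is_multifunction(0))
			break;		/* generic behaviour: give up after fn 0 */
		if (device_present(fn)) {
			printf("found device at function %d\n", fn);
			found++;
		}
	}
	printf("%d device(s) found\n", found);
	return 0;
}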
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgalloc.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
deleted file mode 100644 (file)
index 77d16e3..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <linux/threads.h>
-#include <linux/mm.h>          /* for struct page */
-#include <asm/io.h>            /* for phys_to_virt and page_to_pseudophys */
-
-#define pmd_populate_kernel(mm, pmd, pte) \
-               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
-       set_pmd(pmd, __pmd(_PAGE_TABLE +
-               ((unsigned long long)page_to_pfn(pte) <<
-                       (unsigned long long) PAGE_SHIFT)));
-       flush_page_update_queue();
-       /* XXXcl queue */
-}
-/*
- * Allocate and free page tables.
- */
-
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-       free_page((unsigned long)pte);
-       __make_page_writable(pte);
-       flush_page_update_queue();
-}
-
-extern void pte_free(struct page *pte);
-
-#define __pte_free_tlb(tlb,pte)        tlb_remove_page((tlb),(pte))
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- * (In the PAE case we free the pmds as part of the pgd.)
- */
-
-#define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)                    do { } while (0)
-#define __pmd_free_tlb(tlb,x)          do { } while (0)
-#define pgd_populate(mm, pmd, pte)     BUG()
-
-#define check_pgt_cache()      do { } while (0)
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                            unsigned long address, 
-                            unsigned long machine_addr,
-                            unsigned long size, 
-                            pgprot_t prot,
-                            domid_t  domid);
-int __direct_remap_area_pages(struct mm_struct *mm,
-                             unsigned long address, 
-                             unsigned long size, 
-                             mmu_update_t *v);
-
-#endif /* _I386_PGALLOC_H */
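
For reference, a standalone sketch (not part of the patch) of the value pmd_populate() above installs: the page frame number of the pte page shifted into place plus the _PAGE_TABLE permission bits (present, rw, user, accessed, dirty, i.e. 0x067 per the pgtable.h definitions later in this diff). The frame number below is made up.

#include <stdio.h>

#define PAGE_SHIFT	12
#define _PAGE_TABLE	0x067ULL	/* PRESENT|RW|USER|ACCESSED|DIRTY */

int main(void)
{
	unsigned long long pte_pfn = 0x1a2b;	/* frame holding the pte page */
	unsigned long long pmd_entry = _PAGE_TABLE + (pte_pfn << PAGE_SHIFT);

	printf("pmd entry = %#llx\n", pmd_entry);
	return 0;
}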
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
deleted file mode 100644 (file)
index 2afc4fb..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-#define _I386_PGTABLE_2LEVEL_DEFS_H
-
-/*
- * traditional i386 two-level paging structure:
- */
-
-#define PGDIR_SHIFT    22
-#define PTRS_PER_PGD   1024
-#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
-
-/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT      22
-#define PTRS_PER_PMD   1
-
-#define PTRS_PER_PTE   1024
-
-#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
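
A standalone sketch (not part of the patch) of how a 32-bit virtual address splits under the two-level layout defined above: bits 31-22 index the pgd, bits 21-12 index the pte page, and bits 11-0 are the page offset. The address below is made up.

#include <stdio.h>

#define PGDIR_SHIFT	22
#define PAGE_SHIFT	12
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

int main(void)
{
	unsigned long addr = 0xc0123456UL;

	printf("addr %#lx: pgd index %lu, pte index %lu, offset %#lx\n",
	       addr,
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1),
	       (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}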
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
deleted file mode 100644 (file)
index 1defe99..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_H
-#define _I386_PGTABLE_2LEVEL_H
-
-#define pte_ERROR(e) \
-       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
-#define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)          { return 0; }
-static inline int pgd_bad(pgd_t pgd)           { return 0; }
-static inline int pgd_present(pgd_t pgd)       { return 1; }
-#define pgd_clear(xp)                          do { } while (0)
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_atomic(pteptr, pteval) (*(pteptr) = pteval)
-#else
-#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pte_atomic(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
-#endif
-/*
- * (pmds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
-#define set_pgd(pgdptr, pgdval) ((void)0)
-
-#define pgd_page(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
-       return (pmd_t *) dir;
-}
-
-/*
- * A note on implementation of this atomic 'get-and-clear' operation.
- * This is actually very simple because Xen Linux can only run on a single
- * processor. Therefore, we cannot race other processors setting the 'accessed'
- * or 'dirty' bits on a page-table entry.
- * Even if pages are shared between domains, that is not a problem because
- * each domain will have separate page tables, with their own versions of
- * accessed & dirty state.
- */
-static inline pte_t ptep_get_and_clear(pte_t *xp)
-{
-       pte_t pte = *xp;
-       if (pte.pte_low)
-               set_pte(xp, __pte_ma(0));
-       return pte;
-}
-
-#define pte_same(a, b)         ((a).pte_low == (b).pte_low)
-/*                                 
- * We detect special mappings in one of two ways:
- *  1. If the MFN is an I/O page then Xen will set the m2p entry
- *     to be outside our maximum possible pseudophys range.
- *  2. If the MFN belongs to a different domain then we will certainly
- *     not have MFN in our p2m table. Conversely, if the page is ours,
- *     then we'll have p2m(m2p(MFN))==MFN.
- * If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !pfn_valid() by returning an out-of-range pointer.
- *
- * NB. These checks require that, for any MFN that is not in our reservation,
- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
- * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
- * 
- * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
- *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- *      require. In all the cases we care about, the high bit gets shifted out
- *      (e.g., phys_to_machine()) so behaviour there is correct.
- */
-#define INVALID_P2M_ENTRY (~0UL)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_pfn(_pte)                                                   \
-({                                                                      \
-    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;                   \
-    unsigned long pfn = mfn_to_pfn(mfn);                                \
-    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )               \
-        pfn = max_mapnr; /* special: force !pfn_valid() */              \
-    pfn;                                                                \
-})
-
-#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
-
-#define pte_none(x)            (!(x).pte_low)
-
-#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-/*
- * All present user pages are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
-       return pte_user(pte);
-}
-
-/*
- * All present pages are kernel-executable:
- */
-static inline int pte_exec_kernel(pte_t pte)
-{
-       return 1;
-}
-
-/*
- * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS      29
-
-#define pte_to_pgoff(pte) \
-       ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-
-#define pgoff_to_pte(off) \
-       ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
-
-#endif /* _I386_PGTABLE_2LEVEL_H */
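
A standalone sketch (not part of the patch) of the file-pte packing at the end of this header: bits 0, 6 and 7 of a non-present pte are reserved (present, _PAGE_FILE, _PAGE_PROTNONE), so the 29-bit file offset is split into a 5-bit field starting at bit 1 and the remainder starting at bit 8, and the two macros are exact inverses. The offset below is made up.

#include <stdio.h>

#define _PAGE_FILE	0x040

static unsigned long toy_pgoff_to_pte(unsigned long off)
{
	return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long toy_pte_to_pgoff(unsigned long pte_low)
{
	return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
	unsigned long off = 0x12345678UL & ((1UL << 29) - 1);
	unsigned long pte = toy_pgoff_to_pte(off);

	printf("offset %#lx -> pte %#lx -> offset %#lx\n",
	       off, pte, toy_pte_to_pgoff(pte));
	return 0;
}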
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable.h
deleted file mode 100644 (file)
index 3c722ee..0000000
+++ /dev/null
@@ -1,556 +0,0 @@
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
-
-#include <linux/config.h>
-#include <asm-xen/hypervisor.h>
-
-/*
- * The Linux memory management assumes a three-level page table setup. On
- * the i386, we use that, but "fold" the mid level into the top-level page
- * table, so that we physically have the same two-level page table as the
- * i386 mmu expects.
- *
- * This file contains the functions and defines necessary to modify and use
- * the i386 page table tree.
- */
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <linux/threads.h>
-
-#ifndef _I386_BITOPS_H
-#include <asm/bitops.h>
-#endif
-
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-extern unsigned long empty_zero_page[1024];
-extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
-extern kmem_cache_t *pte_cache;
-extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
-
-void pte_ctor(void *, kmem_cache_t *, unsigned long);
-void pte_dtor(void *, kmem_cache_t *, unsigned long);
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
-void pgtable_cache_init(void);
-void paging_init(void);
-
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PMD_SIZE       (1UL << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE-1))
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_PGD_NR      0
-
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define TWOLEVEL_PGDIR_SHIFT   22
-#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START  (((unsigned long) high_memory + vmalloc_earlyreserve + \
-                       2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END   (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END   (FIXADDR_START-2*PAGE_SIZE)
-#endif
-
-extern void * high_memory;
-extern unsigned long vmalloc_earlyreserve;
-
-/*
- * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
- * of the Pentium details, but assuming intel did the straightforward
- * thing, this bit set in the page directory entry just means that
- * the page directory entry points directly to a 4MB-aligned block of
- * memory. 
- */
-#define _PAGE_BIT_PRESENT      0
-#define _PAGE_BIT_RW           1
-#define _PAGE_BIT_USER         2
-#define _PAGE_BIT_PWT          3
-#define _PAGE_BIT_PCD          4
-#define _PAGE_BIT_ACCESSED     5
-#define _PAGE_BIT_DIRTY                6
-#define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
-#define _PAGE_BIT_UNUSED2      10
-#define _PAGE_BIT_UNUSED3      11
-#define _PAGE_BIT_NX           63
-
-#define _PAGE_PRESENT  0x001
-#define _PAGE_RW       0x002
-#define _PAGE_USER     0x004
-#define _PAGE_PWT      0x008
-#define _PAGE_PCD      0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY    0x040
-#define _PAGE_PSE      0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL   0x100   /* Global TLB entry PPro+ */
-#define _PAGE_UNUSED1  0x200   /* available for programmer */
-#define _PAGE_UNUSED2  0x400
-#define _PAGE_UNUSED3  0x800
-
-#define _PAGE_FILE     0x040   /* set:pagecache unset:swap */
-#define _PAGE_PROTNONE 0x080   /* If not present */
-#ifdef CONFIG_X86_PAE
-#define _PAGE_NX       (1ULL<<_PAGE_BIT_NX)
-#else
-#define _PAGE_NX       0
-#endif
-
-#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE \
-       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED \
-       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-
-#define PAGE_SHARED_EXEC \
-       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC \
-       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC \
-       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY \
-       PAGE_COPY_NOEXEC
-#define PAGE_READONLY \
-       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC \
-       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define _PAGE_KERNEL \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define _PAGE_KERNEL_EXEC \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-
-/*
- * The i386 can't do page protection for execute, and considers that
- * the same are read. Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'verify_area(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* The boot page tables (all created as a single array) */
-extern unsigned long pg0[];
-
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp)  do { set_pte(xp, __pte(0)); } while (0)
-
-#define pmd_none(x)    (!pmd_val(x))
-/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-   can temporarily clear it. */
-#define pmd_present(x) (pmd_val(x))
-/* pmd_clear below */
-#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_user(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_read(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte)         { return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)         { return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)         { return (pte).pte_low & _PAGE_RW; }
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)          { return (pte).pte_low & _PAGE_FILE; }
-
-static inline pte_t pte_rdprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_mkclean(pte_t pte)     { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte)       { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)     { (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
-
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level.h>
-#else
-# include <asm/pgtable-2level.h>
-#endif
-
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       int ret = pte_dirty(pte);
-       if (ret)
-               xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
-       return ret;
-}
-
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       int ret = pte_young(pte);
-       if (ret)
-               xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
-       return ret;
-}
-
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       if (pte_write(pte))
-               set_pte(ptep, pte_wrprotect(pte));
-}
-static inline void ptep_mkdirty(pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       if (!pte_dirty(pte))
-               xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
-}
-
-/*
- * Macro to mark a page protection value as "uncacheable".  On processors which do not support
- * it, this is a no-op.
- */
-#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3)                                          \
-                                ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
-#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-       pte.pte_low &= _PAGE_CHG_MASK;
-       pte.pte_low |= pgprot_val(newprot);
-#ifdef CONFIG_X86_PAE
-       /*
-        * Chop off the NX bit (if present), and add the NX portion of
-        * the newprot (if present):
-        */
-       pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-       pte.pte_high |= (pgprot_val(newprot) >> 32) & \
-                                       (__supported_pte_mask >> 32);
-#endif
-       return pte;
-}
-
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
-#define pmd_page_kernel(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pmd_clear(xp)  do {                                    \
-       set_pmd(xp, __pmd(0));                                  \
-       xen_flush_page_update_queue();                          \
-} while (0)
-
-#ifndef CONFIG_DISCONTIGMEM
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#endif /* !CONFIG_DISCONTIGMEM */
-
-#define pmd_large(pmd) \
-       ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-
-/*
- * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address) \
-               (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/*
- * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) \
-               (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
-       ((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address);
-
-/*
- * Make a given kernel text page executable/non-executable.
- * Returns the previous executability setting of that page (which
- * is used to restore the previous state). Used by the SMP bootup code.
- * NOTE: this is an __init function for security reasons.
- */
-#ifdef CONFIG_X86_PAE
- extern int set_kernel_exec(unsigned long vaddr, int enable);
-#else
- static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
-#endif
-
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
-        pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
-        pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
-#else
-#define pte_offset_map(dir, address) \
-       ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-#endif
-
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPUs that might be updating the dirty
- * bit at the same time.
- */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-
-#if 0
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-       do {                                                              \
-               if (__dirty) {                                            \
-                       queue_l1_entry_update((__ptep), (__entry).pte_low); \
-                       flush_tlb_page(__vma, __address);                 \
-                       xen_flush_page_update_queue();                    \
-               }                                                         \
-       } while (0)
-#else
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-       do {                                                              \
-               if (__dirty) {                                            \
-                       if ( likely(vma->vm_mm == current->mm) ) {        \
-                           xen_flush_page_update_queue();                \
-                           HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, UVMF_INVLPG); \
-                       } else {                                          \
-                            xen_l1_entry_update((__ptep), (__entry).pte_low); \
-                           flush_tlb_page(__vma, __address);             \
-                       }                                                 \
-               }                                                         \
-       } while (0)
-
-#endif
-
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(__vma, __address, __ptep, __entry)              \
-do {                                                                   \
-       ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);    \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
-#define ptep_establish_new(__vma, __address, __ptep, __entry)          \
-do {                                                                   \
-       if ( likely((__vma)->vm_mm == current->mm) ) {                  \
-               xen_flush_page_update_queue();                          \
-               HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT,   \
-                                            __entry, 0);               \
-       } else {                                                        \
-               xen_l1_entry_update((__ptep), (__entry).pte_low);       \
-       }                                                               \
-} while (0)
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x)                  (((x).val >> 1) & 0x1f)
-#define __swp_offset(x)                        ((x).val >> 8)
-#define __swp_entry(type, offset)      ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_low })
-#define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
-
-/* NOTE: make_page* callers must call flush_page_update_queue() */
-static inline void __make_page_readonly(void *va)
-{
-       pgd_t *pgd = pgd_offset_k((unsigned long)va);
-       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
-       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
-       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-}
-
-static inline void __make_page_writable(void *va)
-{
-       pgd_t *pgd = pgd_offset_k((unsigned long)va);
-       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
-       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
-       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
-}
-
-static inline void make_page_readonly(void *va)
-{
-       pgd_t *pgd = pgd_offset_k((unsigned long)va);
-       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
-       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
-       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-       if ( (unsigned long)va >= VMALLOC_START )
-               __make_page_readonly(machine_to_virt(
-                       *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_page_writable(void *va)
-{
-       pgd_t *pgd = pgd_offset_k((unsigned long)va);
-       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
-       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
-       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
-       if ( (unsigned long)va >= VMALLOC_START )
-               __make_page_writable(machine_to_virt(
-                       *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_pages_readonly(void *va, unsigned int nr)
-{
-       while ( nr-- != 0 )
-       {
-               make_page_readonly(va);
-               va = (void *)((unsigned long)va + PAGE_SIZE);
-       }
-}
-
-static inline void make_pages_writable(void *va, unsigned int nr)
-{
-       while ( nr-- != 0 )
-       {
-               make_page_writable(va);
-               va = (void *)((unsigned long)va + PAGE_SIZE);
-       }
-}
-
-static inline unsigned long arbitrary_virt_to_phys(void *va)
-{
-       pgd_t *pgd = pgd_offset_k((unsigned long)va);
-       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
-       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
-       unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
-       return pa | ((unsigned long)va & (PAGE_SIZE-1));
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#ifndef CONFIG_DISCONTIGMEM
-#define kern_addr_valid(addr)  (1)
-#endif /* !CONFIG_DISCONTIGMEM */
-
-#define io_remap_page_range remap_page_range
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
-#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
-
-#endif /* _I386_PGTABLE_H */
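For illustration only (not part of the deleted header): a minimal userspace sketch of the swap-entry packing defined above, assuming the same layout (type in bits 1-5, offset from bit 8, bit 0 left clear for _PAGE_PRESENT); the helper names and example values here are hypothetical.

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

/* pack type/offset the same way as __swp_entry() above */
static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
	swp_entry_t e = { (type << 1) | (offset << 8) };
	return e;
}

static unsigned long swp_type(swp_entry_t e)   { return (e.val >> 1) & 0x1f; }
static unsigned long swp_offset(swp_entry_t e) { return e.val >> 8; }

int main(void)
{
	swp_entry_t e = swp_entry(7, 12345);
	assert(swp_type(e) == 7 && swp_offset(e) == 12345);	/* round trip survives */
	printf("type=%lu offset=%lu\n", swp_type(e), swp_offset(e));
	return 0;
}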
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/processor.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/processor.h
deleted file mode 100644 (file)
index 0d1860f..0000000
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
- * include/asm-i386/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
-
-#include <asm/vm86.h>
-#include <asm/math_emu.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <linux/cache.h>
-#include <linux/config.h>
-#include <linux/threads.h>
-
-/* flag for disabling the tsc */
-extern int tsc_disable;
-
-struct desc_struct {
-       unsigned long a,b;
-};
-
-#define desc_empty(desc) \
-               (!((desc)->a + (desc)->b))
-
-#define desc_equal(desc1, desc2) \
-               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
-/*
- *  CPU type and hardware bug flags. Kept separately for each CPU.
- *  Members of this structure are referenced in head.S, so think twice
- *  before touching them. [mj]
- */
-
-struct cpuinfo_x86 {
-       __u8    x86;            /* CPU family */
-       __u8    x86_vendor;     /* CPU vendor */
-       __u8    x86_model;
-       __u8    x86_mask;
-       char    wp_works_ok;    /* It doesn't on 386's */
-       char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
-       char    hard_math;
-       char    rfu;
-               int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
-       unsigned long   x86_capability[NCAPINTS];
-       char    x86_vendor_id[16];
-       char    x86_model_id[64];
-       int     x86_cache_size;  /* in KB - valid for CPUS which support this
-                                   call  */
-       int     x86_cache_alignment;    /* In bytes */
-       int     fdiv_bug;
-       int     f00f_bug;
-       int     coma_bug;
-       unsigned long loops_per_jiffy;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_NUM 9
-#define X86_VENDOR_UNKNOWN 0xff
-
-/*
- * capabilities of CPUs
- */
-
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
-extern struct tss_struct doublefault_tss;
-extern pgd_t *cur_pgd;         /* XXXsmp */
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-extern char ignore_fpu_irq;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void dodgy_tsc(void);
-
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF  0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF  0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF  0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF  0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF  0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF  0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF  0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF  0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF  0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL        0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT  0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF  0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM  0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC  0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID  0x00200000 /* CPUID detection flag */
-
-/*
- * Generic CPUID function
- */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
-{
-       __asm__("cpuid"
-               : "=a" (*eax),
-                 "=b" (*ebx),
-                 "=c" (*ecx),
-                 "=d" (*edx)
-               : "0" (op));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-       unsigned int eax;
-
-       __asm__("cpuid"
-               : "=a" (eax)
-               : "0" (op)
-               : "bx", "cx", "dx");
-       return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-       unsigned int eax, ebx;
-
-       __asm__("cpuid"
-               : "=a" (eax), "=b" (ebx)
-               : "0" (op)
-               : "cx", "dx" );
-       return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-       unsigned int eax, ecx;
-
-       __asm__("cpuid"
-               : "=a" (eax), "=c" (ecx)
-               : "0" (op)
-               : "bx", "dx" );
-       return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-       unsigned int eax, edx;
-
-       __asm__("cpuid"
-               : "=a" (eax), "=d" (edx)
-               : "0" (op)
-               : "bx", "cx");
-       return edx;
-}
-
-#define load_cr3(pgdir) do {                           \
-       queue_pt_switch(__pa(pgdir));                   \
-       cur_pgd = pgdir;        /* XXXsmp */            \
-} while (/* CONSTCOND */0)
-
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME            0x0001  /* enable vm86 extensions */
-#define X86_CR4_PVI            0x0002  /* virtual interrupts flag enable */
-#define X86_CR4_TSD            0x0004  /* disable time stamp at ipl 3 */
-#define X86_CR4_DE             0x0008  /* enable debugging extensions */
-#define X86_CR4_PSE            0x0010  /* enable page size extensions */
-#define X86_CR4_PAE            0x0020  /* enable physical address extensions */
-#define X86_CR4_MCE            0x0040  /* Machine check enable */
-#define X86_CR4_PGE            0x0080  /* enable global pages */
-#define X86_CR4_PCE            0x0100  /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR         0x0200  /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT     0x0400  /* enable unmasked SSE exceptions */
-
-/*
- * Save the cr4 feature set we're using (i.e.,
- * Pentium 4MB enable and PPro global-page
- * enable), so that any CPUs that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
-       mmu_cr4_features |= mask;
-       switch (mask) {
-       case X86_CR4_OSFXSR:
-       case X86_CR4_OSXMMEXCPT:
-               break;
-       default:
-               do {
-                       const char *msg = "Xen unsupported cr4 update\n";
-                       (void)HYPERVISOR_console_io(
-                               CONSOLEIO_write, __builtin_strlen(msg),
-                               (char *)msg);
-                       BUG();
-               } while (0);
-       }
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
-       mmu_cr4_features &= ~mask;
-       __asm__("movl %%cr4,%%eax\n\t"
-               "andl %0,%%eax\n\t"
-               "movl %%eax,%%cr4\n"
-               : : "irg" (~mask)
-               :"ax");
-}
-
-/*
- *      NSC/Cyrix CPU configuration register indexes
- */
-
-#define CX86_PCR0 0x20
-#define CX86_GCR  0xb8
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_PCR1 0xf0
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- *      NSC/Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
-       outb((reg), 0x22); \
-       outb((data), 0x23); \
-} while (0)
-
-/*
- * Bus types (default is ISA, but people can check others with these..)
- */
-extern int MCA_bus;
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-               unsigned long edx)
-{
-       /* "monitor %eax,%ecx,%edx;" */
-       asm volatile(
-               ".byte 0x0f,0x01,0xc8;"
-               : :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-       /* "mwait %eax,%ecx;" */
-       asm volatile(
-               ".byte 0x0f,0x01,0xc9;"
-               : :"a" (eax), "c" (ecx));
-}
-
-/* from system description table in BIOS.  Mostly for MCA use, but
-others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
-extern unsigned int mca_pentium_flag;
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE      (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
-
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS  65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-
-struct i387_fsave_struct {
-       long    cwd;
-       long    swd;
-       long    twd;
-       long    fip;
-       long    fcs;
-       long    foo;
-       long    fos;
-       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
-       long    status;         /* software status information */
-};
-
-struct i387_fxsave_struct {
-       unsigned short  cwd;
-       unsigned short  swd;
-       unsigned short  twd;
-       unsigned short  fop;
-       long    fip;
-       long    fcs;
-       long    foo;
-       long    fos;
-       long    mxcsr;
-       long    mxcsr_mask;
-       long    st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
-       long    xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
-       long    padding[56];
-} __attribute__ ((aligned (16)));
-
-struct i387_soft_struct {
-       long    cwd;
-       long    swd;
-       long    twd;
-       long    fip;
-       long    fcs;
-       long    foo;
-       long    fos;
-       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
-       unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
-       struct info     *info;
-       unsigned long   entry_eip;
-};
-
-union i387_union {
-       struct i387_fsave_struct        fsave;
-       struct i387_fxsave_struct       fxsave;
-       struct i387_soft_struct soft;
-};
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-struct tss_struct {
-       unsigned short  back_link,__blh;
-       unsigned long   esp0;
-       unsigned short  ss0,__ss0h;
-       unsigned long   esp1;
-       unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-       unsigned long   esp2;
-       unsigned short  ss2,__ss2h;
-       unsigned long   __cr3;
-       unsigned long   eip;
-       unsigned long   eflags;
-       unsigned long   eax,ecx,edx,ebx;
-       unsigned long   esp;
-       unsigned long   ebp;
-       unsigned long   esi;
-       unsigned long   edi;
-       unsigned short  es, __esh;
-       unsigned short  cs, __csh;
-       unsigned short  ss, __ssh;
-       unsigned short  ds, __dsh;
-       unsigned short  fs, __fsh;
-       unsigned short  gs, __gsh;
-       unsigned short  ldt, __ldth;
-       unsigned short  trace, io_bitmap_base;
-       /*
-        * The extra 1 is there because the CPU will access an
-        * additional byte beyond the end of the IO permission
-        * bitmap. The extra byte must be all 1 bits, and must
-        * be within the limit.
-        */
-       unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
-       /*
-        * pads the TSS to be cacheline-aligned (size is 0x100)
-        */
-       unsigned long __cacheline_filler[37];
-       /*
-        * .. and then another 0x100 bytes for emergency kernel stack
-        */
-       unsigned long stack[64];
-} __attribute__((packed));
-
-#define ARCH_MIN_TASKALIGN     16
-
-struct thread_struct {
-/* cached TLS descriptors. */
-       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-       unsigned long   esp0;
-       unsigned long   sysenter_cs;
-       unsigned long   eip;
-       unsigned long   esp;
-       unsigned long   fs;
-       unsigned long   gs;
-       unsigned int    io_pl;
-/* Hardware debugging registers */
-       unsigned long   debugreg[8];  /* %%db0-7 debug registers */
-/* fault info */
-       unsigned long   cr2, trap_no, error_code;
-/* floating point info */
-       union i387_union        i387;
-/* virtual 86 mode info */
-       struct vm86_struct __user * vm86_info;
-       unsigned long           screen_bitmap;
-       unsigned long           v86flags, v86mask, saved_esp0;
-       unsigned int            saved_fs, saved_gs;
-/* IO permissions */
-       unsigned long   *io_bitmap_ptr;
-};
-
-#define INIT_THREAD  {                                                 \
-       .vm86_info = NULL,                                              \
-       .sysenter_cs = __KERNEL_CS,                                     \
-       .io_bitmap_ptr = NULL,                                          \
-}
-
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {                                                    \
-       .esp0           = sizeof(init_stack) + (long)&init_stack,       \
-       .ss0            = __KERNEL_DS,                                  \
-       .esp1           = sizeof(init_tss[0]) + (long)&init_tss[0],     \
-       .ss1            = __KERNEL_CS,                                  \
-       .ldt            = GDT_ENTRY_LDT,                                \
-       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,                     \
-       .io_bitmap      = { [ 0 ... IO_BITMAP_LONGS] = ~0 },            \
-}
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-       tss->esp0 = thread->esp0;
-       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-               tss->ss1 = thread->sysenter_cs;
-               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-       }
-       HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
-}
-
-#define start_thread(regs, new_eip, new_esp) do {              \
-       __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
-       set_fs(USER_DS);                                        \
-       regs->xds = __USER_DS;                                  \
-       regs->xes = __USER_DS;                                  \
-       regs->xss = __USER_DS;                                  \
-       regs->xcs = __USER_CS;                                  \
-       regs->eip = new_eip;                                    \
-       regs->esp = new_esp;                                    \
-} while (0)
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-void show_trace(struct task_struct *task, unsigned long *stack);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info)                                                 \
-({                                                                     \
-       unsigned long *__ptr = (unsigned long *)(info);                 \
-       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
-})
-
-#define task_pt_regs(task)                                             \
-({                                                                     \
-       struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
-       __regs__ - 1;                                                   \
-})
-
-#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-#define KSTK_ESP(task) (task_pt_regs(task)->esp)
-
-
-struct microcode_header {
-       unsigned int hdrver;
-       unsigned int rev;
-       unsigned int date;
-       unsigned int sig;
-       unsigned int cksum;
-       unsigned int ldrver;
-       unsigned int pf;
-       unsigned int datasize;
-       unsigned int totalsize;
-       unsigned int reserved[3];
-};
-
-struct microcode {
-       struct microcode_header hdr;
-       unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
-       unsigned int sig;
-       unsigned int pf;
-       unsigned int cksum;
-};
-
-struct extended_sigtable {
-       unsigned int count;
-       unsigned int cksum;
-       unsigned int reserved[3];
-       struct extended_signature sigs[0];
-};
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE      _IO('6',0)
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-       __asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax()    rep_nop()
-
-/* generic versions from gas */
-#define GENERIC_NOP1   ".byte 0x90\n"
-#define GENERIC_NOP2           ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6   ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7   ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8   GENERIC_NOP1 GENERIC_NOP7
-
-/* Opteron nops */
-#define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2        ".byte 0x66,0x90\n" 
-#define K8_NOP3        ".byte 0x66,0x66,0x90\n" 
-#define K8_NOP4        ".byte 0x66,0x66,0x66,0x90\n" 
-#define K8_NOP5        K8_NOP3 K8_NOP2 
-#define K8_NOP6        K8_NOP3 K8_NOP3
-#define K8_NOP7        K8_NOP4 K8_NOP3
-#define K8_NOP8        K8_NOP4 K8_NOP4
-
-/* K7 nops */
-/* uses eax dependencies (arbitrary choice) */
-#define K7_NOP1  GENERIC_NOP1
-#define K7_NOP2        ".byte 0x8b,0xc0\n" 
-#define K7_NOP3        ".byte 0x8d,0x04,0x20\n"
-#define K7_NOP4        ".byte 0x8d,0x44,0x20,0x00\n"
-#define K7_NOP5        K7_NOP4 ASM_NOP1
-#define K7_NOP6        ".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8        K7_NOP7 ASM_NOP1
-
-#ifdef CONFIG_MK8
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
-#define ASM_NOP1 K7_NOP1
-#define ASM_NOP2 K7_NOP2
-#define ASM_NOP3 K7_NOP3
-#define ASM_NOP4 K7_NOP4
-#define ASM_NOP5 K7_NOP5
-#define ASM_NOP6 K7_NOP6
-#define ASM_NOP7 K7_NOP7
-#define ASM_NOP8 K7_NOP8
-#else
-#define ASM_NOP1 GENERIC_NOP1
-#define ASM_NOP2 GENERIC_NOP2
-#define ASM_NOP3 GENERIC_NOP3
-#define ASM_NOP4 GENERIC_NOP4
-#define ASM_NOP5 GENERIC_NOP5
-#define ASM_NOP6 GENERIC_NOP6
-#define ASM_NOP7 GENERIC_NOP7
-#define ASM_NOP8 GENERIC_NOP8
-#endif
-
-#define ASM_NOP_MAX 8
-
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth caring about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However, we currently don't do prefetches for pre-XP Athlons;
-   that should be fixed. */
-#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
-{
-       alternative_input(ASM_NOP4,
-                         "prefetchnta (%1)",
-                         X86_FEATURE_XMM,
-                         "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for 
-   spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
-{
-       alternative_input(ASM_NOP4,
-                         "prefetchw (%1)",
-                         X86_FEATURE_3DNOW,
-                         "r" (x));
-}
-#define spin_lock_prefetch(x)  prefetchw(x)
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
-
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-#ifdef CONFIG_SCHED_SMT
-#define ARCH_HAS_SCHED_DOMAIN
-#define ARCH_HAS_SCHED_WAKE_IDLE
-#endif
-
-#endif /* __ASM_I386_PROCESSOR_H */
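As a standalone illustration of the cpuid() pattern above (not part of the deleted header): a userspace sketch that reads the 12-byte vendor string from leaf 0, assuming GCC-style inline assembly on an x86 host; the vendor-string byte order (ebx, edx, ecx) is standard CPUID behaviour.

#include <stdio.h>
#include <string.h>

/* same constraint pattern as the header's cpuid() helper */
static void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op));
}

int main(void)
{
	int eax, ebx, ecx, edx;
	char vendor[13];

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* vendor string is returned in ebx, edx, ecx */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printf("max basic leaf %d, vendor \"%s\"\n", eax, vendor);
	return 0;
}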
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/ptrace.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/ptrace.h
deleted file mode 100644 (file)
index 0165ccc..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef _I386_PTRACE_H
-#define _I386_PTRACE_H
-
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS  13
-#define EFL 14
-#define UESP 15
-#define SS   16
-#define FRAME_SIZE 17
-
-/* this struct defines the way the registers are stored on the 
-   stack during a system call. */
-
-struct pt_regs {
-       long ebx;
-       long ecx;
-       long edx;
-       long esi;
-       long edi;
-       long ebp;
-       long eax;
-       int  xds;
-       int  xes;
-       long orig_eax;
-       long eip;
-       int  xcs;
-       long eflags;
-       long esp;
-       int  xss;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS            12
-#define PTRACE_SETREGS            13
-#define PTRACE_GETFPREGS          14
-#define PTRACE_SETFPREGS          15
-#define PTRACE_GETFPXREGS         18
-#define PTRACE_SETFPXREGS         19
-
-#define PTRACE_OLDSETOPTIONS         21
-
-#define PTRACE_GET_THREAD_AREA    25
-#define PTRACE_SET_THREAD_AREA    26
-
-#ifdef __KERNEL__
-#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
-#define instruction_pointer(regs) ((regs)->eip)
-#endif
-
-#endif
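A small sketch of the user_mode() test above (illustration only, not kernel code): on i386 the low two bits of the saved %cs hold the privilege level, so RPL >= 2, or the EFLAGS VM bit for vm86 mode, means the frame came from user context. VM_MASK is assumed to equal X86_EFLAGS_VM (0x00020000), the selector values are taken from the segment.h diff below, and the struct and example frames are invented here.

#include <stdio.h>

#define VM_MASK 0x00020000		/* EFLAGS VM bit (X86_EFLAGS_VM) */

struct fake_regs { long eflags; int xcs; };

static int user_mode(const struct fake_regs *regs)
{
	return (VM_MASK & regs->eflags) || (2 & regs->xcs);
}

int main(void)
{
	struct fake_regs kern = { .eflags = 0x00000202, .xcs = 0x61 };	/* __KERNEL_CS, RPL 1 */
	struct fake_regs user = { .eflags = 0x00000202, .xcs = 0x73 };	/* __USER_CS, RPL 3 */

	printf("kernel frame -> %d, user frame -> %d\n",
	       user_mode(&kern), user_mode(&user));	/* prints 0, 1 */
	return 0;
}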
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/segment.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/segment.h
deleted file mode 100644 (file)
index 288243f..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-/*
- * The layout of the per-CPU GDT under Linux:
- *
- *   0 - null
- *   1 - reserved
- *   2 - reserved
- *   3 - reserved
- *
- *   4 - unused                        <==== new cacheline
- *   5 - unused
- *
- *  ------- start of TLS (Thread-Local Storage) segments:
- *
- *   6 - TLS segment #1                        [ glibc's TLS segment ]
- *   7 - TLS segment #2                        [ Wine's %fs Win32 segment ]
- *   8 - TLS segment #3
- *   9 - reserved
- *  10 - reserved
- *  11 - reserved
- *
- *  ------- start of kernel segments:
- *
- *  12 - kernel code segment           <==== new cacheline
- *  13 - kernel data segment
- *  14 - default user CS
- *  15 - default user DS
- *  16 - TSS
- *  17 - LDT
- *  18 - PNPBIOS support (16->32 gate)
- *  19 - PNPBIOS support
- *  20 - PNPBIOS support
- *  21 - PNPBIOS support
- *  22 - PNPBIOS support
- *  23 - APM BIOS support
- *  24 - APM BIOS support
- *  25 - APM BIOS support 
- *
- *  26 - unused
- *  27 - unused
- *  28 - unused
- *  29 - unused
- *  30 - unused
- *  31 - TSS for double fault handler
- */
-#define GDT_ENTRY_TLS_ENTRIES  3
-#define GDT_ENTRY_TLS_MIN      6
-#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-#define GDT_ENTRY_DEFAULT_USER_CS      14
-#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
-
-#define GDT_ENTRY_DEFAULT_USER_DS      15
-#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
-
-#define GDT_ENTRY_KERNEL_BASE  12
-
-#define GDT_ENTRY_KERNEL_CS            (GDT_ENTRY_KERNEL_BASE + 0)
-#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
-
-#define GDT_ENTRY_KERNEL_DS            (GDT_ENTRY_KERNEL_BASE + 1)
-#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
-
-#define GDT_ENTRY_TSS                  (GDT_ENTRY_KERNEL_BASE + 4)
-#define GDT_ENTRY_LDT                  (GDT_ENTRY_KERNEL_BASE + 5)
-
-#define GDT_ENTRY_PNPBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 6)
-#define GDT_ENTRY_APMBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 11)
-
-#define GDT_ENTRY_DOUBLEFAULT_TSS      31
-
-/*
- * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
- */
-#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
-
-#define GDT_SIZE (GDT_ENTRIES * 8)
-
-/* Simple and small GDT entries for booting only */
-
-#define __BOOT_CS      FLAT_GUESTOS_CS
-
-#define __BOOT_DS      FLAT_GUESTOS_DS
-
-/*
- * The interrupt descriptor table has room for 256 entries;
- * the global descriptor table is dependent on the number
- * of tasks we can have.
- */
-#define IDT_ENTRIES 256
-
-#endif
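A quick arithmetic check of the selector encoding above (illustrative sketch, not part of the header): a selector is (GDT index << 3) | RPL, and the +1 on the kernel selectors reflects the Xen/i386 guest kernel running in ring 1 rather than ring 0.

#include <stdio.h>

#define SEL(index, rpl) (((index) << 3) | (rpl))

int main(void)
{
	printf("__KERNEL_CS = %#x\n", SEL(12, 1));	/* 0x61 */
	printf("__KERNEL_DS = %#x\n", SEL(13, 1));	/* 0x69 */
	printf("__USER_CS   = %#x\n", SEL(14, 3));	/* 0x73 */
	printf("__USER_DS   = %#x\n", SEL(15, 3));	/* 0x7b */
	return 0;
}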
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/setup.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/setup.h
deleted file mode 100644 (file)
index 03a3a64..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *     Just a placeholder. We don't want to have to test for x86 before
- *     we include stuff.
- */
-
-#ifndef _i386_SETUP_H
-#define _i386_SETUP_H
-
-#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)
-
-/*
- * Reserved space for vmalloc and iomap - defined in asm/page.h
- */
-#define MAXMEM_PFN     PFN_DOWN(MAXMEM)
-#define MAX_NONPAE_PFN (1 << 20)
-
-#define PARAM_SIZE 2048
-#define COMMAND_LINE_SIZE 256
-
-#define OLD_CL_MAGIC_ADDR      0x90020
-#define OLD_CL_MAGIC           0xA33F
-#define OLD_CL_BASE_ADDR       0x90000
-#define OLD_CL_OFFSET          0x90022
-#define NEW_CL_POINTER         0x228   /* Relative to real mode data */
-
-#ifndef __ASSEMBLY__
-/*
- * This is set up by the setup-routine at boot-time
- */
-extern unsigned char boot_params[PARAM_SIZE];
-
-#define PARAM  (boot_params)
-#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
-#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
-#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-#define INITRD_START (__pa(xen_start_info.mod_start))
-#define INITRD_SIZE (xen_start_info.mod_len)
-#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _i386_SETUP_H */
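The PFN helpers above are plain shift arithmetic; a minimal sketch with an assumed 4 KiB page size (the address value is made up for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x12345;				/* inside frame 0x12 */

	printf("PFN_DOWN = %#lx\n", PFN_DOWN(addr));		/* 0x12: truncate */
	printf("PFN_UP   = %#lx\n", PFN_UP(addr));		/* 0x13: round up */
	printf("PFN_PHYS = %#lx\n", PFN_PHYS(PFN_DOWN(addr)));	/* 0x12000 */
	return 0;
}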
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
deleted file mode 100644 (file)
index 8093de0..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-#include <linux/config.h>
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ ( 
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & 
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
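A userspace sketch showing how the test-and-set primitive above behaves (illustration only; the lock word, main() and the try-lock framing are invented here). The same "lock btsl; sbbl" sequence returns 0 when the bit was clear and -1 (non-zero) when it was already set, which is why callers treat the result as a boolean.

#include <stdio.h>

#define ADDR (*(volatile long *) addr)

static inline int synch_test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;
	__asm__ __volatile__(
		"lock btsl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
	return oldbit;
}

static volatile long lock_word;

int main(void)
{
	int first  = synch_test_and_set_bit(0, &lock_word);	/* bit was clear: 0 */
	int second = synch_test_and_set_bit(0, &lock_word);	/* bit already set: -1 */

	printf("first=%d second=%d\n", first, second);
	return 0;
}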
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/system.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/system.h
deleted file mode 100644 (file)
index 133f1ea..0000000
+++ /dev/null
@@ -1,522 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <asm/synch_bitops.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/evtchn.h>
-
-#ifdef __KERNEL__
-
-struct task_struct;    /* one of the stranger aspects of C forward declarations.. */
-extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
-#define switch_to(prev,next,last) do {                                 \
-       unsigned long esi,edi;                                          \
-       asm volatile("pushfl\n\t"                                       \
-                    "pushl %%ebp\n\t"                                  \
-                    "movl %%esp,%0\n\t"        /* save ESP */          \
-                    "movl %5,%%esp\n\t"        /* restore ESP */       \
-                    "movl $1f,%1\n\t"          /* save EIP */          \
-                    "pushl %6\n\t"             /* restore EIP */       \
-                    "jmp __switch_to\n"                                \
-                    "1:\t"                                             \
-                    "popl %%ebp\n\t"                                   \
-                    "popfl"                                            \
-                    :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
-                     "=a" (last),"=S" (esi),"=D" (edi)                 \
-                    :"m" (next->thread.esp),"m" (next->thread.eip),    \
-                     "2" (prev), "d" (next));                          \
-} while (0)
-
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-       "rorl $16,%%edx\n\t" \
-       "movb %%dl,%2\n\t" \
-       "movb %%dh,%3" \
-       :"=&d" (__pr) \
-       :"m" (*((addr)+2)), \
-        "m" (*((addr)+4)), \
-        "m" (*((addr)+7)), \
-         "0" (base) \
-        ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-       "rorl $16,%%edx\n\t" \
-       "movb %2,%%dh\n\t" \
-       "andb $0xf0,%%dh\n\t" \
-       "orb %%dh,%%dl\n\t" \
-       "movb %%dl,%2" \
-       :"=&d" (__lr) \
-       :"m" (*(addr)), \
-        "m" (*((addr)+6)), \
-        "0" (limit) \
-        ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
-       unsigned long __base;
-       __asm__("movb %3,%%dh\n\t"
-               "movb %2,%%dl\n\t"
-               "shll $16,%%edx\n\t"
-               "movw %1,%%dx"
-               :"=&d" (__base)
-               :"m" (*((addr)+2)),
-                "m" (*((addr)+4)),
-                "m" (*((addr)+7)));
-       return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value)                 \
-       asm volatile("\n"                       \
-               "1:\t"                          \
-               "movl %0,%%" #seg "\n"          \
-               "2:\n"                          \
-               ".section .fixup,\"ax\"\n"      \
-               "3:\t"                          \
-               "pushl $0\n\t"                  \
-               "popl %%" #seg "\n\t"           \
-               "jmp 2b\n"                      \
-               ".previous\n"                   \
-               ".section __ex_table,\"a\"\n\t" \
-               ".align 4\n\t"                  \
-               ".long 1b,3b\n"                 \
-               ".previous"                     \
-               : :"m" (*(unsigned int *)&(value)))
-
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-       asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
-
-/*
- * Clear and set 'TS' bit respectively
- */
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
-#define read_cr0() \
-       BUG();
-#define write_cr0(x) \
-       BUG();
-
-#define read_cr4() \
-       BUG();
-#define write_cr4(x) \
-       BUG();
-#define stts() (HYPERVISOR_fpu_taskswitch())
-
-#endif /* __KERNEL__ */
-
-static inline void wbinvd(void)
-{
-    mmu_update_t u;
-    u.ptr = MMU_EXTENDED_COMMAND;
-    u.val = MMUEXT_FLUSH_CACHE;
-    (void)HYPERVISOR_mmu_update(&u, 1, NULL);
-}
-
-static inline unsigned long get_limit(unsigned long segment)
-{
-       unsigned long __limit;
-       __asm__("lsll %1,%0"
-               :"=r" (__limit):"r" (segment));
-       return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-/*
- * The semantics of CMPXCHG8B are a bit strange; this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
-               unsigned int low, unsigned int high)
-{
-       __asm__ __volatile__ (
-               "\n1:\t"
-               "movl (%0), %%eax\n\t"
-               "movl 4(%0), %%edx\n\t"
-               "lock cmpxchg8b (%0)\n\t"
-               "jnz 1b"
-               : /* no outputs */
-               :       "D"(ptr),
-                       "b"(low),
-                       "c"(high)
-               :       "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
-                                                unsigned long long value)
-{
-       __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x)      *(((unsigned int*)&(x))+0)
-#define ll_high(x)     *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
-                        unsigned long long value)
-{
-       __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has a side effect, so the volatile attribute is necessary,
- *       though strictly the constraint is imprecise since *ptr is also an output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-       switch (size) {
-               case 1:
-                       __asm__ __volatile__("xchgb %b0,%1"
-                               :"=q" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 2:
-                       __asm__ __volatile__("xchgw %w0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 4:
-                       __asm__ __volatile__("xchgl %0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-       }
-       return x;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-#endif
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, int size)
-{
-       unsigned long prev;
-       switch (size) {
-       case 1:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-                                    : "=a"(prev)
-                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                                    : "memory");
-               return prev;
-       case 2:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-                                    : "=a"(prev)
-                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                                    : "memory");
-               return prev;
-       case 4:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-                                    : "=a"(prev)
-                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                                    : "memory");
-               return prev;
-       }
-       return old;
-}
-
-#define cmpxchg(ptr,o,n)\
-       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-                                       (unsigned long)(n),sizeof(*(ptr))))
-    
-#ifdef __KERNEL__
-struct alt_instr { 
-       __u8 *instr;            /* original instruction */
-       __u8 *replacement;
-       __u8  cpuid;            /* cpuid bit set for replacement */
-       __u8  instrlen;         /* length of original instruction */
-       __u8  replacementlen;   /* length of new instruction, <= instrlen */ 
-       __u8  pad;
-}; 
-#endif
-
-/* 
- * Alternative instructions for different CPU types or capabilities.
- * 
- * This allows the use of optimized instructions even on generic binary
- * kernels.
- * 
- * The length of oldinstr must be greater than or equal to the length of newinstr;
- * it can be padded with nops as needed.
- * 
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature)       \
-       asm volatile ("661:\n\t" oldinstr "\n662:\n"                 \
-                     ".section .altinstructions,\"a\"\n"            \
-                     "  .align 4\n"                                   \
-                     "  .long 661b\n"            /* label */          \
-                     "  .long 663f\n"            /* new instruction */         \
-                     "  .byte %c0\n"             /* feature bit */    \
-                     "  .byte 662b-661b\n"       /* sourcelen */      \
-                     "  .byte 664f-663f\n"       /* replacementlen */ \
-                     ".previous\n"                                             \
-                     ".section .altinstr_replacement,\"ax\"\n"                 \
-                     "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
-                     ".previous" :: "i" (feature) : "memory")  
-
-/*
- * Alternative inline assembly with input.
- * 
- * Peculiarities:
- * No memory clobber here. 
- * Argument numbers start with 1.
- * It is best to use constraints that are fixed size (like (%1) ... "r").
- * If you use variable-sized constraints like "m" or "g" in the
- * replacement, make sure to pad to the worst-case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input)                  \
-       asm volatile ("661:\n\t" oldinstr "\n662:\n"                            \
-                     ".section .altinstructions,\"a\"\n"                       \
-                     "  .align 4\n"                                            \
-                     "  .long 661b\n"            /* label */                   \
-                     "  .long 663f\n"            /* new instruction */         \
-                     "  .byte %c0\n"             /* feature bit */             \
-                     "  .byte 662b-661b\n"       /* sourcelen */               \
-                     "  .byte 664f-663f\n"       /* replacementlen */          \
-                     ".previous\n"                                             \
-                     ".section .altinstr_replacement,\"ax\"\n"                 \
-                     "663:\n\t" newinstr "\n664:\n"   /* replacement */        \
-                     ".previous" :: "i" (feature), input)  
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPUs follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPUs to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
- * nop for these.
- */
-
-/* 
- * Actually only lfence would be needed for mb() because all stores done 
- * by the kernel should be already ordered. But keep a full barrier for now. 
- */
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     b = 2;
- *     memory_barrier();
- *     p = &b;                         q = p;
- *                                     read_barrier_depends();
- *                                     d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     a = 2;
- *     memory_barrier();
- *     b = 3;                          y = b;
- *                                     read_barrier_depends();
- *                                     x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_X86_OOSTORE
-/* Actually there are no OOO-store-capable CPUs that do SSE for now,
-   but allow for the possibility already. */
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define wmb()  __asm__ __volatile__ ("": : :"memory")
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-
-/* 
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli()                                                               \
-do {                                                                          \
-    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
-    barrier();                                                                \
-} while (0)
-
-#define __sti()                                                               \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
-    barrier(); /* unmask then check (avoid races) */                          \
-    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
-        force_evtchn_callback();                                              \
-} while (0)
-
-#define __save_flags(x)                                                       \
-do {                                                                          \
-    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
-} while (0)
-
-#define __restore_flags(x)                                                    \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {            \
-        barrier(); /* unmask then check (avoid races) */                      \
-        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
-            force_evtchn_callback();                                          \
-    }                                                                         \
-} while (0)
-
-#define safe_halt()             ((void)0)
-
-#define __save_and_cli(x)                                                     \
-do {                                                                          \
-    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
-    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
-    barrier();                                                                \
-} while (0)
-
-#define __save_and_sti(x)                                                     \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
-    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
-    barrier(); /* unmask then check (avoid races) */                          \
-    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
-        force_evtchn_callback();                                              \
-} while (0)
-
-#define local_irq_save(x)      __save_and_cli(x)
-#define local_irq_restore(x)    __restore_flags(x)
-#define local_save_flags(x)     __save_flags(x)
-#define local_irq_disable()     __cli()
-#define local_irq_enable()      __sti()
-
-#define irqs_disabled()                        \
-       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-
-#endif
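The interrupt-control macros above replace the native cli/sti with writes to the shared-info event-channel upcall mask; the enable path must re-check for pending upcalls after unmasking so no event is lost. A minimal sketch of the intended usage pattern (the guarded counter and its caller are hypothetical):

    #include <asm/system.h>

    static unsigned long pending_work;   /* hypothetical state also touched from the upcall path */

    static void queue_work_safely(unsigned long bit)
    {
            unsigned long flags;

            local_irq_save(flags);        /* sets evtchn_upcall_mask, then barrier()      */
            pending_work |= bit;          /* no event upcall can preempt this update      */
            local_irq_restore(flags);     /* clears the mask and, if an upcall became
                                             pending while masked, forces the callback    */
    }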
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/timer.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/timer.h
deleted file mode 100644 (file)
index add9f20..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _ASMi386_TIMER_H
-#define _ASMi386_TIMER_H
-
-/**
- * struct timer_ops - used to define a timer source
- *
- * @name: name of the timer.
- * @init: Probes and initializes the timer. Takes clock= override 
- *        string as an argument. Returns 0 on success, anything else
- *        on failure.
- * @mark_offset: called by the timer interrupt.
- * @get_offset:  called by gettimeofday(). Returns the number of microseconds
- *               since the last timer interrupt.
- * @monotonic_clock: returns the number of nanoseconds since the init of the
- *                   timer.
- * @delay: delays this many clock cycles.
- */
-struct timer_opts{
-       char* name;
-       int (*init)(char *override);
-       void (*mark_offset)(void);
-       unsigned long (*get_offset)(void);
-       unsigned long long (*monotonic_clock)(void);
-       void (*delay)(unsigned long);
-};
-
-#define TICK_SIZE (tick_nsec / 1000)
-
-extern struct timer_opts* select_timer(void);
-extern void clock_fallback(void);
-
-/* Modifiers for buggy PIT handling */
-
-extern int pit_latch_buggy;
-
-extern struct timer_opts *cur_timer;
-extern int timer_ack;
-
-/* list of externed timers */
-extern struct timer_opts timer_none;
-extern struct timer_opts timer_pit;
-extern struct timer_opts timer_tsc;
-#ifdef CONFIG_X86_CYCLONE_TIMER
-extern struct timer_opts timer_cyclone;
-#endif
-extern struct timer_opts timer_xen;
-
-extern unsigned long calibrate_tsc(void);
-extern void init_cpu_khz(void);
-#ifdef CONFIG_HPET_TIMER
-extern struct timer_opts timer_hpet;
-extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
-#endif
-
-#ifdef CONFIG_X86_PM_TIMER
-extern struct timer_opts timer_pmtmr;
-#endif
-#endif
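struct timer_opts wraps one timer source behind function pointers so setup code can pick a backend at run time (select_timer() returns the chosen one; timer_xen is this tree's backend). A sketch of what a backend definition looks like, with purely illustrative function names and trivial bodies:

    #include <asm/timer.h>

    static int dummy_init(char *override)           { return 0; }  /* 0 means the probe succeeded   */
    static void dummy_mark_offset(void)             { }
    static unsigned long dummy_get_offset(void)     { return 0; }  /* usecs since the last tick     */
    static unsigned long long dummy_monotonic(void) { return 0; }  /* ns since the timer was set up */
    static void dummy_delay(unsigned long loops)    { while (loops--) ; }  /* busy-wait placeholder */

    static struct timer_opts timer_dummy = {
            .name            = "dummy",
            .init            = dummy_init,
            .mark_offset     = dummy_mark_offset,
            .get_offset      = dummy_get_offset,
            .monotonic_clock = dummy_monotonic,
            .delay           = dummy_delay,
    };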
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/tlbflush.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
deleted file mode 100644 (file)
index bb1c124..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef _I386_TLBFLUSH_H
-#define _I386_TLBFLUSH_H
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <asm/processor.h>
-
-#define __flush_tlb() do {                                             \
-       xen_tlb_flush();                                                \
-} while (/*CONSTCOND*/0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global()                                           \
-       do {                                                            \
-               xen_tlb_flush();                                        \
-       } while (0)
-
-extern unsigned long pgkern_mask;
-
-# define __flush_tlb_all()                                             \
-       do {                                                            \
-               if (cpu_has_pge)                                        \
-                       __flush_tlb_global();                           \
-               else                                                    \
-                       __flush_tlb();                                  \
-       } while (0)
-
-#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-
-#define __flush_tlb_single(addr) do {                                  \
-       xen_invlpg(addr);                                               \
-} while (/* CONSTCOND */0)
-
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-       if (mm == current->active_mm)
-               __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-       unsigned long addr)
-{
-       if (vma->vm_mm == current->active_mm)
-               __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-       unsigned long start, unsigned long end)
-{
-       if (vma->vm_mm == current->active_mm)
-               __flush_tlb();
-}
-
-#else
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
-       __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb()    flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-{
-       flush_tlb_mm(vma->vm_mm);
-}
-
-#define TLBSTATE_OK    1
-#define TLBSTATE_LAZY  2
-
-struct tlb_state
-{
-       struct mm_struct *active_mm;
-       int state;
-       char __cacheline_padding[L1_CACHE_BYTES-8];
-};
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
-
-
-#endif
-
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-                                     unsigned long start, unsigned long end)
-{
-       /* i386 does not keep any page table caches in TLB */
-}
-
-#endif /* _I386_TLBFLUSH_H */
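The flush_tlb_* helpers above choose the narrowest invalidation that is still correct (per page, per mm, or everything), and in this tree they all bottom out in the xen_tlb_flush()/xen_invlpg() hypercall-backed primitives. A sketch of the usual call pattern once a page-table entry has been changed; the vma and address are assumed to come from the caller:

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    static void after_pte_change(struct vm_area_struct *vma, unsigned long addr)
    {
            /* The caller has just rewritten the PTE mapping 'addr' in vma->vm_mm. */
            flush_tlb_page(vma, addr);      /* per-page shootdown (xen_invlpg underneath) */
    }

    static void after_bulk_change(struct mm_struct *mm)
    {
            /* Many entries changed: flush the whole context instead. */
            flush_tlb_mm(mm);
    }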
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/vga.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/vga.h
deleted file mode 100644 (file)
index 14b8209..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *     Access to VGA videoram
- *
- *     (c) 1998 Martin Mares <mj@ucw.cz>
- */
-
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-/*
- *     On the PC, we can just recalculate addresses and then
- *     access the videoram directly without any black magic.
- */
-
-#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
-
-#endif
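With VGA_MAP_MEM() the legacy video RAM is simply remapped and then accessed directly through vga_readb()/vga_writeb(). A small sketch poking the first character cell of the colour text buffer (0xb8000 is the conventional text-mode base):

    #include <asm/vga.h>

    static void poke_vga_corner(void)
    {
            char *screen = (char *)VGA_MAP_MEM(0xb8000);
            char old = vga_readb(screen);    /* glyph currently at row 0, column 0 */
            vga_writeb('X', screen);         /* overwrite it */
            (void)old;
    }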
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/xor.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/xor.h
deleted file mode 100644 (file)
index 79a45de..0000000
+++ /dev/null
@@ -1,884 +0,0 @@
-/*
- * include/asm-i386/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * High-speed RAID5 checksumming functions utilizing MMX instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-#define LD(x,y)                "       movq   8*("#x")(%1), %%mm"#y"   ;\n"
-#define ST(x,y)                "       movq %%mm"#y",   8*("#x")(%1)   ;\n"
-#define XO1(x,y)       "       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
-#define XO2(x,y)       "       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
-#define XO3(x,y)       "       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
-#define XO4(x,y)       "       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
-
-#include <asm/i387.h>
-
-static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
-       unsigned long lines = bytes >> 7;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-       LD(i,0)                                 \
-               LD(i+1,1)                       \
-                       LD(i+2,2)               \
-                               LD(i+3,3)       \
-       XO1(i,0)                                \
-       ST(i,0)                                 \
-               XO1(i+1,1)                      \
-               ST(i+1,1)                       \
-                       XO1(i+2,2)              \
-                       ST(i+2,2)               \
-                               XO1(i+3,3)      \
-                               ST(i+3,3)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-       BLOCK(0)
-       BLOCK(4)
-       BLOCK(8)
-       BLOCK(12)
-
-       "       addl $128, %1         ;\n"
-       "       addl $128, %2         ;\n"
-       "       decl %0               ;\n"
-       "       jnz 1b                ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-             unsigned long *p3)
-{
-       unsigned long lines = bytes >> 7;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-       LD(i,0)                                 \
-               LD(i+1,1)                       \
-                       LD(i+2,2)               \
-                               LD(i+3,3)       \
-       XO1(i,0)                                \
-               XO1(i+1,1)                      \
-                       XO1(i+2,2)              \
-                               XO1(i+3,3)      \
-       XO2(i,0)                                \
-       ST(i,0)                                 \
-               XO2(i+1,1)                      \
-               ST(i+1,1)                       \
-                       XO2(i+2,2)              \
-                       ST(i+2,2)               \
-                               XO2(i+3,3)      \
-                               ST(i+3,3)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-       BLOCK(0)
-       BLOCK(4)
-       BLOCK(8)
-       BLOCK(12)
-
-       "       addl $128, %1         ;\n"
-       "       addl $128, %2         ;\n"
-       "       addl $128, %3         ;\n"
-       "       decl %0               ;\n"
-       "       jnz 1b                ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-             unsigned long *p3, unsigned long *p4)
-{
-       unsigned long lines = bytes >> 7;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-       LD(i,0)                                 \
-               LD(i+1,1)                       \
-                       LD(i+2,2)               \
-                               LD(i+3,3)       \
-       XO1(i,0)                                \
-               XO1(i+1,1)                      \
-                       XO1(i+2,2)              \
-                               XO1(i+3,3)      \
-       XO2(i,0)                                \
-               XO2(i+1,1)                      \
-                       XO2(i+2,2)              \
-                               XO2(i+3,3)      \
-       XO3(i,0)                                \
-       ST(i,0)                                 \
-               XO3(i+1,1)                      \
-               ST(i+1,1)                       \
-                       XO3(i+2,2)              \
-                       ST(i+2,2)               \
-                               XO3(i+3,3)      \
-                               ST(i+3,3)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-       BLOCK(0)
-       BLOCK(4)
-       BLOCK(8)
-       BLOCK(12)
-
-       "       addl $128, %1         ;\n"
-       "       addl $128, %2         ;\n"
-       "       addl $128, %3         ;\n"
-       "       addl $128, %4         ;\n"
-       "       decl %0               ;\n"
-       "       jnz 1b                ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-
-static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-             unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
-       unsigned long lines = bytes >> 7;
-
-       kernel_fpu_begin();
-
-       /* Make sure GCC forgets anything it knows about p4 or p5,
-          such that it won't pass to the asm volatile below a
-          register that is shared with any other variable.  That's
-          because we modify p4 and p5 there, but we can't mark them
-          as read/write, otherwise we'd overflow the 10-asm-operands
-          limit of GCC < 3.1.  */
-       __asm__ ("" : "+r" (p4), "+r" (p5));
-
-       __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-       LD(i,0)                                 \
-               LD(i+1,1)                       \
-                       LD(i+2,2)               \
-                               LD(i+3,3)       \
-       XO1(i,0)                                \
-               XO1(i+1,1)                      \
-                       XO1(i+2,2)              \
-                               XO1(i+3,3)      \
-       XO2(i,0)                                \
-               XO2(i+1,1)                      \
-                       XO2(i+2,2)              \
-                               XO2(i+3,3)      \
-       XO3(i,0)                                \
-               XO3(i+1,1)                      \
-                       XO3(i+2,2)              \
-                               XO3(i+3,3)      \
-       XO4(i,0)                                \
-       ST(i,0)                                 \
-               XO4(i+1,1)                      \
-               ST(i+1,1)                       \
-                       XO4(i+2,2)              \
-                       ST(i+2,2)               \
-                               XO4(i+3,3)      \
-                               ST(i+3,3)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-       BLOCK(0)
-       BLOCK(4)
-       BLOCK(8)
-       BLOCK(12)
-
-       "       addl $128, %1         ;\n"
-       "       addl $128, %2         ;\n"
-       "       addl $128, %3         ;\n"
-       "       addl $128, %4         ;\n"
-       "       addl $128, %5         ;\n"
-       "       decl %0               ;\n"
-       "       jnz 1b                ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       : "r" (p4), "r" (p5) 
-       : "memory");
-
-       /* p4 and p5 were modified, and now the variables are dead.
-          Clobber them just to be sure nobody does something stupid
-          like assuming they have some legal value.  */
-       __asm__ ("" : "=r" (p4), "=r" (p5));
-
-       kernel_fpu_end();
-}
-
-#undef LD
-#undef XO1
-#undef XO2
-#undef XO3
-#undef XO4
-#undef ST
-#undef BLOCK
-
-static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
-       unsigned long lines = bytes >> 6;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-       " .align 32                  ;\n"
-       " 1:                         ;\n"
-       "       movq   (%1), %%mm0   ;\n"
-       "       movq  8(%1), %%mm1   ;\n"
-       "       pxor   (%2), %%mm0   ;\n"
-       "       movq 16(%1), %%mm2   ;\n"
-       "       movq %%mm0,   (%1)   ;\n"
-       "       pxor  8(%2), %%mm1   ;\n"
-       "       movq 24(%1), %%mm3   ;\n"
-       "       movq %%mm1,  8(%1)   ;\n"
-       "       pxor 16(%2), %%mm2   ;\n"
-       "       movq 32(%1), %%mm4   ;\n"
-       "       movq %%mm2, 16(%1)   ;\n"
-       "       pxor 24(%2), %%mm3   ;\n"
-       "       movq 40(%1), %%mm5   ;\n"
-       "       movq %%mm3, 24(%1)   ;\n"
-       "       pxor 32(%2), %%mm4   ;\n"
-       "       movq 48(%1), %%mm6   ;\n"
-       "       movq %%mm4, 32(%1)   ;\n"
-       "       pxor 40(%2), %%mm5   ;\n"
-       "       movq 56(%1), %%mm7   ;\n"
-       "       movq %%mm5, 40(%1)   ;\n"
-       "       pxor 48(%2), %%mm6   ;\n"
-       "       pxor 56(%2), %%mm7   ;\n"
-       "       movq %%mm6, 48(%1)   ;\n"
-       "       movq %%mm7, 56(%1)   ;\n"
-       
-       "       addl $64, %1         ;\n"
-       "       addl $64, %2         ;\n"
-       "       decl %0              ;\n"
-       "       jnz 1b               ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-            unsigned long *p3)
-{
-       unsigned long lines = bytes >> 6;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-       " .align 32,0x90             ;\n"
-       " 1:                         ;\n"
-       "       movq   (%1), %%mm0   ;\n"
-       "       movq  8(%1), %%mm1   ;\n"
-       "       pxor   (%2), %%mm0   ;\n"
-       "       movq 16(%1), %%mm2   ;\n"
-       "       pxor  8(%2), %%mm1   ;\n"
-       "       pxor   (%3), %%mm0   ;\n"
-       "       pxor 16(%2), %%mm2   ;\n"
-       "       movq %%mm0,   (%1)   ;\n"
-       "       pxor  8(%3), %%mm1   ;\n"
-       "       pxor 16(%3), %%mm2   ;\n"
-       "       movq 24(%1), %%mm3   ;\n"
-       "       movq %%mm1,  8(%1)   ;\n"
-       "       movq 32(%1), %%mm4   ;\n"
-       "       movq 40(%1), %%mm5   ;\n"
-       "       pxor 24(%2), %%mm3   ;\n"
-       "       movq %%mm2, 16(%1)   ;\n"
-       "       pxor 32(%2), %%mm4   ;\n"
-       "       pxor 24(%3), %%mm3   ;\n"
-       "       pxor 40(%2), %%mm5   ;\n"
-       "       movq %%mm3, 24(%1)   ;\n"
-       "       pxor 32(%3), %%mm4   ;\n"
-       "       pxor 40(%3), %%mm5   ;\n"
-       "       movq 48(%1), %%mm6   ;\n"
-       "       movq %%mm4, 32(%1)   ;\n"
-       "       movq 56(%1), %%mm7   ;\n"
-       "       pxor 48(%2), %%mm6   ;\n"
-       "       movq %%mm5, 40(%1)   ;\n"
-       "       pxor 56(%2), %%mm7   ;\n"
-       "       pxor 48(%3), %%mm6   ;\n"
-       "       pxor 56(%3), %%mm7   ;\n"
-       "       movq %%mm6, 48(%1)   ;\n"
-       "       movq %%mm7, 56(%1)   ;\n"
-      
-       "       addl $64, %1         ;\n"
-       "       addl $64, %2         ;\n"
-       "       addl $64, %3         ;\n"
-       "       decl %0              ;\n"
-       "       jnz 1b               ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       :
-       : "memory" );
-
-       kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-            unsigned long *p3, unsigned long *p4)
-{
-       unsigned long lines = bytes >> 6;
-
-       kernel_fpu_begin();
-
-       __asm__ __volatile__ (
-       " .align 32,0x90             ;\n"
-       " 1:                         ;\n"
-       "       movq   (%1), %%mm0   ;\n"
-       "       movq  8(%1), %%mm1   ;\n"
-       "       pxor   (%2), %%mm0   ;\n"
-       "       movq 16(%1), %%mm2   ;\n"
-       "       pxor  8(%2), %%mm1   ;\n"
-       "       pxor   (%3), %%mm0   ;\n"
-       "       pxor 16(%2), %%mm2   ;\n"
-       "       pxor  8(%3), %%mm1   ;\n"
-       "       pxor   (%4), %%mm0   ;\n"
-       "       movq 24(%1), %%mm3   ;\n"
-       "       pxor 16(%3), %%mm2   ;\n"
-       "       pxor  8(%4), %%mm1   ;\n"
-       "       movq %%mm0,   (%1)   ;\n"
-       "       movq 32(%1), %%mm4   ;\n"
-       "       pxor 24(%2), %%mm3   ;\n"
-       "       pxor 16(%4), %%mm2   ;\n"
-       "       movq %%mm1,  8(%1)   ;\n"
-       "       movq 40(%1), %%mm5   ;\n"
-       "       pxor 32(%2), %%mm4   ;\n"
-       "       pxor 24(%3), %%mm3   ;\n"
-       "       movq %%mm2, 16(%1)   ;\n"
-       "       pxor 40(%2), %%mm5   ;\n"
-       "       pxor 32(%3), %%mm4   ;\n"
-       "       pxor 24(%4), %%mm3   ;\n"
-       "       movq %%mm3, 24(%1)   ;\n"
-       "       movq 56(%1), %%mm7   ;\n"
-       "       movq 48(%1), %%mm6   ;\n"
-       "       pxor 40(%3), %%mm5   ;\n"
-       "       pxor 32(%4), %%mm4   ;\n"
-       "       pxor 48(%2), %%mm6   ;\n"
-       "       movq %%mm4, 32(%1)   ;\n"
-       "       pxor 56(%2), %%mm7   ;\n"
-       "       pxor 40(%4), %%mm5   ;\n"
-       "       pxor 48(%3), %%mm6   ;\n"
-       "       pxor 56(%3), %%mm7   ;\n"
-       "       movq %%mm5, 40(%1)   ;\n"
-       "       pxor 48(%4), %%mm6   ;\n"
-       "       pxor 56(%4), %%mm7   ;\n"
-       "       movq %%mm6, 48(%1)   ;\n"
-       "       movq %%mm7, 56(%1)   ;\n"
-      
-       "       addl $64, %1         ;\n"
-       "       addl $64, %2         ;\n"
-       "       addl $64, %3         ;\n"
-       "       addl $64, %4         ;\n"
-       "       decl %0              ;\n"
-       "       jnz 1b               ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-            unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
-       unsigned long lines = bytes >> 6;
-
-       kernel_fpu_begin();
-
-       /* Make sure GCC forgets anything it knows about p4 or p5,
-          such that it won't pass to the asm volatile below a
-          register that is shared with any other variable.  That's
-          because we modify p4 and p5 there, but we can't mark them
-          as read/write, otherwise we'd overflow the 10-asm-operands
-          limit of GCC < 3.1.  */
-       __asm__ ("" : "+r" (p4), "+r" (p5));
-
-       __asm__ __volatile__ (
-       " .align 32,0x90             ;\n"
-       " 1:                         ;\n"
-       "       movq   (%1), %%mm0   ;\n"
-       "       movq  8(%1), %%mm1   ;\n"
-       "       pxor   (%2), %%mm0   ;\n"
-       "       pxor  8(%2), %%mm1   ;\n"
-       "       movq 16(%1), %%mm2   ;\n"
-       "       pxor   (%3), %%mm0   ;\n"
-       "       pxor  8(%3), %%mm1   ;\n"
-       "       pxor 16(%2), %%mm2   ;\n"
-       "       pxor   (%4), %%mm0   ;\n"
-       "       pxor  8(%4), %%mm1   ;\n"
-       "       pxor 16(%3), %%mm2   ;\n"
-       "       movq 24(%1), %%mm3   ;\n"
-       "       pxor   (%5), %%mm0   ;\n"
-       "       pxor  8(%5), %%mm1   ;\n"
-       "       movq %%mm0,   (%1)   ;\n"
-       "       pxor 16(%4), %%mm2   ;\n"
-       "       pxor 24(%2), %%mm3   ;\n"
-       "       movq %%mm1,  8(%1)   ;\n"
-       "       pxor 16(%5), %%mm2   ;\n"
-       "       pxor 24(%3), %%mm3   ;\n"
-       "       movq 32(%1), %%mm4   ;\n"
-       "       movq %%mm2, 16(%1)   ;\n"
-       "       pxor 24(%4), %%mm3   ;\n"
-       "       pxor 32(%2), %%mm4   ;\n"
-       "       movq 40(%1), %%mm5   ;\n"
-       "       pxor 24(%5), %%mm3   ;\n"
-       "       pxor 32(%3), %%mm4   ;\n"
-       "       pxor 40(%2), %%mm5   ;\n"
-       "       movq %%mm3, 24(%1)   ;\n"
-       "       pxor 32(%4), %%mm4   ;\n"
-       "       pxor 40(%3), %%mm5   ;\n"
-       "       movq 48(%1), %%mm6   ;\n"
-       "       movq 56(%1), %%mm7   ;\n"
-       "       pxor 32(%5), %%mm4   ;\n"
-       "       pxor 40(%4), %%mm5   ;\n"
-       "       pxor 48(%2), %%mm6   ;\n"
-       "       pxor 56(%2), %%mm7   ;\n"
-       "       movq %%mm4, 32(%1)   ;\n"
-       "       pxor 48(%3), %%mm6   ;\n"
-       "       pxor 56(%3), %%mm7   ;\n"
-       "       pxor 40(%5), %%mm5   ;\n"
-       "       pxor 48(%4), %%mm6   ;\n"
-       "       pxor 56(%4), %%mm7   ;\n"
-       "       movq %%mm5, 40(%1)   ;\n"
-       "       pxor 48(%5), %%mm6   ;\n"
-       "       pxor 56(%5), %%mm7   ;\n"
-       "       movq %%mm6, 48(%1)   ;\n"
-       "       movq %%mm7, 56(%1)   ;\n"
-      
-       "       addl $64, %1         ;\n"
-       "       addl $64, %2         ;\n"
-       "       addl $64, %3         ;\n"
-       "       addl $64, %4         ;\n"
-       "       addl $64, %5         ;\n"
-       "       decl %0              ;\n"
-       "       jnz 1b               ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       : "r" (p4), "r" (p5)
-       : "memory");
-
-       /* p4 and p5 were modified, and now the variables are dead.
-          Clobber them just to be sure nobody does something stupid
-          like assuming they have some legal value.  */
-       __asm__ ("" : "=r" (p4), "=r" (p5));
-
-       kernel_fpu_end();
-}
-
-static struct xor_block_template xor_block_pII_mmx = {
-       .name = "pII_mmx",
-       .do_2 = xor_pII_mmx_2,
-       .do_3 = xor_pII_mmx_3,
-       .do_4 = xor_pII_mmx_4,
-       .do_5 = xor_pII_mmx_5,
-};
-
-static struct xor_block_template xor_block_p5_mmx = {
-       .name = "p5_mmx",
-       .do_2 = xor_p5_mmx_2,
-       .do_3 = xor_p5_mmx_3,
-       .do_4 = xor_p5_mmx_4,
-       .do_5 = xor_p5_mmx_5,
-};
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE do {                         \
-       preempt_disable();                      \
-       if (!(current_thread_info()->status & TS_USEDFPU))      \
-               clts();                         \
-       __asm__ __volatile__ (                  \
-               "movups %%xmm0,(%1)     ;\n\t"  \
-               "movups %%xmm1,0x10(%1) ;\n\t"  \
-               "movups %%xmm2,0x20(%1) ;\n\t"  \
-               "movups %%xmm3,0x30(%1) ;\n\t"  \
-               : "=&r" (cr0)                   \
-               : "r" (xmm_save)                \
-               : "memory");                    \
-} while(0)
-
-#define XMMS_RESTORE do {                      \
-       __asm__ __volatile__ (                  \
-               "sfence                 ;\n\t"  \
-               "movups (%1),%%xmm0     ;\n\t"  \
-               "movups 0x10(%1),%%xmm1 ;\n\t"  \
-               "movups 0x20(%1),%%xmm2 ;\n\t"  \
-               "movups 0x30(%1),%%xmm3 ;\n\t"  \
-               :                               \
-               : "r" (cr0), "r" (xmm_save)     \
-               : "memory");                    \
-       if (!(current_thread_info()->status & TS_USEDFPU))      \
-               stts();                         \
-       preempt_enable();                       \
-} while(0)
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x)                "16*("#x")"
-#define PF_OFFS(x)     "256+16*("#x")"
-#define        PF0(x)          "       prefetchnta "PF_OFFS(x)"(%1)            ;\n"
-#define LD(x,y)                "       movaps   "OFFS(x)"(%1), %%xmm"#y"       ;\n"
-#define ST(x,y)                "       movaps %%xmm"#y",   "OFFS(x)"(%1)       ;\n"
-#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%2)            ;\n"
-#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%3)            ;\n"
-#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%4)            ;\n"
-#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%5)            ;\n"
-#define PF5(x)         "       prefetchnta "PF_OFFS(x)"(%6)            ;\n"
-#define XO1(x,y)       "       xorps   "OFFS(x)"(%2), %%xmm"#y"        ;\n"
-#define XO2(x,y)       "       xorps   "OFFS(x)"(%3), %%xmm"#y"        ;\n"
-#define XO3(x,y)       "       xorps   "OFFS(x)"(%4), %%xmm"#y"        ;\n"
-#define XO4(x,y)       "       xorps   "OFFS(x)"(%5), %%xmm"#y"        ;\n"
-#define XO5(x,y)       "       xorps   "OFFS(x)"(%6), %%xmm"#y"        ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
-        unsigned long lines = bytes >> 8;
-       char xmm_save[16*4] ALIGN16;
-       int cr0;
-
-       XMMS_SAVE;
-
-        __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-               LD(i,0)                                 \
-                       LD(i+1,1)                       \
-               PF1(i)                                  \
-                               PF1(i+2)                \
-                               LD(i+2,2)               \
-                                       LD(i+3,3)       \
-               PF0(i+4)                                \
-                               PF0(i+6)                \
-               XO1(i,0)                                \
-                       XO1(i+1,1)                      \
-                               XO1(i+2,2)              \
-                                       XO1(i+3,3)      \
-               ST(i,0)                                 \
-                       ST(i+1,1)                       \
-                               ST(i+2,2)               \
-                                       ST(i+3,3)       \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-        " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2)
-       :
-        : "memory");
-
-       XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3)
-{
-        unsigned long lines = bytes >> 8;
-       char xmm_save[16*4] ALIGN16;
-       int cr0;
-
-       XMMS_SAVE;
-
-        __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i+2)                \
-               LD(i,0)                                 \
-                       LD(i+1,1)                       \
-                               LD(i+2,2)               \
-                                       LD(i+3,3)       \
-               PF2(i)                                  \
-                               PF2(i+2)                \
-               PF0(i+4)                                \
-                               PF0(i+6)                \
-               XO1(i,0)                                \
-                       XO1(i+1,1)                      \
-                               XO1(i+2,2)              \
-                                       XO1(i+3,3)      \
-               XO2(i,0)                                \
-                       XO2(i+1,1)                      \
-                               XO2(i+2,2)              \
-                                       XO2(i+3,3)      \
-               ST(i,0)                                 \
-                       ST(i+1,1)                       \
-                               ST(i+2,2)               \
-                                       ST(i+3,3)       \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-        " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r"(p2), "+r"(p3)
-       :
-        : "memory" );
-
-       XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4)
-{
-        unsigned long lines = bytes >> 8;
-       char xmm_save[16*4] ALIGN16;
-       int cr0;
-
-       XMMS_SAVE;
-
-        __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i+2)                \
-               LD(i,0)                                 \
-                       LD(i+1,1)                       \
-                               LD(i+2,2)               \
-                                       LD(i+3,3)       \
-               PF2(i)                                  \
-                               PF2(i+2)                \
-               XO1(i,0)                                \
-                       XO1(i+1,1)                      \
-                               XO1(i+2,2)              \
-                                       XO1(i+3,3)      \
-               PF3(i)                                  \
-                               PF3(i+2)                \
-               PF0(i+4)                                \
-                               PF0(i+6)                \
-               XO2(i,0)                                \
-                       XO2(i+1,1)                      \
-                               XO2(i+2,2)              \
-                                       XO2(i+3,3)      \
-               XO3(i,0)                                \
-                       XO3(i+1,1)                      \
-                               XO3(i+2,2)              \
-                                       XO3(i+3,3)      \
-               ST(i,0)                                 \
-                       ST(i+1,1)                       \
-                               ST(i+2,2)               \
-                                       ST(i+3,3)       \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-        " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       addl $256, %4           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
-       :
-        : "memory" );
-
-       XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
-        unsigned long lines = bytes >> 8;
-       char xmm_save[16*4] ALIGN16;
-       int cr0;
-
-       XMMS_SAVE;
-
-       /* Make sure GCC forgets anything it knows about p4 or p5,
-          such that it won't pass to the asm volatile below a
-          register that is shared with any other variable.  That's
-          because we modify p4 and p5 there, but we can't mark them
-          as read/write, otherwise we'd overflow the 10-asm-operands
-          limit of GCC < 3.1.  */
-       __asm__ ("" : "+r" (p4), "+r" (p5));
-
-        __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i+2)                \
-               LD(i,0)                                 \
-                       LD(i+1,1)                       \
-                               LD(i+2,2)               \
-                                       LD(i+3,3)       \
-               PF2(i)                                  \
-                               PF2(i+2)                \
-               XO1(i,0)                                \
-                       XO1(i+1,1)                      \
-                               XO1(i+2,2)              \
-                                       XO1(i+3,3)      \
-               PF3(i)                                  \
-                               PF3(i+2)                \
-               XO2(i,0)                                \
-                       XO2(i+1,1)                      \
-                               XO2(i+2,2)              \
-                                       XO2(i+3,3)      \
-               PF4(i)                                  \
-                               PF4(i+2)                \
-               PF0(i+4)                                \
-                               PF0(i+6)                \
-               XO3(i,0)                                \
-                       XO3(i+1,1)                      \
-                               XO3(i+2,2)              \
-                                       XO3(i+3,3)      \
-               XO4(i,0)                                \
-                       XO4(i+1,1)                      \
-                               XO4(i+2,2)              \
-                                       XO4(i+3,3)      \
-               ST(i,0)                                 \
-                       ST(i+1,1)                       \
-                               ST(i+2,2)               \
-                                       ST(i+3,3)       \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-        " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-        "       addl $256, %1           ;\n"
-        "       addl $256, %2           ;\n"
-        "       addl $256, %3           ;\n"
-        "       addl $256, %4           ;\n"
-        "       addl $256, %5           ;\n"
-        "       decl %0                 ;\n"
-        "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       : "r" (p4), "r" (p5)
-       : "memory");
-
-       /* p4 and p5 were modified, and now the variables are dead.
-          Clobber them just to be sure nobody does something stupid
-          like assuming they have some legal value.  */
-       __asm__ ("" : "=r" (p4), "=r" (p5));
-
-       XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_pIII_sse = {
-        .name = "pIII_sse",
-        .do_2 =  xor_sse_2,
-        .do_3 =  xor_sse_3,
-        .do_4 =  xor_sse_4,
-        .do_5 = xor_sse_5,
-};
-
-/* Also try the generic routines.  */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-       do {                                            \
-               xor_speed(&xor_block_8regs);            \
-               xor_speed(&xor_block_8regs_p);          \
-               xor_speed(&xor_block_32regs);           \
-               xor_speed(&xor_block_32regs_p);         \
-               if (cpu_has_xmm)                        \
-                       xor_speed(&xor_block_pIII_sse); \
-               if (cpu_has_mmx) {                      \
-                       xor_speed(&xor_block_pII_mmx);  \
-                       xor_speed(&xor_block_p5_mmx);   \
-               }                                       \
-       } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
-   We may also be able to load into L1 only, depending on how the CPU
-   deals with a load to a line that is being prefetched.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
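Every hand-unrolled MMX/SSE routine above computes the same result: the destination buffer p1 is XORed in place with up to four source buffers, 'bytes' bytes at a time. A plain-C reference for the two-buffer case, handy for sanity-checking the assembler variants; it assumes bytes is a multiple of sizeof(unsigned long), as the unrolled versions require of their block sizes:

    static void xor_ref_2(unsigned long bytes, unsigned long *p1,
                          const unsigned long *p2)
    {
            unsigned long words = bytes / sizeof(unsigned long);
            unsigned long i;

            for (i = 0; i < words; i++)
                    p1[i] ^= p2[i];         /* p1 := p1 ^ p2, which is what xor_*_2() compute */
    }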
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/ctrl_if.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/ctrl_if.h
deleted file mode 100644 (file)
index 9b01d85..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-/******************************************************************************
- * ctrl_if.h
- * 
- * Management functions for special interface to the domain controller.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_XEN__CTRL_IF_H__
-#define __ASM_XEN__CTRL_IF_H__
-
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/queues.h>
-
-typedef control_msg_t ctrl_msg_t;
-
-/*
- * Callback function type. Called for asynchronous processing of received
- * request messages, and responses to previously-transmitted request messages.
- * The parameters are (@msg, @id).
- *  @msg: Original request/response message (not a copy). The message can be
- *        modified in-place by the handler (e.g., a response callback can
- *        turn a request message into a response message in place). The message
- *        is no longer accessible after the callback handler returns -- if the
- *        message is required to persist for longer, then it must be copied.
- *  @id:  (Response callbacks only) The 'id' that was specified when the
- *        original request message was queued for transmission.
- */
-typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will not block: it will return -EAGAIN if there is no space.
- * Notes:
- *  1. The @msg is copied if it is transmitted and so can be freed after this
- *     function returns.
- *  2. If @hnd is NULL then no callback is executed.
- */
-int
-ctrl_if_send_message_noblock(
-    ctrl_msg_t *msg, 
-    ctrl_msg_handler_t hnd,
-    unsigned long id);
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will block until the message is sent, or a signal is received
- * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
- * Notes:
- *  1. The @msg is copied if it is transmitted and so can be freed after this
- *     function returns.
- *  2. If @hnd is NULL then no callback is executed.
- */
-int
-ctrl_if_send_message_block(
-    ctrl_msg_t *msg, 
-    ctrl_msg_handler_t hnd, 
-    unsigned long id, 
-    long wait_state);
-
-/*
- * Send @msg to the domain controller. Block until the response is received,
- * and then copy it into the provided buffer, @rmsg.
- */
-int
-ctrl_if_send_message_and_get_response(
-    ctrl_msg_t *msg,
-    ctrl_msg_t *rmsg,
-    long wait_state);
-
-/*
- * Request a callback when there is /possibly/ space to immediately send a
- * message to the domain controller. This function returns 0 if there is
- * already space to transmit a message --- in this case the callback task /may/
- * still be executed. If this function returns 1 then the callback /will/ be
- * executed when space becomes available.
- */
-int
-ctrl_if_enqueue_space_callback(
-    struct tq_struct *task);
-
-/*
- * Send a response (@msg) to a message from the domain controller. This will 
- * never block.
- * Notes:
- *  1. The @msg is copied and so can be freed after this function returns.
- *  2. The @msg may be the original request message, modified in-place.
- */
-void
-ctrl_if_send_response(
-    ctrl_msg_t *msg);
-
-/*
- * Register a receiver for typed messages from the domain controller. The 
- * handler (@hnd) is called for every received message of specified @type.
- * Returns TRUE (non-zero) if the handler was successfully registered.
- * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
- * occur in a context in which it is safe to yield (i.e., process context).
- */
-#define CALLBACK_IN_BLOCKING_CONTEXT 1
-int ctrl_if_register_receiver(
-    u8 type, 
-    ctrl_msg_handler_t hnd,
-    unsigned int flags);
-
-/*
- * Unregister a receiver for typed messages from the domain controller. The 
- * handler (@hnd) will not be executed after this function returns.
- */
-void
-ctrl_if_unregister_receiver(
-    u8 type, ctrl_msg_handler_t hnd);
-
-/* Suspend/resume notifications. */
-void ctrl_if_suspend(void);
-void ctrl_if_resume(void);
-
-/* Start-of-day setup. */
-void ctrl_if_init(void);
-
-/*
- * Returns TRUE if there are no outstanding message requests at the domain
- * controller. This can be used to ensure that messages have really flushed
- * through when it is not possible to use the response-callback interface.
- * WARNING: If other subsystems are using the control interface then this
- * function might never return TRUE!
- */
-int ctrl_if_transmitter_empty(void);  /* !! DANGEROUS FUNCTION !! */
-
-/*
- * Manually discard response messages from the domain controller. 
- * WARNING: This is usually done automatically -- this function should only
- * be called when normal interrupt mechanisms are disabled!
- */
-void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
-
-#endif /* __ASM_XEN__CTRL_IF_H__ */
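ctrl_if_send_message_noblock() queues a request and later invokes the supplied handler with the response message and the caller-chosen id. A sketch of that round trip; the message type/subtype values are placeholders and the control_msg_t field names (type, subtype, length) are assumed from domain_controller.h:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <asm-xen/ctrl_if.h>

    static void example_rsp_handler(ctrl_msg_t *msg, unsigned long id)
    {
            /* msg is only valid for the duration of this callback. */
            printk("ctrl_if: response for request %lu, %u payload bytes\n",
                   id, msg->length);
    }

    static int send_example_request(void)
    {
            ctrl_msg_t msg;

            memset(&msg, 0, sizeof(msg));
            msg.type    = 0;                /* placeholder message class      */
            msg.subtype = 0;                /* placeholder operation          */
            msg.length  = 0;                /* no payload in msg.msg[]        */

            /* Returns -EAGAIN rather than blocking if the ring is full. */
            return ctrl_if_send_message_noblock(&msg, example_rsp_handler, 42);
    }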
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/evtchn.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/evtchn.h
deleted file mode 100644 (file)
index 2ad902e..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/******************************************************************************
- * evtchn.h
- * 
- * Communication via Xen event channels.
- * Also definitions for the device that demuxes notifications to userspace.
- * 
- * Copyright (c) 2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_EVTCHN_H__
-#define __ASM_EVTCHN_H__
-
-#include <linux/config.h>
-#include <asm-xen/hypervisor.h>
-#include <asm/ptrace.h>
-#include <asm/synch_bitops.h>
-#include <asm/hypervisor-ifs/event_channel.h>
-
-/*
- * LOW-LEVEL DEFINITIONS
- */
-
-/* Force a proper event-channel callback from Xen. */
-void force_evtchn_callback(void);
-
-/* Entry point for notifications into Linux subsystems. */
-void evtchn_do_upcall(struct pt_regs *regs);
-
-/* Entry point for notifications into the userland character device. */
-void evtchn_device_upcall(int port);
-
-static inline void mask_evtchn(int port)
-{
-    shared_info_t *s = HYPERVISOR_shared_info;
-    synch_set_bit(port, &s->evtchn_mask[0]);
-}
-
-static inline void unmask_evtchn(int port)
-{
-    shared_info_t *s = HYPERVISOR_shared_info;
-
-    synch_clear_bit(port, &s->evtchn_mask[0]);
-
-    /*
-     * The following is basically the equivalent of 'hw_resend_irq'. Just like
-     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
-     */
-    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
-         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
-    {
-        s->vcpu_data[0].evtchn_upcall_pending = 1;
-        if ( !s->vcpu_data[0].evtchn_upcall_mask )
-            force_evtchn_callback();
-    }
-}
-
-static inline void clear_evtchn(int port)
-{
-    shared_info_t *s = HYPERVISOR_shared_info;
-    synch_clear_bit(port, &s->evtchn_pending[0]);
-}
-
-static inline void notify_via_evtchn(int port)
-{
-    evtchn_op_t op;
-    op.cmd = EVTCHNOP_send;
-    op.u.send.local_port = port;
-    (void)HYPERVISOR_event_channel_op(&op);
-}
-
-/*
- * CHARACTER-DEVICE DEFINITIONS
- */
-
-/* /dev/xen/evtchn resides at device number major=10, minor=201 */
-#define EVTCHN_MINOR 201
-
-/* /dev/xen/evtchn ioctls: */
-/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
-#define EVTCHN_RESET  _IO('E', 1)
-/* EVTCHN_BIND: Bind to the specified event-channel port. */
-#define EVTCHN_BIND   _IO('E', 2)
-/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
-#define EVTCHN_UNBIND _IO('E', 3)
-
-#endif /* __ASM_EVTCHN_H__ */
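The inline helpers above manipulate the shared-info bitmaps directly: mask/unmask gate delivery for one port, clear_evtchn() acknowledges a pending notification, and notify_via_evtchn() issues an EVTCHNOP_send to the peer. A sketch of a simple consume-then-kick step for a single port; the port and the work it protects are hypothetical:

    #include <asm-xen/evtchn.h>

    static void service_port(int port)
    {
            mask_evtchn(port);          /* no further upcalls for this port             */
            clear_evtchn(port);         /* acknowledge the notification being handled   */

            /* ... drain whatever ring or state this port signals ... */

            unmask_evtchn(port);        /* re-enable; re-raises the upcall if we raced  */
            notify_via_evtchn(port);    /* kick the remote end of the channel           */
    }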
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/gnttab.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/gnttab.h
deleted file mode 100644 (file)
index 6e52923..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/******************************************************************************
- * gnttab.h
- * 
- * Two sets of functionality:
- * 1. Granting foreign access to our memory reservation.
- * 2. Accessing others' memory reservations via grant references.
- * (i.e., mechanisms for both sender and recipient of grant references)
- * 
- * Copyright (c) 2004, K A Fraser
- */
-
-#ifndef __ASM_GNTTAB_H__
-#define __ASM_GNTTAB_H__
-
-#include <linux/config.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/hypervisor-ifs/grant_table.h>
-
-int
-gnttab_grant_foreign_access(
-    domid_t domid, unsigned long frame, int readonly);
-
-void
-gnttab_end_foreign_access(
-    grant_ref_t ref, int readonly);
-
-int
-gnttab_grant_foreign_transfer(
-    domid_t domid);
-
-unsigned long
-gnttab_end_foreign_transfer(
-    grant_ref_t ref);
-
-#endif /* __ASM_GNTTAB_H__ */
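The two halves named in the header comment map onto grant/end pairs: gnttab_grant_foreign_access() hands out a grant reference for one of our machine frames, and gnttab_end_foreign_access() revokes it. A sketch of the pair for read-only sharing; the remote domain id and the machine frame number are assumed to be supplied by the caller, and a negative return from the grant call is taken to mean no free reference:

    #include <asm-xen/gnttab.h>

    static int share_frame_readonly(domid_t remote, unsigned long mfn,
                                    grant_ref_t *ref_out)
    {
            int ref = gnttab_grant_foreign_access(remote, mfn, 1 /* readonly */);

            if (ref < 0)
                    return ref;         /* assumed: no free grant reference */

            *ref_out = (grant_ref_t)ref;
            return 0;
    }

    static void unshare_frame(grant_ref_t ref)
    {
            gnttab_end_foreign_access(ref, 1 /* readonly */);
    }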
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h
deleted file mode 100644 (file)
index 3054b30..0000000
+++ /dev/null
@@ -1,650 +0,0 @@
-/******************************************************************************
- * hypervisor.h
- * 
- * Linux-specific hypervisor handling.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __HYPERVISOR_H__
-#define __HYPERVISOR_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-#include <asm/hypervisor-ifs/dom0_ops.h>
-#include <asm/hypervisor-ifs/io/domain_controller.h>
-#include <asm/ptrace.h>
-#include <asm/page.h>
-
-/* arch/xen/i386/kernel/setup.c */
-union xen_start_info_union
-{
-    start_info_t xen_start_info;
-    char padding[512];
-};
-extern union xen_start_info_union xen_start_info_union;
-#define xen_start_info (xen_start_info_union.xen_start_info)
-
-/* arch/xen/kernel/process.c */
-void xen_cpu_idle (void);
-
-/* arch/xen/i386/kernel/hypervisor.c */
-void do_hypervisor_callback(struct pt_regs *regs);
-
-/* arch/xen/i386/mm/init.c */
-/* NOTE: caller must call flush_page_update_queue() */
-#define PROT_ON  1
-#define PROT_OFF 0
-void /* __init */ protect_page(pgd_t *dpgd, void *page, int mode);
-void /* __init */ protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode);
-
-/* arch/xen/i386/kernel/head.S */
-void lgdt_finish(void);
-
-/* arch/xen/i386/mm/hypervisor.c */
-/*
- * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
- * be MACHINE addresses.
- */
-
-extern unsigned int mmu_update_queue_idx;
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
-void xen_l1_entry_update(pte_t *ptr, unsigned long val);
-void xen_l2_entry_update(pmd_t *ptr, unsigned long val);
-void xen_pt_switch(unsigned long ptr);
-void xen_tlb_flush(void);
-void xen_invlpg(unsigned long ptr);
-void xen_pgd_pin(unsigned long ptr);
-void xen_pgd_unpin(unsigned long ptr);
-void xen_pte_pin(unsigned long ptr);
-void xen_pte_unpin(unsigned long ptr);
-void xen_set_ldt(unsigned long ptr, unsigned long bytes);
-void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-#define MMU_UPDATE_DEBUG 0
-
-#if MMU_UPDATE_DEBUG > 0
-typedef struct {
-    void *ptr;
-    unsigned long val, pteval;
-    void *ptep;
-    int line; char *file;
-} page_update_debug_t;
-extern page_update_debug_t update_debug_queue[];
-#define queue_l1_entry_update(_p,_v) ({                           \
- update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
- update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
- queue_l1_entry_update((_p),(_v));                                \
-})
-#define queue_l2_entry_update(_p,_v) ({                           \
- update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
- update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
- queue_l2_entry_update((_p),(_v));                                \
-})
-#endif
-
-#if MMU_UPDATE_DEBUG > 1
-#if MMU_UPDATE_DEBUG > 2
-#undef queue_l1_entry_update
-#define queue_l1_entry_update(_p,_v) ({                           \
- update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
- update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
- printk("L1 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
-        (_p), virt_to_machine(_p), pte_val(*(_p)),                 \
-        (unsigned long)(_v));                                     \
- queue_l1_entry_update((_p),(_v));                                \
-})
-#endif
-#undef queue_l2_entry_update
-#define queue_l2_entry_update(_p,_v) ({                           \
- update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
- update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
- printk("L2 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
-        (_p), virt_to_machine(_p), pmd_val(*_p),                  \
-        (unsigned long)(_v));                                     \
- queue_l2_entry_update((_p),(_v));                                \
-})
-#define queue_pt_switch(_p) ({                                    \
- printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
- queue_pt_switch(_p);                                             \
-})   
-#define queue_tlb_flush() ({                                      \
- printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
- queue_tlb_flush();                                               \
-})   
-#define queue_invlpg(_p) ({                                       \
- printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
- queue_invlpg(_p);                                                \
-})   
-#define queue_pgd_pin(_p) ({                                      \
- printk("PGD PIN %s %d: %08lx/%08lx\n", __FILE__, __LINE__, (_p), \
-       phys_to_machine(_p));                                     \
- queue_pgd_pin(_p);                                               \
-})   
-#define queue_pgd_unpin(_p) ({                                    \
- printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
- queue_pgd_unpin(_p);                                             \
-})   
-#define queue_pte_pin(_p) ({                                      \
- printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
- queue_pte_pin(_p);                                               \
-})   
-#define queue_pte_unpin(_p) ({                                    \
- printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
- queue_pte_unpin(_p);                                             \
-})   
-#define queue_set_ldt(_p,_l) ({                                        \
- printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
- queue_set_ldt((_p), (_l));                                            \
-})   
-#endif
-
-void _flush_page_update_queue(void);
-static inline int flush_page_update_queue(void)
-{
-    unsigned int idx = mmu_update_queue_idx;
-    if ( idx != 0 ) _flush_page_update_queue();
-    return idx;
-}
-#define xen_flush_page_update_queue() (_flush_page_update_queue())
-#define XEN_flush_page_update_queue() (_flush_page_update_queue())
-void MULTICALL_flush_page_update_queue(void);
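The queue_* calls above batch MMU updates; nothing reaches Xen until the queue is flushed. A minimal sketch of the expected calling pattern (kernel context assumed):

/* Sketch: batch a PTE update and a TLB invalidation, then flush. */
static void remap_one_pte(pte_t *ptep, unsigned long machine_pte, unsigned long va)
{
    queue_l1_entry_update(ptep, machine_pte);   /* 'val' is a MACHINE address per the NB above */
    queue_invlpg(va);                           /* invalidate the stale mapping for va */
    flush_page_update_queue();                  /* nothing reaches Xen before this call */
}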
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-/* Allocate a contiguous empty region of low memory. Return virtual start. */
-unsigned long allocate_empty_lowmem_region(unsigned long pages);
-/* Deallocate a contiguous region of low memory. Return it to the allocator. */
-void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
-#endif
-
-/*
- * Assembler stubs for hyper-calls.
- */
-
-static inline int
-HYPERVISOR_set_trap_table(
-    trap_info_t *table)
-{
-    int ret;
-    unsigned long ignore;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ignore)
-       : "0" (__HYPERVISOR_set_trap_table), "1" (table)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_mmu_update(
-    mmu_update_t *req, int count, int *success_count)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
-         "3" (success_count)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_set_gdt(
-    unsigned long *frame_list, int entries)
-{
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
-       : "memory" );
-
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_stack_switch(
-    unsigned long ss, unsigned long esp)
-{
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
-    unsigned long event_selector, unsigned long event_address,
-    unsigned long failsafe_selector, unsigned long failsafe_address)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
-         "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
-    void)
-{
-    int ret;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_yield(
-    void)
-{
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_block(
-    void)
-{
-    int ret;
-    unsigned long ign1;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-       : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_shutdown(
-    void)
-{
-    int ret;
-    unsigned long ign1;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-       : "0" (__HYPERVISOR_sched_op),
-         "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
-        : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_reboot(
-    void)
-{
-    int ret;
-    unsigned long ign1;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-       : "0" (__HYPERVISOR_sched_op),
-         "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
-        : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_suspend(
-    unsigned long srec)
-{
-    int ret;
-    unsigned long ign1, ign2;
-
-    /* NB. On suspend, control software expects a suspend record in %esi. */
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=S" (ign2)
-       : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
-        "S" (srec) : "memory");
-
-    return ret;
-}
-
-static inline long
-HYPERVISOR_set_timer_op(
-    u64 timeout)
-{
-    int ret;
-    unsigned long timeout_hi = (unsigned long)(timeout>>32);
-    unsigned long timeout_lo = (unsigned long)timeout;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
-       : "memory");
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_dom0_op(
-    dom0_op_t *dom0_op)
-{
-    int ret;
-    unsigned long ign1;
-
-    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-       : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
-       : "memory");
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
-    int reg, unsigned long value)
-{
-    int ret;
-    unsigned long ign1, ign2;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
-       : "memory" );
-
-    return ret;
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
-    int reg)
-{
-    unsigned long ret;
-    unsigned long ign;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
-    unsigned long ma, unsigned long word1, unsigned long word2)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
-         "3" (word2)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_set_fast_trap(
-    int idx)
-{
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_dom_mem_op(
-    unsigned int op, unsigned long *extent_list,
-    unsigned long nr_extents, unsigned int extent_order)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4, ign5;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
-         "=D" (ign5)
-       : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
-         "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
-        : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_multicall(
-    void *call_list, int nr_calls)
-{
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping(
-    unsigned long page_nr, pte_t new_val, unsigned long flags)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_update_va_mapping), 
-          "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
-       : "memory" );
-
-    if ( unlikely(ret < 0) )
-    {
-        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
-               page_nr, (new_val).pte_low, flags);
-        BUG();
-    }
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_event_channel_op(
-    void *op)
-{
-    int ret;
-    unsigned long ignore;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ignore)
-       : "0" (__HYPERVISOR_event_channel_op), "1" (op)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_xen_version(
-    int cmd)
-{
-    int ret;
-    unsigned long ignore;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ignore)
-       : "0" (__HYPERVISOR_xen_version), "1" (cmd)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_console_io(
-    int cmd, int count, char *str)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_physdev_op(
-    void *physdev_op)
-{
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_grant_table_op(
-    unsigned int cmd, void *uop, unsigned int count)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
-       : "memory" );
-
-    return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
-    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
-{
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
-          "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
-        "memory" );
-    
-    return ret;
-}
-
-static inline int
-HYPERVISOR_vm_assist(
-    unsigned int cmd, unsigned int type)
-{
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
-       : "memory" );
-
-    return ret;
-}
-
-#endif /* __HYPERVISOR_H__ */
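The stubs above are ordinary C-callable wrappers around the trap instruction. A sketch using two of the argument-free ones; the interpretation of the xen_version return value as a version code is an assumption.

/* Sketch: simple hypercall invocations via the inline stubs above. */
static void example_hypercalls(void)
{
    int ver = HYPERVISOR_xen_version(0);    /* assumed: returns a version code */
    (void)ver;

    HYPERVISOR_yield();                     /* give the CPU up to other domains */
}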
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/multicall.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/multicall.h
deleted file mode 100644 (file)
index ca169b5..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/******************************************************************************
- * multicall.h
- * 
- * Copyright (c) 2003-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm-xen/hypervisor.h>
-
-extern multicall_entry_t multicall_list[];
-extern int nr_multicall_ents;
-
-static inline void queue_multicall0(unsigned long op)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    multicall_list[i].args[0] = arg1;
-    nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall2(
-    unsigned long op, unsigned long arg1, unsigned long arg2)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    multicall_list[i].args[0] = arg1;
-    multicall_list[i].args[1] = arg2;
-    nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall3(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    multicall_list[i].args[0] = arg1;
-    multicall_list[i].args[1] = arg2;
-    multicall_list[i].args[2] = arg3;
-    nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall4(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3, unsigned long arg4)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    multicall_list[i].args[0] = arg1;
-    multicall_list[i].args[1] = arg2;
-    multicall_list[i].args[2] = arg3;
-    multicall_list[i].args[3] = arg4;
-    nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall5(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
-    int i = nr_multicall_ents;
-    multicall_list[i].op      = op;
-    multicall_list[i].args[0] = arg1;
-    multicall_list[i].args[1] = arg2;
-    multicall_list[i].args[2] = arg3;
-    multicall_list[i].args[3] = arg4;
-    multicall_list[i].args[4] = arg5;
-    nr_multicall_ents = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
-    if ( unlikely(nr_multicall_ents == 0) ) return;
-    (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
-    nr_multicall_ents = 0;
-}
-
-#endif /* __MULTICALL_H__ */
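A sketch of the batching pattern this header supports: queue several hypercalls, then issue them all with a single HYPERVISOR_multicall. The op codes used are the same __HYPERVISOR_* constants referenced by the stubs in hypervisor.h above.

/* Sketch: batch two hypercalls into a single multicall. */
static void example_multicall(unsigned long ss, unsigned long esp)
{
    queue_multicall0(__HYPERVISOR_fpu_taskswitch);
    queue_multicall2(__HYPERVISOR_stack_switch, ss, esp);
    execute_multicall_list();   /* one trap into Xen covers both operations */
}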
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/proc_cmd.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/proc_cmd.h
deleted file mode 100644 (file)
index 4292427..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/******************************************************************************
- * proc_cmd.h
- * 
- * Interface to /proc/cmd and /proc/xen/privcmd.
- */
-
-#ifndef __PROC_CMD_H__
-#define __PROC_CMD_H__
-
-typedef struct privcmd_hypercall
-{
-    unsigned long op;
-    unsigned long arg[5];
-} privcmd_hypercall_t;
-
-typedef struct privcmd_mmap_entry {
-    unsigned long va;
-    unsigned long mfn;
-    unsigned long npages;
-} privcmd_mmap_entry_t; 
-
-typedef struct privcmd_mmap {
-    int num;
-    domid_t dom; /* target domain */
-    privcmd_mmap_entry_t *entry;
-} privcmd_mmap_t; 
-
-typedef struct privcmd_mmapbatch {
-    int num;     // number of pages to populate
-    domid_t dom; // target domain 
-    unsigned long addr;  // virtual address
-    unsigned long *arr; // array of mfns - top nibble set on err
-} privcmd_mmapbatch_t; 
-
-typedef struct privcmd_blkmsg
-{
-    unsigned long op;
-    void         *buf;
-    int           buf_size;
-} privcmd_blkmsg_t;
-
-/*
- * @cmd: IOCTL_PRIVCMD_HYPERCALL
- * @arg: &privcmd_hypercall_t
- * Return: Value returned from execution of the specified hypercall.
- */
-#define IOCTL_PRIVCMD_HYPERCALL         \
-    _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-
-/*
- * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
- * @arg: n/a
- * Return: Port associated with domain-controller end of control event channel
- *         for the initial domain.
- */
-#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
-    _IOC(_IOC_NONE, 'P', 1, 0)
-#define IOCTL_PRIVCMD_MMAP             \
-    _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-#define IOCTL_PRIVCMD_MMAPBATCH             \
-    _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
-    _IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
-
-#endif /* __PROC_CMD_H__ */
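A hypothetical userspace sketch of issuing a hypercall through the privcmd node. The device path and the availability of __HYPERVISOR_xen_version (from the Xen public headers) on the include path are assumptions; real control tools pull both from their build environment.

/* Sketch: userspace hypercall via /proc/xen/privcmd (assumed path). */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* assumes proc_cmd.h plus the Xen public headers are on the include path */

int xen_version_via_privcmd(void)
{
    privcmd_hypercall_t call = { .op = __HYPERVISOR_xen_version };  /* arg[] left zero */
    int fd = open("/proc/xen/privcmd", O_RDWR);
    int ret;

    if (fd < 0)
        return -1;
    ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);   /* returns the hypercall's result */
    close(fd);
    return ret;
}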
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/queues.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/queues.h
deleted file mode 100644 (file)
index eb17e33..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-
-/*
- * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
- * queues. Unfortunately the semantics are not the same. With task queues we 
- * can defer work until a particular event occurs -- this is not
- * straightforwardly done with work queues (queued work is performed asap, or
- * after some fixed timeout). Conversely, work queues are a (slightly) neater
- * way of deferring work to a process context than using task queues in 2.4.
- * 
- * This is a bit of a needless reimplementation -- should have just pulled
- * the code from 2.4, but I tried leveraging work queues to simplify things.
- * They didn't help. :-(
- */
-
-#ifndef __QUEUES_H__
-#define __QUEUES_H__
-
-#include <linux/version.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-
-struct tq_struct { 
-    void (*fn)(void *);
-    void *arg;
-    struct list_head list;
-    unsigned long pending;
-};
-#define INIT_TQUEUE(_name, _fn, _arg)               \
-    do {                                            \
-        INIT_LIST_HEAD(&(_name)->list);             \
-        (_name)->pending = 0;                       \
-        (_name)->fn = (_fn); (_name)->arg = (_arg); \
-    } while ( 0 )
-#define DECLARE_TQUEUE(_name, _fn, _arg)            \
-    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
-
-typedef struct {
-    struct list_head list;
-    spinlock_t       lock;
-} task_queue;
-#define DECLARE_TASK_QUEUE(_name) \
-    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
-
-static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
-{
-    unsigned long flags;
-    if ( test_and_set_bit(0, &tqe->pending) )
-        return 0;
-    spin_lock_irqsave(&tql->lock, flags);
-    list_add_tail(&tqe->list, &tql->list);
-    spin_unlock_irqrestore(&tql->lock, flags);
-    return 1;
-}
-
-static inline void run_task_queue(task_queue *tql)
-{
-    struct list_head head, *ent;
-    struct tq_struct *tqe;
-    unsigned long flags;
-    void (*fn)(void *);
-    void *arg;
-
-    spin_lock_irqsave(&tql->lock, flags);
-    list_add(&head, &tql->list);
-    list_del_init(&tql->list);
-    spin_unlock_irqrestore(&tql->lock, flags);
-
-    while ( !list_empty(&head) )
-    {
-        ent = head.next;
-        list_del_init(ent);
-        tqe = list_entry(ent, struct tq_struct, list);
-        fn  = tqe->fn;
-        arg = tqe->arg;
-        wmb();
-        tqe->pending = 0;
-        fn(arg);
-    }
-}
-
-#endif /* __QUEUES_H__ */
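A sketch of the intended usage: work is queued when it becomes pending and runs only when the owner later drains the queue, unlike 2.6 work queues, which run as soon as possible.

/* Sketch: defer work until run_task_queue() is called explicitly. */
static void net_rx_work(void *arg)
{
    /* ... process deferred receive work ... */
}

static DECLARE_TQUEUE(rx_task, net_rx_work, NULL);
static DECLARE_TASK_QUEUE(rx_queue);

static void on_interrupt(void)
{
    queue_task(&rx_task, &rx_queue);    /* no-op while the task is already pending */
}

static void on_event(void)
{
    run_task_queue(&rx_queue);          /* now, and only now, net_rx_work() runs */
}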
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/suspend.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/suspend.h
deleted file mode 100644 (file)
index f05e1b2..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/******************************************************************************
- * suspend.h
- * 
- * Copyright (c) 2003-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_XEN_SUSPEND_H__
-#define __ASM_XEN_SUSPEND_H__
-
-typedef struct suspend_record_st {
-    /* To be filled in before resume. */
-    start_info_t resume_info;
-    /*
-     * The number of a machine frame containing, in sequence, the number of
-     * each machine frame that contains PFN -> MFN translation table data.
-     */
-    unsigned long pfn_to_mfn_frame_list;
-    /* Number of entries in the PFN -> MFN translation table. */
-    unsigned long nr_pfns;
-} suspend_record_t;
-
-#endif /* __ASM_XEN_SUSPEND_H__ */
diff --git a/linux-2.6.8.1-xen-sparse/include/asm-xen/xen_proc.h b/linux-2.6.8.1-xen-sparse/include/asm-xen/xen_proc.h
deleted file mode 100644 (file)
index d62791e..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#ifndef __ASM_XEN_PROC_H__
-#define __ASM_XEN_PROC_H__
-
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-
-extern struct proc_dir_entry *create_xen_proc_entry(
-    const char *name, mode_t mode);
-extern void remove_xen_proc_entry(
-    const char *name);
-
-#endif /* __ASM_XEN_PROC_H__ */
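A sketch of a driver publishing a node under /proc/xen with these helpers. The file-operations wiring is elided; in 2.6.8 one would typically assign ent->proc_fops after creation.

/* Sketch: create and later remove a /proc/xen/<name> entry. */
static int my_driver_init(void)
{
    struct proc_dir_entry *ent = create_xen_proc_entry("mydriver", 0600);
    if (ent == NULL)
        return -ENOMEM;
    /* ent->proc_fops = &my_fops;   attach handlers here (not shown) */
    return 0;
}

static void my_driver_exit(void)
{
    remove_xen_proc_entry("mydriver");
}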
diff --git a/linux-2.6.8.1-xen-sparse/include/linux/bio.h b/linux-2.6.8.1-xen-sparse/include/linux/bio.h
deleted file mode 100644 (file)
index 069345e..0000000
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * 2.5 block I/O model
- *
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
- */
-#ifndef __LINUX_BIO_H
-#define __LINUX_BIO_H
-
-#include <linux/highmem.h>
-#include <linux/mempool.h>
-
-/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
-#include <asm/io.h>
-
-#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
-#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
-#define BIOVEC_VIRT_OVERSIZE(x)        ((x) > BIO_VMERGE_MAX_SIZE)
-#else
-#define BIOVEC_VIRT_START_SIZE(x)      0
-#define BIOVEC_VIRT_OVERSIZE(x)                0
-#endif
-
-#ifndef BIO_VMERGE_BOUNDARY
-#define BIO_VMERGE_BOUNDARY    0
-#endif
-
-#define BIO_DEBUG
-
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON     BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
-
-#define BIO_MAX_PAGES          (256)
-#define BIO_MAX_SIZE           (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
-#define BIO_MAX_SECTORS                (BIO_MAX_SIZE >> 9)
-
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
- */
-struct bio_vec {
-       struct page     *bv_page;
-       unsigned int    bv_len;
-       unsigned int    bv_offset;
-};
-
-struct bio;
-typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
-typedef void (bio_destructor_t) (struct bio *);
-
-/*
- * main unit of I/O for the block layer and lower layers (ie drivers and
- * stacking drivers)
- */
-struct bio {
-       sector_t                bi_sector;
-       struct bio              *bi_next;       /* request queue link */
-       struct block_device     *bi_bdev;
-       unsigned long           bi_flags;       /* status, command, etc */
-       unsigned long           bi_rw;          /* bottom bits READ/WRITE,
-                                                * top bits priority
-                                                */
-
-       unsigned short          bi_vcnt;        /* how many bio_vec's */
-       unsigned short          bi_idx;         /* current index into bvl_vec */
-
-       /* Number of segments in this BIO after
-        * physical address coalescing is performed.
-        */
-       unsigned short          bi_phys_segments;
-
-       /* Number of segments after physical and DMA remapping
-        * hardware coalescing is performed.
-        */
-       unsigned short          bi_hw_segments;
-
-       unsigned int            bi_size;        /* residual I/O count */
-
-       /*
-        * To keep track of the max hw size, we account for the
-        * sizes of the first and last virtually mergeable segments
-        * in this bio
-        */
-       unsigned int            bi_hw_front_size;
-       unsigned int            bi_hw_back_size;
-
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
-
-       struct bio_vec          *bi_io_vec;     /* the actual vec list */
-
-       bio_end_io_t            *bi_end_io;
-       atomic_t                bi_cnt;         /* pin count */
-
-       void                    *bi_private;
-
-       bio_destructor_t        *bi_destructor; /* destructor */
-};
-
-/*
- * bio flags
- */
-#define BIO_UPTODATE   0       /* ok after I/O completion */
-#define BIO_RW_BLOCK   1       /* RW_AHEAD set, and read/write would block */
-#define BIO_EOF                2       /* out-of-bounds error */
-#define BIO_SEG_VALID  3       /* nr_hw_seg valid */
-#define BIO_CLONED     4       /* doesn't own data */
-#define BIO_BOUNCED    5       /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6      /* contains user pages */
-#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
-
-/*
- * top 4 bits of bio flags indicate the pool this bio came from
- */
-#define BIO_POOL_BITS          (4)
-#define BIO_POOL_OFFSET                (BITS_PER_LONG - BIO_POOL_BITS)
-#define BIO_POOL_MASK          (1UL << BIO_POOL_OFFSET)
-#define BIO_POOL_IDX(bio)      ((bio)->bi_flags >> BIO_POOL_OFFSET)    
-
-/*
- * bio bi_rw flags
- *
- * bit 0 -- read (not set) or write (set)
- * bit 1 -- rw-ahead when set
- * bit 2 -- barrier
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
- */
-#define BIO_RW         0
-#define BIO_RW_AHEAD   1
-#define BIO_RW_BARRIER 2
-#define BIO_RW_FAILFAST        3
-#define BIO_RW_SYNC    4
-
-/*
- * various member access, note that bio_data should of course not be used
- * on highmem page vectors
- */
-#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)          bio_iovec((bio))->bv_page
-#define bio_offset(bio)                bio_iovec((bio))->bv_offset
-#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)   (bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)          (page_address(bio_page((bio))) + bio_offset((bio)))
-#define bio_barrier(bio)       ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
-#define bio_sync(bio)          ((bio)->bi_rw & (1 << BIO_RW_SYNC))
-
-/*
- * will die
- */
-#define bio_to_phys(bio)       (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
-#define bvec_to_phys(bv)       (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * queues that have highmem support enabled may still need to revert to
- * PIO transfers occasionally and thus map high pages temporarily. For
- * permanent PIO fall back, user is probably better off disabling highmem
- * I/O completely on that queue (see ide-dma for example)
- */
-#define __bio_kmap_atomic(bio, idx, kmtype)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +    \
-               bio_iovec_idx((bio), (idx))->bv_offset)
-
-#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
-
-/*
- * merge helpers etc
- */
-
-#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
-/* Platforms may set this to restrict multi-page buffer merging. */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
-       ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-#endif
-#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)      \
-       ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
-       (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-       __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
-
-#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
-
-/*
- * drivers should not use the __ version unless they _really_ want to
- * run through the entire bio and not just pending pieces
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
-       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
-            i < (bio)->bi_vcnt;                                        \
-            bvl++, i++)
-
-#define bio_for_each_segment(bvl, bio, i)                              \
-       __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
-
-/*
- * get a reference to a bio, so it won't disappear. the intended use is
- * something like:
- *
- * bio_get(bio);
- * submit_bio(rw, bio);
- * if (bio->bi_flags ...)
- *     do_something
- * bio_put(bio);
- *
- * without the bio_get(), it could potentially complete I/O before submit_bio
- * returns, and then the bio would already be freed memory by the time the
- * if (bio->bi_flags ...) test runs
- */
-#define bio_get(bio)   atomic_inc(&(bio)->bi_cnt)
-
-
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
- *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- *   in bio2.bi_private
- */
-struct bio_pair {
-       struct bio      bio1, bio2;
-       struct bio_vec  bv1, bv2;
-       atomic_t        cnt;
-       int             error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
-                                 int first_sectors);
-extern mempool_t *bio_split_pool;
-extern void bio_pair_release(struct bio_pair *dbio);
-
-extern struct bio *bio_alloc(int, int);
-extern void bio_put(struct bio *);
-
-extern void bio_endio(struct bio *, unsigned int, int);
-struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int bio_hw_segments(struct request_queue *, struct bio *);
-
-extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, int);
-
-extern void bio_init(struct bio *);
-
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
-extern int bio_get_nr_vecs(struct block_device *);
-extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
-                               unsigned long, unsigned int, int);
-extern void bio_unmap_user(struct bio *);
-extern void bio_set_pages_dirty(struct bio *bio);
-extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
-extern int bio_uncopy_user(struct bio *);
-
-#ifdef CONFIG_HIGHMEM
-/*
- * remember to add offset! and never ever reenable interrupts between a
- * bvec_kmap_irq and bvec_kunmap_irq!!
- *
- * This function MUST be inlined - it plays with the CPU interrupt flags.
- * Hence the `extern inline'.
- */
-extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
-       unsigned long addr;
-
-       /*
-        * might not be a highmem page, but the preempt/irq count
-        * balancing is a lot nicer this way
-        */
-       local_irq_save(*flags);
-       addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
-
-       BUG_ON(addr & ~PAGE_MASK);
-
-       return (char *) addr + bvec->bv_offset;
-}
-
-extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
-       unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
-       kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
-       local_irq_restore(*flags);
-}
-
-#else
-#define bvec_kmap_irq(bvec, flags)     (page_address((bvec)->bv_page) + (bvec)->bv_offset)
-#define bvec_kunmap_irq(buf, flags)    do { *(flags) = 0; } while (0)
-#endif
-
-extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
-                                  unsigned long *flags)
-{
-       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
-}
-#define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
-
-#define bio_kmap_irq(bio, flags) \
-       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
-#define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
-
-#endif /* __LINUX_BIO_H */
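The segment iterators defined above are the normal way to walk a bio's remaining data; a small sketch:

/* Sketch: count the bytes still pending in a bio using the iterator macros. */
static unsigned int bio_pending_bytes(struct bio *bio)
{
    struct bio_vec *bvec;
    unsigned int bytes = 0;
    int i;

    bio_for_each_segment(bvec, bio, i)
        bytes += bvec->bv_len;          /* walks bi_idx .. bi_vcnt-1 */

    return bytes;                       /* normally equals bio->bi_size */
}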
diff --git a/linux-2.6.8.1-xen-sparse/include/linux/page-flags.h b/linux-2.6.8.1-xen-sparse/include/linux/page-flags.h
deleted file mode 100644 (file)
index fdf0d3f..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Macros for manipulating and testing page->flags
- */
-
-#ifndef PAGE_FLAGS_H
-#define PAGE_FLAGS_H
-
-#include <linux/percpu.h>
-#include <linux/cache.h>
-#include <asm/pgtable.h>
-
-/*
- * Various page->flags bits:
- *
- * PG_reserved is set for special pages, which can never be swapped out. Some
- * of them might not even exist (eg empty_bad_page)...
- *
- * The PG_private bitflag is set if page->private contains a valid value.
- *
- * During disk I/O, PG_locked is used. This bit is set before I/O and
- * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
- * waiting for the I/O on this page to complete.
- *
- * PG_uptodate tells whether the page's contents is valid.  When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
- * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
- * which is set any time the system accesses that page through the (mapping,
- * index) hash table.  This referenced bit, together with the referenced bit
- * in the page tables, is used to manipulate page->age and move the page across
- * the active, inactive_dirty and inactive_clean lists.
- *
- * Note that the referenced bit, the page->lru list_head and the active,
- * inactive_dirty and inactive_clean lists are protected by the
- * zone->lru_lock, and *NOT* by the usual PG_locked bit!
- *
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
- * PG_arch_1 is an architecture specific page state bit.  The generic code
- * guarantees that this bit is cleared for a page when it first is entered into
- * the page cache.
- *
- * PG_highmem pages are not permanently mapped into the kernel virtual address
- * space, so they need to be kmapped separately for doing IO on the pages.  The
- * struct page itself (including these flag bits) is always mapped into kernel
- * address space...
- */
-
-/*
- * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
- * locked- and dirty-page accounting.  The top eight bits of page->flags are
- * used for page->zone, so putting flag bits there doesn't work.
- */
-#define PG_locked               0      /* Page is locked. Don't touch. */
-#define PG_error                1
-#define PG_referenced           2
-#define PG_uptodate             3
-
-#define PG_dirty                4
-#define PG_lru                  5
-#define PG_active               6
-#define PG_slab                         7      /* slab debug (Suparna wants this) */
-
-#define PG_highmem              8
-#define PG_checked              9      /* kill me in 2.5.<early>. */
-#define PG_arch_1              10
-#define PG_reserved            11
-
-#define PG_private             12      /* Has something at ->private */
-#define PG_writeback           13      /* Page is under writeback */
-#define PG_nosave              14      /* Used for system suspend/resume */
-#define PG_maplock             15      /* Lock bit for rmap to ptes */
-
-#define PG_swapcache           16      /* Swap page: swp_entry_t in private */
-#define PG_mappedtodisk                17      /* Has blocks allocated on-disk */
-#define PG_reclaim             18      /* To be reclaimed asap */
-#define PG_compound            19      /* Part of a compound page */
-
-#define PG_anon                        20      /* Anonymous: anon_vma in mapping */
-#define PG_foreign             21      /* Page belongs to foreign allocator */
-
-
-/*
- * Global page accounting.  One instance per CPU.  Only unsigned longs are
- * allowed.
- */
-struct page_state {
-       unsigned long nr_dirty;         /* Dirty writeable pages */
-       unsigned long nr_writeback;     /* Pages under writeback */
-       unsigned long nr_unstable;      /* NFS unstable pages */
-       unsigned long nr_page_table_pages;/* Pages used for pagetables */
-       unsigned long nr_mapped;        /* mapped into pagetables */
-       unsigned long nr_slab;          /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
-       /*
-        * The below are zeroed by get_page_state().  Use get_full_page_state()
-        * to add up all these.
-        */
-       unsigned long pgpgin;           /* Disk reads */
-       unsigned long pgpgout;          /* Disk writes */
-       unsigned long pswpin;           /* swap reads */
-       unsigned long pswpout;          /* swap writes */
-       unsigned long pgalloc_high;     /* page allocations */
-
-       unsigned long pgalloc_normal;
-       unsigned long pgalloc_dma;
-       unsigned long pgfree;           /* page freeings */
-       unsigned long pgactivate;       /* pages moved inactive->active */
-       unsigned long pgdeactivate;     /* pages moved active->inactive */
-
-       unsigned long pgfault;          /* faults (major+minor) */
-       unsigned long pgmajfault;       /* faults (major only) */
-       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
-       unsigned long pgrefill_normal;
-       unsigned long pgrefill_dma;
-
-       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
-       unsigned long pgsteal_normal;
-       unsigned long pgsteal_dma;
-       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-       unsigned long pgscan_kswapd_normal;
-
-       unsigned long pgscan_kswapd_dma;
-       unsigned long pgscan_direct_high;/* total highmem pages scanned */
-       unsigned long pgscan_direct_normal;
-       unsigned long pgscan_direct_dma;
-       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
-
-       unsigned long slabs_scanned;    /* slab objects scanned */
-       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
-       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
-       unsigned long allocstall;       /* direct reclaim calls */
-
-       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
-};
-
-DECLARE_PER_CPU(struct page_state, page_states);
-
-extern void get_page_state(struct page_state *ret);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long __read_page_state(unsigned offset);
-
-#define read_page_state(member) \
-       __read_page_state(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta)                                  \
-       do {                                                            \
-               unsigned long flags;                                    \
-               local_irq_save(flags);                                  \
-               __get_cpu_var(page_states).member += (delta);           \
-               local_irq_restore(flags);                               \
-       } while (0)
-
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define mod_page_state_zone(zone, member, delta)                       \
-       do {                                                            \
-               unsigned long flags;                                    \
-               local_irq_save(flags);                                  \
-               if (is_highmem(zone))                                   \
-                       __get_cpu_var(page_states).member##_high += (delta);\
-               else if (is_normal(zone))                               \
-                       __get_cpu_var(page_states).member##_normal += (delta);\
-               else                                                    \
-                       __get_cpu_var(page_states).member##_dma += (delta);\
-               local_irq_restore(flags);                               \
-       } while (0)
-
-/*
- * Manipulation of page state flags
- */
-#define PageLocked(page)               \
-               test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page)            \
-               set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page)                \
-               test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page)          \
-               clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page)      \
-               test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page)                test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page)        set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page)      clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageUptodate(page)     test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
-#define SetPageUptodate(page)  set_bit(PG_uptodate, &(page)->flags)
-#endif
-#define ClearPageUptodate(page)        clear_bit(PG_uptodate, &(page)->flags)
-
-#define PageDirty(page)                test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-
-#define SetPageLRU(page)       set_bit(PG_lru, &(page)->flags)
-#define PageLRU(page)          test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page)   test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
-
-#define PageActive(page)       test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)    set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)  clear_bit(PG_active, &(page)->flags)
-#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
-#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
-
-#define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
-#define SetPageSlab(page)      set_bit(PG_slab, &(page)->flags)
-#define ClearPageSlab(page)    clear_bit(PG_slab, &(page)->flags)
-#define TestClearPageSlab(page)        test_and_clear_bit(PG_slab, &(page)->flags)
-#define TestSetPageSlab(page)  test_and_set_bit(PG_slab, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page)      test_bit(PG_highmem, &(page)->flags)
-#else
-#define PageHighMem(page)      0 /* needed to optimize away at compile time */
-#endif
-
-#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page)  set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page)        clear_bit(PG_reserved, &(page)->flags)
-
-#define SetPagePrivate(page)   set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page)      test_bit(PG_private, &(page)->flags)
-
-#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
-#define SetPageWriteback(page)                                         \
-       do {                                                            \
-               if (!test_and_set_bit(PG_writeback,                     \
-                               &(page)->flags))                        \
-                       inc_page_state(nr_writeback);                   \
-       } while (0)
-#define TestSetPageWriteback(page)                                     \
-       ({                                                              \
-               int ret;                                                \
-               ret = test_and_set_bit(PG_writeback,                    \
-                                       &(page)->flags);                \
-               if (!ret)                                               \
-                       inc_page_state(nr_writeback);                   \
-               ret;                                                    \
-       })
-#define ClearPageWriteback(page)                                       \
-       do {                                                            \
-               if (test_and_clear_bit(PG_writeback,                    \
-                               &(page)->flags))                        \
-                       dec_page_state(nr_writeback);                   \
-       } while (0)
-#define TestClearPageWriteback(page)                                   \
-       ({                                                              \
-               int ret;                                                \
-               ret = test_and_clear_bit(PG_writeback,                  \
-                               &(page)->flags);                        \
-               if (ret)                                                \
-                       dec_page_state(nr_writeback);                   \
-               ret;                                                    \
-       })
-
-#define PageNosave(page)       test_bit(PG_nosave, &(page)->flags)
-#define SetPageNosave(page)    set_bit(PG_nosave, &(page)->flags)
-#define TestSetPageNosave(page)        test_and_set_bit(PG_nosave, &(page)->flags)
-#define ClearPageNosave(page)          clear_bit(PG_nosave, &(page)->flags)
-#define TestClearPageNosave(page)      test_and_clear_bit(PG_nosave, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReclaim(page)      test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page)   set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
-
-#define PageCompound(page)     test_bit(PG_compound, &(page)->flags)
-#define SetPageCompound(page)  set_bit(PG_compound, &(page)->flags)
-#define ClearPageCompound(page)        clear_bit(PG_compound, &(page)->flags)
-
-#define PageAnon(page)         test_bit(PG_anon, &(page)->flags)
-#define SetPageAnon(page)      set_bit(PG_anon, &(page)->flags)
-#define ClearPageAnon(page)    clear_bit(PG_anon, &(page)->flags)
-
-/* A foreign page uses a custom destructor rather than the buddy allocator. */
-#ifdef CONFIG_FOREIGN_PAGES
-#define PageForeign(page)      test_bit(PG_foreign, &(page)->flags)
-#define SetPageForeign(page, dtor) do {                \
-       set_bit(PG_foreign, &(page)->flags);    \
-       (page)->mapping = (void *)dtor;         \
-} while (0)
-#define ClearPageForeign(page) do {            \
-       clear_bit(PG_foreign, &(page)->flags);  \
-       (page)->mapping = NULL;                 \
-} while (0)
-#define PageForeignDestructor(page)    \
-       ( (void (*) (struct page *)) (page)->mapping )
-#else
-#define PageForeign(page)      0
-#define PageForeignDestructor(page)    void
-#endif
-
-#ifdef CONFIG_SWAP
-#define PageSwapCache(page)    test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
-#else
-#define PageSwapCache(page)    0
-#endif
-
-struct page;   /* forward declaration */
-
-int test_clear_page_dirty(struct page *page);
-int __clear_page_dirty(struct page *page);
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
-
-static inline void clear_page_dirty(struct page *page)
-{
-       test_clear_page_dirty(page);
-}
-
-static inline void set_page_writeback(struct page *page)
-{
-       test_set_page_writeback(page);
-}
-
-#endif /* PAGE_FLAGS_H */
diff --git a/linux-2.6.8.1-xen-sparse/include/linux/skbuff.h b/linux-2.6.8.1-xen-sparse/include/linux/skbuff.h
deleted file mode 100644 (file)
index 3319d01..0000000
+++ /dev/null
@@ -1,1170 +0,0 @@
-/*
- *     Definitions for the 'struct sk_buff' memory handlers.
- *
- *     Authors:
- *             Alan Cox, <gw4pts@gw4pts.ampr.org>
- *             Florian La Roche, <rzsfl@rz.uni-sb.de>
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_SKBUFF_H
-#define _LINUX_SKBUFF_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <linux/time.h>
-#include <linux/cache.h>
-
-#include <asm/atomic.h>
-#include <asm/types.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/poll.h>
-#include <linux/net.h>
-#include <net/checksum.h>
-
-#define HAVE_ALLOC_SKB         /* For the drivers to know */
-#define HAVE_ALIGNABLE_SKB     /* Ditto 8)                */
-#define SLAB_SKB               /* Slabified skbuffs       */
-
-#define CHECKSUM_NONE 0
-#define CHECKSUM_HW 1
-#define CHECKSUM_UNNECESSARY 2
-
-#define SKB_DATA_ALIGN(X)      (((X) + (SMP_CACHE_BYTES - 1)) & \
-                                ~(SMP_CACHE_BYTES - 1))
-#define SKB_MAX_ORDER(X, ORDER)        (((PAGE_SIZE << (ORDER)) - (X) - \
-                                 sizeof(struct skb_shared_info)) & \
-                                 ~(SMP_CACHE_BYTES - 1))
-#define SKB_MAX_HEAD(X)                (SKB_MAX_ORDER((X), 0))
-#define SKB_MAX_ALLOC          (SKB_MAX_ORDER(0, 2))
-
-/* A. Checksumming of received packets by device.
- *
- *     NONE: device failed to checksum this packet.
- *             skb->csum is undefined.
- *
- *     UNNECESSARY: device parsed the packet and verified the checksum.
- *             skb->csum is undefined.
- *           This is a bad option, but, unfortunately, many vendors do this.
- *           Apparently with the secret goal of selling you a new device
- *           when you add a new protocol to your host. F.e. IPv6. 8)
- *
- *     HW: the most generic way. The device supplied the checksum of the
- *         _whole_ packet as seen by netif_rx in skb->csum.
- *         NOTE: Even if a device supports only some protocols but is
- *         able to produce some skb->csum, it MUST use HW,
- *         not UNNECESSARY.
- *
- * B. Checksumming on output.
- *
- *     NONE: skb is checksummed by protocol or csum is not required.
- *
- *     HW: device is required to csum packet as seen by hard_start_xmit
- *     from skb->h.raw to the end and to record the checksum
- *     at skb->h.raw+skb->csum.
- *
- *     Device must show its capabilities in dev->features, set
- *     at device setup time.
- *     NETIF_F_HW_CSUM - a clever device that is able to checksum
- *                       everything.
- *     NETIF_F_NO_CSUM - loopback or reliable single-hop media.
- *     NETIF_F_IP_CSUM - the device is dumb; it is able to csum only
- *                       TCP/UDP over IPv4. Sigh. Vendors like this
- *                       way for an unknown reason. Though, see comment above
- *                       about CHECKSUM_UNNECESSARY. 8)
- *
- *     Any questions? No questions, good.              --ANK
- */
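A rough sketch of how a receive path might apply the convention above; this is
illustrative only and not part of the header, and hw_verified/hw_csum stand for
whatever a hypothetical driver learns from its hardware descriptor.

	if (hw_verified) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* device checked it */
	} else if (hw_csum) {
		skb->ip_summed = CHECKSUM_HW;		/* raw csum of whole packet */
		skb->csum      = hw_csum;
	} else {
		skb->ip_summed = CHECKSUM_NONE;		/* stack must verify */
	}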
-
-#ifdef __i386__
-#define NET_CALLER(arg) (*(((void **)&arg) - 1))
-#else
-#define NET_CALLER(arg) __builtin_return_address(0)
-#endif
-
-#ifdef CONFIG_NETFILTER
-struct nf_conntrack {
-       atomic_t use;
-       void (*destroy)(struct nf_conntrack *);
-};
-
-struct nf_ct_info {
-       struct nf_conntrack *master;
-};
-
-#ifdef CONFIG_BRIDGE_NETFILTER
-struct nf_bridge_info {
-       atomic_t use;
-       struct net_device *physindev;
-       struct net_device *physoutdev;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       struct net_device *netoutdev;
-#endif
-       unsigned int mask;
-       unsigned long data[32 / sizeof(unsigned long)];
-};
-#endif
-
-#endif
-
-struct sk_buff_head {
-       /* These two members must be first. */
-       struct sk_buff  *next;
-       struct sk_buff  *prev;
-
-       __u32           qlen;
-       spinlock_t      lock;
-};
-
-struct sk_buff;
-
-/* To allow a 64K frame to be packed as a single skb without a frag_list */
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
-
-typedef struct skb_frag_struct skb_frag_t;
-
-struct skb_frag_struct {
-       struct page *page;
-       __u16 page_offset;
-       __u16 size;
-};
-
-/* This data is invariant across clones and lives at
- * the end of the header data, i.e. at skb->end.
- */
-struct skb_shared_info {
-       atomic_t        dataref;
-       unsigned int    nr_frags;
-       unsigned short  tso_size;
-       unsigned short  tso_segs;
-       struct sk_buff  *frag_list;
-       skb_frag_t      frags[MAX_SKB_FRAGS];
-};
-
-/** 
- *     struct sk_buff - socket buffer
- *     @next: Next buffer in list
- *     @prev: Previous buffer in list
- *     @list: List we are on
- *     @sk: Socket we are owned by
- *     @stamp: Time we arrived
- *     @dev: Device we arrived on/are leaving by
- *     @input_dev: Device we arrived on
- *      @real_dev: The real device we are using
- *     @h: Transport layer header
- *     @nh: Network layer header
- *     @mac: Link layer header
- *     @dst: FIXME: Describe this field
- *     @cb: Control buffer. Free for use by every layer. Put private vars here
- *     @len: Length of actual data
- *     @data_len: Data length
- *     @mac_len: Length of link layer header
- *     @csum: Checksum
- *     @__unused: Dead field, may be reused
- *     @cloned: Head may be cloned (check refcnt to be sure)
- *     @pkt_type: Packet class
- *     @ip_summed: Driver fed us an IP checksum
- *     @priority: Packet queueing priority
- *     @users: User count - see {datagram,tcp}.c
- *     @protocol: Packet protocol from driver
- *     @security: Security level of packet
- *     @truesize: Buffer size 
- *     @head: Head of buffer
- *     @data: Data head pointer
- *     @tail: Tail pointer
- *     @end: End pointer
- *     @destructor: Destruct function
- *     @nfmark: Can be used for communication between hooks
- *     @nfcache: Cache info
- *     @nfct: Associated connection, if any
- *     @nf_debug: Netfilter debugging
- *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
- *      @private: Data which is private to the HIPPI implementation
- *     @tc_index: Traffic control index
- */
-
-struct sk_buff {
-       /* These two members must be first. */
-       struct sk_buff          *next;
-       struct sk_buff          *prev;
-
-       struct sk_buff_head     *list;
-       struct sock             *sk;
-       struct timeval          stamp;
-       struct net_device       *dev;
-       struct net_device       *input_dev;
-       struct net_device       *real_dev;
-
-       union {
-               struct tcphdr   *th;
-               struct udphdr   *uh;
-               struct icmphdr  *icmph;
-               struct igmphdr  *igmph;
-               struct iphdr    *ipiph;
-               struct ipv6hdr  *ipv6h;
-               unsigned char   *raw;
-       } h;
-
-       union {
-               struct iphdr    *iph;
-               struct ipv6hdr  *ipv6h;
-               struct arphdr   *arph;
-               unsigned char   *raw;
-       } nh;
-
-       union {
-               struct ethhdr   *ethernet;
-               unsigned char   *raw;
-       } mac;
-
-       struct  dst_entry       *dst;
-       struct  sec_path        *sp;
-
-       /*
-        * This is the control buffer. It is free to use for every
-        * layer. Please put your private variables there. If you
-        * want to keep them across layers you have to do a skb_clone()
-        * first. This is owned by whoever has the skb queued ATM.
-        */
-       char                    cb[40];
-
-       unsigned int            len,
-                               data_len,
-                               mac_len,
-                               csum;
-       unsigned char           local_df,
-                               cloned,
-                               pkt_type,
-                               ip_summed;
-       __u32                   priority;
-       unsigned short          protocol,
-                               security;
-
-       void                    (*destructor)(struct sk_buff *skb);
-#ifdef CONFIG_NETFILTER
-        unsigned long          nfmark;
-       __u32                   nfcache;
-       struct nf_ct_info       *nfct;
-#ifdef CONFIG_NETFILTER_DEBUG
-        unsigned int           nf_debug;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       struct nf_bridge_info   *nf_bridge;
-#endif
-#endif /* CONFIG_NETFILTER */
-#if defined(CONFIG_HIPPI)
-       union {
-               __u32           ifield;
-       } private;
-#endif
-#ifdef CONFIG_NET_SCHED
-       __u32                   tc_index;        /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
-       __u32           tc_verd;               /* traffic control verdict */
-       __u32           tc_classid;            /* traffic control classid */
- #endif
-
-#endif
-
-
-       /* These elements must be at the end, see alloc_skb() for details.  */
-       unsigned int            truesize;
-       atomic_t                users;
-       unsigned char           *head,
-                               *data,
-                               *tail,
-                               *end;
-};
-
-#ifdef __KERNEL__
-/*
- *     Handling routines are only of interest to the kernel
- */
-#include <linux/slab.h>
-
-#include <asm/system.h>
-
-extern void           __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *alloc_skb(unsigned int size, int priority);
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-                                           unsigned int size, int priority);
-extern void           kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int            pskb_expand_head(struct sk_buff *skb,
-                                       int nhead, int ntail, int gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-                                           unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                                      int newheadroom, int newtailroom,
-                                      int priority);
-extern struct sk_buff *                skb_pad(struct sk_buff *skb, int pad);
-#define dev_kfree_skb(a)       kfree_skb(a)
-extern void          skb_over_panic(struct sk_buff *skb, int len,
-                                    void *here);
-extern void          skb_under_panic(struct sk_buff *skb, int len,
-                                     void *here);
-
-/* Internal */
-#define skb_shinfo(SKB)                ((struct skb_shared_info *)((SKB)->end))
-
-/**
- *     skb_queue_empty - check if a queue is empty
- *     @list: queue head
- *
- *     Returns true if the queue is empty, false otherwise.
- */
-static inline int skb_queue_empty(const struct sk_buff_head *list)
-{
-       return list->next == (struct sk_buff *)list;
-}
-
-/**
- *     skb_get - reference buffer
- *     @skb: buffer to reference
- *
- *     Makes another reference to a socket buffer and returns a pointer
- *     to the buffer.
- */
-static inline struct sk_buff *skb_get(struct sk_buff *skb)
-{
-       atomic_inc(&skb->users);
-       return skb;
-}
-
-/*
- * If users == 1, we are the only owner and can avoid a redundant
- * atomic change.
- */
-
-/**
- *     kfree_skb - free an sk_buff
- *     @skb: buffer to free
- *
- *     Drop a reference to the buffer and free it if the usage count has
- *     hit zero.
- */
-static inline void kfree_skb(struct sk_buff *skb)
-{
-       if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
-               __kfree_skb(skb);
-}
-
-/* Use this if you didn't touch the skb state [for fast switching] */
-static inline void kfree_skb_fast(struct sk_buff *skb)
-{
-       if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
-               kfree_skbmem(skb);
-}
-
-/**
- *     skb_cloned - is the buffer a clone
- *     @skb: buffer to check
- *
- *     Returns true if the buffer was generated with skb_clone() and is
- *     one of multiple shared copies of the buffer. Cloned buffers are
- *     shared data so must not be written to under normal circumstances.
- */
-static inline int skb_cloned(const struct sk_buff *skb)
-{
-       return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
-}
-
-/**
- *     skb_shared - is the buffer shared
- *     @skb: buffer to check
- *
- *     Returns true if more than one person has a reference to this
- *     buffer.
- */
-static inline int skb_shared(const struct sk_buff *skb)
-{
-       return atomic_read(&skb->users) != 1;
-}
-
-/**
- *     skb_share_check - check if buffer is shared and if so clone it
- *     @skb: buffer to check
- *     @pri: priority for memory allocation
- *
- *     If the buffer is shared the buffer is cloned and the old copy
- *     drops a reference. A new clone with a single reference is returned.
- *     If the buffer is not shared the original buffer is returned. When
- *     called from interrupt context or with spinlocks held, @pri must
- *     be %GFP_ATOMIC.
- *
- *     NULL is returned on a memory allocation failure.
- */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
-{
-       might_sleep_if(pri & __GFP_WAIT);
-       if (skb_shared(skb)) {
-               struct sk_buff *nskb = skb_clone(skb, pri);
-               kfree_skb(skb);
-               skb = nskb;
-       }
-       return skb;
-}
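Typical usage, sketched for a receive handler that is about to modify the
buffer (the surrounding function and its error handling are assumptions):

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;		/* clone failed; the reference was already dropped */
	/* skb now holds a single reference and may be modified safely */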
-
-/*
- *     Copy shared buffers into a new sk_buff. We effectively do COW on
- *     packets to handle cases where we have a local reader and a forwarder,
- *     and a couple of other messy ones. The normal one is tcpdumping
- *     a packet that's being forwarded.
- */
-
-/**
- *     skb_unshare - make a copy of a shared buffer
- *     @skb: buffer to check
- *     @pri: priority for memory allocation
- *
- *     If the socket buffer is a clone then this function creates a new
- *     copy of the data, drops a reference count on the old copy and returns
- *     the new copy with the reference count at 1. If the buffer is not a clone
- *     the original buffer is returned. When called with a spinlock held or
- *     from interrupt context, @pri must be %GFP_ATOMIC.
- *
- *     %NULL is returned on a memory allocation failure.
- */
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
-{
-       might_sleep_if(pri & __GFP_WAIT);
-       if (skb_cloned(skb)) {
-               struct sk_buff *nskb = skb_copy(skb, pri);
-               kfree_skb(skb); /* Free our shared copy */
-               skb = nskb;
-       }
-       return skb;
-}
-
-/**
- *     skb_peek
- *     @list_: list to peek at
- *
- *     Peek an &sk_buff. Unlike most other operations you _MUST_
- *     be careful with this one. A peek leaves the buffer on the
- *     list and someone else may run off with it. You must hold
- *     the appropriate locks or have a private queue to do this.
- *
- *     Returns %NULL for an empty list or a pointer to the head element.
- *     The reference count is not incremented and the reference is therefore
- *     volatile. Use with caution.
- */
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
-{
-       struct sk_buff *list = ((struct sk_buff *)list_)->next;
-       if (list == (struct sk_buff *)list_)
-               list = NULL;
-       return list;
-}
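Since a peeked buffer stays on the list, callers normally hold the queue's own
lock around the peek and any inspection; a sketch, where q, flags and
inspect() are assumed placeholders:

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);
	if (skb != NULL)
		inspect(skb);	/* look, but do not unlink or free */
	spin_unlock_irqrestore(&q->lock, flags);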
-
-/**
- *     skb_peek_tail
- *     @list_: list to peek at
- *
- *     Peek an &sk_buff. Unlike most other operations you _MUST_
- *     be careful with this one. A peek leaves the buffer on the
- *     list and someone else may run off with it. You must hold
- *     the appropriate locks or have a private queue to do this.
- *
- *     Returns %NULL for an empty list or a pointer to the tail element.
- *     The reference count is not incremented and the reference is therefore
- *     volatile. Use with caution.
- */
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
-{
-       struct sk_buff *list = ((struct sk_buff *)list_)->prev;
-       if (list == (struct sk_buff *)list_)
-               list = NULL;
-       return list;
-}
-
-/**
- *     skb_queue_len   - get queue length
- *     @list_: list to measure
- *
- *     Return the length of an &sk_buff queue.
- */
-static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
-{
-       return list_->qlen;
-}
-
-static inline void skb_queue_head_init(struct sk_buff_head *list)
-{
-       spin_lock_init(&list->lock);
-       list->prev = list->next = (struct sk_buff *)list;
-       list->qlen = 0;
-}
-
-/*
- *     Insert an sk_buff at the start of a list.
- *
- *     The "__skb_xxxx()" functions are the non-atomic ones that
- *     can only be called with interrupts disabled.
- */
-
-/**
- *     __skb_queue_head - queue a buffer at the list head
- *     @list: list to use
- *     @newsk: buffer to queue
- *
- *     Queue a buffer at the start of a list. This function takes no locks
- *     and you must therefore hold required locks before calling it.
- *
- *     A buffer cannot be placed on two lists at the same time.
- */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
-static inline void __skb_queue_head(struct sk_buff_head *list,
-                                   struct sk_buff *newsk)
-{
-       struct sk_buff *prev, *next;
-
-       newsk->list = list;
-       list->qlen++;
-       prev = (struct sk_buff *)list;
-       next = prev->next;
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
-}
-
-/**
- *     __skb_queue_tail - queue a buffer at the list tail
- *     @list: list to use
- *     @newsk: buffer to queue
- *
- *     Queue a buffer at the end of a list. This function takes no locks
- *     and you must therefore hold required locks before calling it.
- *
- *     A buffer cannot be placed on two lists at the same time.
- */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
-static inline void __skb_queue_tail(struct sk_buff_head *list,
-                                  struct sk_buff *newsk)
-{
-       struct sk_buff *prev, *next;
-
-       newsk->list = list;
-       list->qlen++;
-       next = (struct sk_buff *)list;
-       prev = next->prev;
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
-}
-
-
-/**
- *     __skb_dequeue - remove from the head of the queue
- *     @list: list to dequeue from
- *
- *     Remove the head of the list. This function does not take any locks
- *     so must be used with appropriate locks held only. The head item is
- *     returned or %NULL if the list is empty.
- */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
-{
-       struct sk_buff *next, *prev, *result;
-
-       prev = (struct sk_buff *) list;
-       next = prev->next;
-       result = NULL;
-       if (next != prev) {
-               result       = next;
-               next         = next->next;
-               list->qlen--;
-               next->prev   = prev;
-               prev->next   = next;
-               result->next = result->prev = NULL;
-               result->list = NULL;
-       }
-       return result;
-}
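A small sketch of the queue API declared above; rxq and skb_in are illustrative
names, and the locking skb_queue_tail()/skb_dequeue() variants are used so no
explicit locking is needed:

	struct sk_buff_head rxq;
	struct sk_buff *skb;

	skb_queue_head_init(&rxq);		/* empty queue, lock initialised */
	skb_queue_tail(&rxq, skb_in);		/* producer side */
	while ((skb = skb_dequeue(&rxq)) != NULL)
		kfree_skb(skb);			/* consumer side */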
-
-
-/*
- *     Insert a packet on a list.
- */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_insert(struct sk_buff *newsk,
-                               struct sk_buff *prev, struct sk_buff *next,
-                               struct sk_buff_head *list)
-{
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
-       newsk->list = list;
-       list->qlen++;
-}
-
-/*
- *     Place a packet after a given packet in a list.
- */
-extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
-       __skb_insert(newsk, old, old->next, old->list);
-}
-
-/*
- * Remove an sk_buff from a list. _Must_ be called atomically, and with
- * the list known.
- */
-extern void       skb_unlink(struct sk_buff *skb);
-static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
-{
-       struct sk_buff *next, *prev;
-
-       list->qlen--;
-       next       = skb->next;
-       prev       = skb->prev;
-       skb->next  = skb->prev = NULL;
-       skb->list  = NULL;
-       next->prev = prev;
-       prev->next = next;
-}
-
-
-/* XXX: more streamlined implementation */
-
-/**
- *     __skb_dequeue_tail - remove from the tail of the queue
- *     @list: list to dequeue from
- *
- *     Remove the tail of the list. This function does not take any locks
- *     so must be used with appropriate locks held only. The tail item is
- *     returned or %NULL if the list is empty.
- */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
-static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
-{
-       struct sk_buff *skb = skb_peek_tail(list);
-       if (skb)
-               __skb_unlink(skb, list);
-       return skb;
-}
-
-
-static inline int skb_is_nonlinear(const struct sk_buff *skb)
-{
-       return skb->data_len;
-}
-
-static inline unsigned int skb_headlen(const struct sk_buff *skb)
-{
-       return skb->len - skb->data_len;
-}
-
-static inline int skb_pagelen(const struct sk_buff *skb)
-{
-       int i, len = 0;
-
-       for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
-               len += skb_shinfo(skb)->frags[i].size;
-       return len + skb_headlen(skb);
-}
-
-static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
-                                     struct page *page, int off, int size)
-{
-       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-       frag->page                = page;
-       frag->page_offset         = off;
-       frag->size                = size;
-       skb_shinfo(skb)->nr_frags = i + 1;
-}
-
-#define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->frag_list)
-#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
-
-/*
- *     Add data to an sk_buff
- */
-static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
-{
-       unsigned char *tmp = skb->tail;
-       SKB_LINEAR_ASSERT(skb);
-       skb->tail += len;
-       skb->len  += len;
-       return tmp;
-}
-
-/**
- *     skb_put - add data to a buffer
- *     @skb: buffer to use
- *     @len: amount of data to add
- *
- *     This function extends the used data area of the buffer. If this would
- *     exceed the total buffer size the kernel will panic. A pointer to the
- *     first byte of the extra data is returned.
- */
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
-{
-       unsigned char *tmp = skb->tail;
-       SKB_LINEAR_ASSERT(skb);
-       skb->tail += len;
-       skb->len  += len;
-       if (unlikely(skb->tail>skb->end))
-               skb_over_panic(skb, len, current_text_addr());
-       return tmp;
-}
-
-static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
-{
-       skb->data -= len;
-       skb->len  += len;
-       return skb->data;
-}
-
-/**
- *     skb_push - add data to the start of a buffer
- *     @skb: buffer to use
- *     @len: amount of data to add
- *
- *     This function extends the used data area of the buffer at the buffer
- *     start. If this would exceed the total buffer headroom the kernel will
- *     panic. A pointer to the first byte of the extra data is returned.
- */
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
-       skb->data -= len;
-       skb->len  += len;
-       if (unlikely(skb->data<skb->head))
-               skb_under_panic(skb, len, current_text_addr());
-       return skb->data;
-}
-
-static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
-{
-       skb->len -= len;
-       BUG_ON(skb->len < skb->data_len);
-       return skb->data += len;
-}
-
-/**
- *     skb_pull - remove data from the start of a buffer
- *     @skb: buffer to use
- *     @len: amount of data to remove
- *
- *     This function removes data from the start of a buffer, returning
- *     the memory to the headroom. A pointer to the next data in the buffer
- *     is returned. Once the data has been pulled future pushes will overwrite
- *     the old data.
- */
-static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
-{
-       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
-}
-
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
-
-static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
-{
-       if (len > skb_headlen(skb) &&
-           !__pskb_pull_tail(skb, len-skb_headlen(skb)))
-               return NULL;
-       skb->len -= len;
-       return skb->data += len;
-}
-
-static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
-{
-       return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
-}
-
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
-{
-       if (likely(len <= skb_headlen(skb)))
-               return 1;
-       if (unlikely(len > skb->len))
-               return 0;
-       return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
-}
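A common pattern built on pskb_may_pull(): make sure a header's bytes are in
the linear area before dereferencing them through the header unions. This
sketch assumes <linux/ip.h> for struct iphdr; iph and the drop label are
illustrative.

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;		/* packet too short, or reallocation failed */
	iph = skb->nh.iph;		/* header bytes are now linear and valid */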
-
-/**
- *     skb_headroom - bytes at buffer head
- *     @skb: buffer to check
- *
- *     Return the number of bytes of free space at the head of an &sk_buff.
- */
-static inline int skb_headroom(const struct sk_buff *skb)
-{
-       return skb->data - skb->head;
-}
-
-/**
- *     skb_tailroom - bytes at buffer end
- *     @skb: buffer to check
- *
- *     Return the number of bytes of free space at the tail of an sk_buff
- */
-static inline int skb_tailroom(const struct sk_buff *skb)
-{
-       return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
-}
-
-/**
- *     skb_reserve - adjust headroom
- *     @skb: buffer to alter
- *     @len: bytes to move
- *
- *     Increase the headroom of an empty &sk_buff by reducing the tail
- *     room. This is only allowed for an empty buffer.
- */
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
-{
-       skb->data += len;
-       skb->tail += len;
-}
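Sketch of how these primitives usually compose when building a packet from
scratch: reserve headroom first, put the payload, then push the header in
front of it. hdr_len, payload and payload_len are assumed inputs.

	struct sk_buff *skb;
	unsigned char *hdr;

	skb = alloc_skb(hdr_len + payload_len, GFP_KERNEL);
	if (skb == NULL)
		return -ENOMEM;
	skb_reserve(skb, hdr_len);			/* headroom for the header */
	memcpy(skb_put(skb, payload_len), payload, payload_len);
	hdr = skb_push(skb, hdr_len);			/* points at the header space */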
-
-/*
- * CPUs often take a performance hit when accessing unaligned memory
- * locations. The actual performance hit varies, it can be small if the
- * hardware handles it or large if we have to take an exception and fix it
- * in software.
- *
- * Since an ethernet header is 14 bytes, network drivers often end up with
- * the IP header at an unaligned offset. The IP header can be aligned by
- * shifting the start of the packet by 2 bytes. Drivers should do this
- * with:
- *
- * skb_reserve(skb, NET_IP_ALIGN);
- *
- * The downside to this alignment of the IP header is that the DMA is now
- * unaligned. On some architectures the cost of an unaligned DMA is high
- * and this cost outweighs the gains made by aligning the IP header.
- * 
- * Since this trade off varies between architectures, we allow NET_IP_ALIGN
- * to be overridden.
- */
-#ifndef NET_IP_ALIGN
-#define NET_IP_ALIGN   2
-#endif
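In practice the comment above amounts to the following receive-path sketch,
where len is the frame length a hypothetical device reported:

	skb = dev_alloc_skb(len + NET_IP_ALIGN);
	if (skb == NULL)
		return;
	skb_reserve(skb, NET_IP_ALIGN);	/* IP header lands on an aligned offset */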
-
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
-
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
-{
-       if (!skb->data_len) {
-               skb->len  = len;
-               skb->tail = skb->data + len;
-       } else
-               ___pskb_trim(skb, len, 0);
-}
-
-/**
- *     skb_trim - remove end from a buffer
- *     @skb: buffer to alter
- *     @len: new length
- *
- *     Cut the length of a buffer down by removing data from the tail. If
- *     the buffer is already under the length specified it is not modified.
- */
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
-       if (skb->len > len)
-               __skb_trim(skb, len);
-}
-
-
-static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
-{
-       if (!skb->data_len) {
-               skb->len  = len;
-               skb->tail = skb->data+len;
-               return 0;
-       }
-       return ___pskb_trim(skb, len, 1);
-}
-
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
-{
-       return (len < skb->len) ? __pskb_trim(skb, len) : 0;
-}
-
-/**
- *     skb_orphan - orphan a buffer
- *     @skb: buffer to orphan
- *
- *     If a buffer currently has an owner then we call the owner's
- *     destructor function and make the @skb unowned. The buffer continues
- *     to exist but is no longer charged to its former owner.
- */
-static inline void skb_orphan(struct sk_buff *skb)
-{
-       if (skb->destructor)
-               skb->destructor(skb);
-       skb->destructor = NULL;
-       skb->sk         = NULL;
-}
-
-/**
- *     __skb_queue_purge - empty a list
- *     @list: list to empty
- *
- *     Delete all buffers on an &sk_buff list. Each buffer is removed from
- *     the list and one reference dropped. This function does not take the
- *     list lock and the caller must hold the relevant locks to use it.
- */
-extern void skb_queue_purge(struct sk_buff_head *list);
-static inline void __skb_queue_purge(struct sk_buff_head *list)
-{
-       struct sk_buff *skb;
-       while ((skb = __skb_dequeue(list)) != NULL)
-               kfree_skb(skb);
-}
-
-/**
- *     __dev_alloc_skb - allocate an skbuff for sending
- *     @length: length to allocate
- *     @gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- *     Allocate a new &sk_buff and assign it a usage count of one. The
- *     buffer has unspecified headroom built in. Users should allocate
- *     the headroom they think they need without accounting for the
- *     built in space. The built in space is used for optimisations.
- *
- *     %NULL is returned if there is no free memory.
- */
-#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-                                             int gfp_mask)
-{
-       struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
-       if (likely(skb))
-               skb_reserve(skb, 16);
-       return skb;
-}
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
-
-/**
- *     dev_alloc_skb - allocate an skbuff for sending
- *     @length: length to allocate
- *
- *     Allocate a new &sk_buff and assign it a usage count of one. The
- *     buffer has unspecified headroom built in. Users should allocate
- *     the headroom they think they need without accounting for the
- *     built in space. The built in space is used for optimisations.
- *
- *     %NULL is returned if there is no free memory. Although this function
- *     allocates memory, it can be called from an interrupt.
- */
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-       return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-
-/**
- *     skb_cow - copy header of skb when it is required
- *     @skb: buffer to cow
- *     @headroom: needed headroom
- *
- *     If the skb passed lacks sufficient headroom or its data part
- *     is shared, data is reallocated. If reallocation fails, an error
- *     is returned and original skb is not changed.
- *
- *     The result is skb with writable area skb->head...skb->tail
- *     and at least @headroom of space at head.
- */
-static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
-{
-       int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
-
-       if (delta < 0)
-               delta = 0;
-
-       if (delta || skb_cloned(skb))
-               return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
-       return 0;
-}
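A sketch of the usual call site, for code about to rewrite headers in place
(dev and the drop label are assumptions):

	if (skb_cow(skb, dev->hard_header_len))
		goto drop;	/* could not get a private, writable copy */
	/* skb->head ... skb->tail is now writable with the requested headroom */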
-
-/**
- *     skb_padto       - pad an skbuff up to a minimal size
- *     @skb: buffer to pad
- *     @len: minimal length
- *
- *     Pads up a buffer to ensure the trailing bytes exist and are
- *     blanked. If the buffer already contains sufficient data it
- *     is untouched. Returns the buffer, which may be a replacement
- *     for the original, or NULL for out of memory - in which case
- *     the original buffer is still freed.
- */
-static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
-{
-       unsigned int size = skb->len;
-       if (likely(size >= len))
-               return skb;
-       return skb_pad(skb, len-size);
-}
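Typical transmit-path usage, sketched for the ethernet minimum frame size
(ETH_ZLEN, assumed from <linux/if_ether.h>); a %NULL return means the original
buffer has already been freed:

	skb = skb_padto(skb, ETH_ZLEN);
	if (skb == NULL)
		return 0;	/* original skb was freed by skb_padto() */
	/* trailing bytes up to ETH_ZLEN now exist and are zeroed */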
-
-static inline int skb_add_data(struct sk_buff *skb,
-                              char __user *from, int copy)
-{
-       const int off = skb->len;
-
-       if (skb->ip_summed == CHECKSUM_NONE) {
-               int err = 0;
-               unsigned int csum = csum_and_copy_from_user(from,
-                                                           skb_put(skb, copy),
-                                                           copy, 0, &err);
-               if (!err) {
-                       skb->csum = csum_block_add(skb->csum, csum, off);
-                       return 0;
-               }
-       } else if (!copy_from_user(skb_put(skb, copy), from, copy))
-               return 0;
-
-       __skb_trim(skb, off);
-       return -EFAULT;
-}
-
-static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-                                  struct page *page, int off)
-{
-       if (i) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
-
-               return page == frag->page &&
-                      off == frag->page_offset + frag->size;
-       }
-       return 0;
-}
-
-/**
- *     skb_linearize - convert paged skb to linear one
- *     @skb: buffer to linearize
- *     @gfp: allocation mode
- *
- *     If there is no free memory, -ENOMEM is returned; otherwise zero
- *     is returned and the old skb data is released.
- */
-extern int __skb_linearize(struct sk_buff *skb, int gfp);
-static inline int skb_linearize(struct sk_buff *skb, int gfp)
-{
-       return __skb_linearize(skb, gfp);
-}
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-       BUG_ON(in_irq());
-
-       local_bh_disable();
-#endif
-       return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-       kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-#ifdef CONFIG_HIGHMEM
-       local_bh_enable();
-#endif
-}
-
-#define skb_queue_walk(queue, skb) \
-               for (skb = (queue)->next, prefetch(skb->next);  \
-                    (skb != (struct sk_buff *)(queue));        \
-                    skb = skb->next, prefetch(skb->next))
-
-
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                        int noblock, int *err);
-extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-                                    struct poll_table_struct *wait);
-extern int            skb_copy_datagram(const struct sk_buff *from,
-                                        int offset, char __user *to, int size);
-extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
-                                              int offset, struct iovec *to,
-                                              int size);
-extern int            skb_copy_and_csum_datagram(const struct sk_buff *skb,
-                                                 int offset, u8 __user *to,
-                                                 int len, unsigned int *csump);
-extern int            skb_copy_and_csum_datagram_iovec(const
-                                                       struct sk_buff *skb,
-                                                       int hlen,
-                                                       struct iovec *iov);
-extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
-                                   int len, unsigned int csum);
-extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
-                                    void *to, int len);
-extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
-                                             int offset, u8 *to, int len,
-                                             unsigned int csum);
-extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void           skb_split(struct sk_buff *skb,
-                                struct sk_buff *skb1, const u32 len);
-
-extern void skb_init(void);
-extern void skb_add_mtu(int mtu);
-
-struct skb_iter {
-       /* Iteration functions set these */
-       unsigned char *data;
-       unsigned int len;
-
-       /* Private to iteration */
-       unsigned int nextfrag;
-       struct sk_buff *fraglist;
-};
-
-/* Keep iterating until skb_iter_next returns false. */
-extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
-extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
-/* Call this if aborting loop before !skb_iter_next */
-extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
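One plausible shape for a loop over every data area of a possibly fragmented
buffer, following the comments above (this iterator is specific to this sparse
tree; consume() is a placeholder):

	struct skb_iter it;

	skb_iter_first(skb, &it);
	do {
		consume(it.data, it.len);	/* linear area, then each fragment */
	} while (skb_iter_next(skb, &it));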
-
-#ifdef CONFIG_NETFILTER
-static inline void nf_conntrack_put(struct nf_ct_info *nfct)
-{
-       if (nfct && atomic_dec_and_test(&nfct->master->use))
-               nfct->master->destroy(nfct->master);
-}
-static inline void nf_conntrack_get(struct nf_ct_info *nfct)
-{
-       if (nfct)
-               atomic_inc(&nfct->master->use);
-}
-static inline void nf_reset(struct sk_buff *skb)
-{
-       nf_conntrack_put(skb->nfct);
-       skb->nfct = NULL;
-#ifdef CONFIG_NETFILTER_DEBUG
-       skb->nf_debug = 0;
-#endif
-}
-
-#ifdef CONFIG_BRIDGE_NETFILTER
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
-{
-       if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
-               kfree(nf_bridge);
-}
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
-{
-       if (nf_bridge)
-               atomic_inc(&nf_bridge->use);
-}
-#endif /* CONFIG_BRIDGE_NETFILTER */
-#else /* CONFIG_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb) {}
-#endif /* CONFIG_NETFILTER */
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_SKBUFF_H */
diff --git a/linux-2.6.8.1-xen-sparse/mkbuildtree b/linux-2.6.8.1-xen-sparse/mkbuildtree
deleted file mode 100755 (executable)
index f932b92..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/sh
-
-# mkbuildtree <build tree>
-#
-# Creates symbolic links in <build tree> for the sparse tree
-# in the current directory.
-
-# Script to determine the relative path between two directories.
-# Copyright (c) D. J. Hawkey Jr. 2002
-# Fixed for Xen project by K. Fraser in 2003.  
-abs_to_rel ()
-{
-       local CWD SRCPATH
-                
-       if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
-               SRCPATH=${1%?}
-       else
-               SRCPATH=$1
-       fi
-       if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
-               DESTPATH=${2%?}
-       else
-               DESTPATH=$2
-       fi
-
-       CWD=$PWD
-       [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
-       [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
-       [ "$CWD" != "$PWD" ] && cd $CWD
-
-       BASEPATH=$SRCPATH
-
-       [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
-       [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
-
-       while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
-          BASEPATH=${BASEPATH%/*}
-       done
-
-       SRCPATH=${SRCPATH#$BASEPATH}
-        DESTPATH=${DESTPATH#$BASEPATH}
-        DESTPATH=${DESTPATH#?}
-       while [ -n "$SRCPATH" ]; do
-               SRCPATH=${SRCPATH%/*}
-               DESTPATH="../$DESTPATH"
-       done
-
-       [ -z "$BASEPATH" ] && BASEPATH="/"
-       [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
-}
-
-# relative_lndir <target_dir>
-# Creates a tree of symlinks in the current working directory that mirror
-# real files in <target_dir>. <target_dir> should be relative to the current
-# working directory. Symlinks in <target_dir> are ignored. Source-control files
-# are ignored.
-relative_lndir ()
-{
-  local SYMLINK_DIR REAL_DIR pref i j
-  SYMLINK_DIR=$PWD
-  REAL_DIR=$1
-  (
-  cd $REAL_DIR
-  for i in `find . -type d | grep -v SCCS`; do
-    [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
-    (
-    cd $i
-    pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
-    for j in `find . -type f -o -type l -maxdepth 1`; do
-      ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
-    done
-    )
-  done
-  )
-}
-
-[ "$1" = "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
-
-# Get absolute path to the destination directory
-pushd . >/dev/null
-cd ${1}
-AD=$PWD
-popd >/dev/null
-  
-# Get absolute path to the source directory
-AS=`pwd`
-
-# Get path to source, relative to destination
-abs_to_rel ${AD} ${AS}
-RS=$DESTPATH
-
-# Remove old copies of files and directories at the destination
-for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
-
-# We now work from the destination directory
-cd ${AD}
-
-# Remove old symlinks
-for i in `find . -type l`; do rm -f $i; done
-
-# Create symlinks of files and directories which exist in the sparse source
-relative_lndir ${RS}
-rm -f mkbuildtree
-
-
-# Create links to the shared definitions of the hypervisor interface
-rm -rf ${AD}/include/asm-xen/hypervisor-ifs
-mkdir  ${AD}/include/asm-xen/hypervisor-ifs
-cd     ${AD}/include/asm-xen/hypervisor-ifs
-relative_lndir ../../../${RS}/../xen/include/hypervisor-ifs
-
diff --git a/linux-2.6.8.1-xen-sparse/mm/memory.c b/linux-2.6.8.1-xen-sparse/mm/memory.c
deleted file mode 100644 (file)
index d1f64fc..0000000
+++ /dev/null
@@ -1,1822 +0,0 @@
-/*
- *  linux/mm/memory.c
- *
- *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
- */
-
-/*
- * demand-loading started 01.12.91 - seems it is high on the list of
- * things wanted, and it should be easy to implement. - Linus
- */
-
-/*
- * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
- * pages started 02.12.91, seems to work. - Linus.
- *
- * Tested sharing by executing about 30 /bin/sh: under the old kernel it
- * would have taken more than the 6M I have free, but it worked well as
- * far as I could see.
- *
- * Also corrected some "invalidate()"s - I wasn't doing enough of them.
- */
-
-/*
- * Real VM (paging to/from disk) started 18.12.91. Much more work and
- * thought has to go into this. Oh, well..
- * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
- *             Found it. Everything seems to work now.
- * 20.12.91  -  Ok, making the swap-device changeable like the root.
- */
-
-/*
- * 05.04.94  -  Multi-page memory management added for v1.1.
- *             Idea by Alex Bligh (alex@cconcepts.co.uk)
- *
- * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
- *             (Gerhard.Wichert@pdb.siemens.de)
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/rmap.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-
-#include <linux/swapops.h>
-#include <linux/elf.h>
-
-#ifndef CONFIG_DISCONTIGMEM
-/* use the per-pgdat data instead for discontigmem - mbligh */
-unsigned long max_mapnr;
-struct page *mem_map;
-
-EXPORT_SYMBOL(max_mapnr);
-EXPORT_SYMBOL(mem_map);
-#endif
-
-unsigned long num_physpages;
-/*
- * A number of key systems in x86 including ioremap() rely on the assumption
- * that high_memory defines the upper bound on direct map memory, the end
- * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
- */
-void * high_memory;
-struct page *highmem_start_page;
-unsigned long vmalloc_earlyreserve;
-
-EXPORT_SYMBOL(num_physpages);
-EXPORT_SYMBOL(highmem_start_page);
-EXPORT_SYMBOL(high_memory);
-EXPORT_SYMBOL(vmalloc_earlyreserve);
-
-/*
- * We special-case the C-O-W ZERO_PAGE, because it's such
- * a common occurrence (no need to read the page to know
- * that it's zero - better for the cache and memory subsystem).
- */
-static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
-{
-       if (from == ZERO_PAGE(address)) {
-               clear_user_highpage(to, address);
-               return;
-       }
-       copy_user_highpage(to, from, address);
-}
-
-/*
- * Note: this doesn't free the actual pages themselves. That
- * has been handled earlier when unmapping all the memory regions.
- */
-static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
-{
-       struct page *page;
-
-       if (pmd_none(*dir))
-               return;
-       if (unlikely(pmd_bad(*dir))) {
-               pmd_ERROR(*dir);
-               pmd_clear(dir);
-               return;
-       }
-       page = pmd_page(*dir);
-       pmd_clear(dir);
-       dec_page_state(nr_page_table_pages);
-       pte_free_tlb(tlb, page);
-}
-
-static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
-{
-       int j;
-       pmd_t * pmd;
-
-       if (pgd_none(*dir))
-               return;
-       if (unlikely(pgd_bad(*dir))) {
-               pgd_ERROR(*dir);
-               pgd_clear(dir);
-               return;
-       }
-       pmd = pmd_offset(dir, 0);
-       pgd_clear(dir);
-       for (j = 0; j < PTRS_PER_PMD ; j++)
-               free_one_pmd(tlb, pmd+j);
-       pmd_free_tlb(tlb, pmd);
-}
-
-/*
- * This function clears all user-level page tables of a process - this
- * is needed by execve(), so that old pages aren't in the way.
- *
- * Must be called with pagetable lock held.
- */
-void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
-{
-       pgd_t * page_dir = tlb->mm->pgd;
-
-       page_dir += first;
-       do {
-               free_one_pgd(tlb, page_dir);
-               page_dir++;
-       } while (--nr);
-}
-
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
-       if (!pmd_present(*pmd)) {
-               struct page *new;
-
-               spin_unlock(&mm->page_table_lock);
-               new = pte_alloc_one(mm, address);
-               spin_lock(&mm->page_table_lock);
-               if (!new)
-                       return NULL;
-
-               /*
-                * Because we dropped the lock, we should re-check the
-                * entry, as somebody else could have populated it..
-                */
-               if (pmd_present(*pmd)) {
-                       pte_free(new);
-                       goto out;
-               }
-               inc_page_state(nr_page_table_pages);
-               pmd_populate(mm, pmd, new);
-       }
-out:
-       return pte_offset_map(pmd, address);
-}
-
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
-       if (!pmd_present(*pmd)) {
-               pte_t *new;
-
-               spin_unlock(&mm->page_table_lock);
-               new = pte_alloc_one_kernel(mm, address);
-               spin_lock(&mm->page_table_lock);
-               if (!new)
-                       return NULL;
-
-               /*
-                * Because we dropped the lock, we should re-check the
-                * entry, as somebody else could have populated it..
-                */
-               if (pmd_present(*pmd)) {
-                       pte_free_kernel(new);
-                       goto out;
-               }
-               pmd_populate_kernel(mm, pmd, new);
-       }
-out:
-       return pte_offset_kernel(pmd, address);
-}
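Callers of these helpers typically hold mm->page_table_lock and walk the page
tables top-down, along the lines of this sketch (mm, pgd, address and the oom
label are assumptions; as the comments in these functions note, the lock may
be dropped and retaken inside the allocation calls):

	pmd = pmd_alloc(mm, pgd, address);
	if (pmd == NULL)
		goto oom;
	pte = pte_alloc_map(mm, pmd, address);
	if (pte == NULL)
		goto oom;
	/* ... examine or set *pte ... */
	pte_unmap(pte);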
-#define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
-#define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
-
-/*
- * copy one vm_area from one task to the other. Assumes the page tables
- * already present in the new task to be cleared in the whole range
- * covered by this vma.
- *
- * 08Jan98 Merged into one routine from several inline routines to reduce
- *         variable count and make things faster. -jj
- *
- * dst->page_table_lock is held on entry and exit,
- * but may be dropped within pmd_alloc() and pte_alloc_map().
- */
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
-                       struct vm_area_struct *vma)
-{
-       pgd_t * src_pgd, * dst_pgd;
-       unsigned long address = vma->vm_start;
-       unsigned long end = vma->vm_end;
-       unsigned long cow;
-
-       if (is_vm_hugetlb_page(vma))
-               return copy_hugetlb_page_range(dst, src, vma);
-
-       cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
-       src_pgd = pgd_offset(src, address)-1;
-       dst_pgd = pgd_offset(dst, address)-1;
-
-       for (;;) {
-               pmd_t * src_pmd, * dst_pmd;
-
-               src_pgd++; dst_pgd++;
-               
-               /* copy_pmd_range */
-               
-               if (pgd_none(*src_pgd))
-                       goto skip_copy_pmd_range;
-               if (unlikely(pgd_bad(*src_pgd))) {
-                       pgd_ERROR(*src_pgd);
-                       pgd_clear(src_pgd);
-skip_copy_pmd_range:   address = (address + PGDIR_SIZE) & PGDIR_MASK;
-                       if (!address || (address >= end))
-                               goto out;
-                       continue;
-               }
-
-               src_pmd = pmd_offset(src_pgd, address);
-               dst_pmd = pmd_alloc(dst, dst_pgd, address);
-               if (!dst_pmd)
-                       goto nomem;
-
-               do {
-                       pte_t * src_pte, * dst_pte;
-               
-                       /* copy_pte_range */
-               
-                       if (pmd_none(*src_pmd))
-                               goto skip_copy_pte_range;
-                       if (unlikely(pmd_bad(*src_pmd))) {
-                               pmd_ERROR(*src_pmd);
-                               pmd_clear(src_pmd);
-skip_copy_pte_range:
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               if (address >= end)
-                                       goto out;
-                               goto cont_copy_pmd_range;
-                       }
-
-                       dst_pte = pte_alloc_map(dst, dst_pmd, address);
-                       if (!dst_pte)
-                               goto nomem;
-                       spin_lock(&src->page_table_lock);       
-                       src_pte = pte_offset_map_nested(src_pmd, address);
-                       do {
-                               pte_t pte = *src_pte;
-                               struct page *page;
-                               unsigned long pfn;
-
-                               /* copy_one_pte */
-
-                               if (pte_none(pte))
-                                       goto cont_copy_pte_range_noset;
-                               /* pte contains position in swap, so copy. */
-                               if (!pte_present(pte)) {
-                                       if (!pte_file(pte))
-                                               swap_duplicate(pte_to_swp_entry(pte));
-                                       set_pte(dst_pte, pte);
-                                       goto cont_copy_pte_range_noset;
-                               }
-                               pfn = pte_pfn(pte);
-                               /* the pte points outside of valid memory, the
-                                * mapping is assumed to be good, meaningful
-                                * and not mapped via rmap - duplicate the
-                                * mapping as is.
-                                */
-                               page = NULL;
-                               if (pfn_valid(pfn)) 
-                                       page = pfn_to_page(pfn); 
-
-                               if (!page || PageReserved(page)) {
-                                       set_pte(dst_pte, pte);
-                                       goto cont_copy_pte_range_noset;
-                               }
-
-                               /*
-                                * If it's a COW mapping, write protect it both
-                                * in the parent and the child
-                                */
-                               if (cow) {
-                                       ptep_set_wrprotect(src_pte);
-                                       pte = *src_pte;
-                               }
-
-                               /*
-                                * If it's a shared mapping, mark it clean in
-                                * the child
-                                */
-                               if (vma->vm_flags & VM_SHARED)
-                                       pte = pte_mkclean(pte);
-                               pte = pte_mkold(pte);
-                               get_page(page);
-                               dst->rss++;
-                               set_pte(dst_pte, pte);
-                               page_dup_rmap(page);
-cont_copy_pte_range_noset:
-                               address += PAGE_SIZE;
-                               if (address >= end) {
-                                       pte_unmap_nested(src_pte);
-                                       pte_unmap(dst_pte);
-                                       goto out_unlock;
-                               }
-                               src_pte++;
-                               dst_pte++;
-                       } while ((unsigned long)src_pte & PTE_TABLE_MASK);
-                       pte_unmap_nested(src_pte-1);
-                       pte_unmap(dst_pte-1);
-                       spin_unlock(&src->page_table_lock);
-                       cond_resched_lock(&dst->page_table_lock);
-cont_copy_pmd_range:
-                       src_pmd++;
-                       dst_pmd++;
-               } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
-       }
-out_unlock:
-       spin_unlock(&src->page_table_lock);
-out:
-       return 0;
-nomem:
-       return -ENOMEM;
-}
-
-static void zap_pte_range(struct mmu_gather *tlb,
-               pmd_t *pmd, unsigned long address,
-               unsigned long size, struct zap_details *details)
-{
-       unsigned long offset;
-       pte_t *ptep;
-
-       if (pmd_none(*pmd))
-               return;
-       if (unlikely(pmd_bad(*pmd))) {
-               pmd_ERROR(*pmd);
-               pmd_clear(pmd);
-               return;
-       }
-       ptep = pte_offset_map(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       if (details && !details->check_mapping && !details->nonlinear_vma)
-               details = NULL;
-       for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-               if (pte_none(pte))
-                       continue;
-               if (pte_present(pte)) {
-                       struct page *page = NULL;
-                       unsigned long pfn = pte_pfn(pte);
-                       if (pfn_valid(pfn)) {
-                               page = pfn_to_page(pfn);
-                               if (PageReserved(page))
-                                       page = NULL;
-                       }
-                       if (unlikely(details) && page) {
-                               /*
-                                * unmap_shared_mapping_pages() wants to
-                                * invalidate cache without truncating:
-                                * unmap shared but keep private pages.
-                                */
-                               if (details->check_mapping &&
-                                   details->check_mapping != page->mapping)
-                                       continue;
-                               /*
-                                * Each page->index must be checked when
-                                * invalidating or truncating nonlinear.
-                                */
-                               if (details->nonlinear_vma &&
-                                   (page->index < details->first_index ||
-                                    page->index > details->last_index))
-                                       continue;
-                       }
-                       pte = ptep_get_and_clear(ptep);
-                       tlb_remove_tlb_entry(tlb, ptep, address+offset);
-                       if (unlikely(!page))
-                               continue;
-                       if (unlikely(details) && details->nonlinear_vma
-                           && linear_page_index(details->nonlinear_vma,
-                                       address+offset) != page->index)
-                               set_pte(ptep, pgoff_to_pte(page->index));
-                       if (pte_dirty(pte))
-                               set_page_dirty(page);
-                       if (pte_young(pte) && !PageAnon(page))
-                               mark_page_accessed(page);
-                       tlb->freed++;
-                       page_remove_rmap(page);
-                       tlb_remove_page(tlb, page);
-                       continue;
-               }
-               /*
-                * If details->check_mapping, we leave swap entries;
-                * if details->nonlinear_vma, we leave file entries.
-                */
-               if (unlikely(details))
-                       continue;
-               if (!pte_file(pte))
-                       free_swap_and_cache(pte_to_swp_entry(pte));
-               pte_clear(ptep);
-       }
-       pte_unmap(ptep-1);
-}
-
-static void zap_pmd_range(struct mmu_gather *tlb,
-               pgd_t * dir, unsigned long address,
-               unsigned long size, struct zap_details *details)
-{
-       pmd_t * pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       if (unlikely(pgd_bad(*dir))) {
-               pgd_ERROR(*dir);
-               pgd_clear(dir);
-               return;
-       }
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       do {
-               zap_pte_range(tlb, pmd, address, end - address, details);
-               address = (address + PMD_SIZE) & PMD_MASK; 
-               pmd++;
-       } while (address && (address < end));
-}
-
-static void unmap_page_range(struct mmu_gather *tlb,
-               struct vm_area_struct *vma, unsigned long address,
-               unsigned long end, struct zap_details *details)
-{
-       pgd_t * dir;
-
-       BUG_ON(address >= end);
-       dir = pgd_offset(vma->vm_mm, address);
-       tlb_start_vma(tlb, vma);
-       do {
-               zap_pmd_range(tlb, dir, address, end - address, details);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       tlb_end_vma(tlb, vma);
-}
-
-/* Dispose of an entire struct mmu_gather per rescheduling point */
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
-#endif
-
-/* For UP, 256 pages at a time gives nice low latency */
-#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (256 * PAGE_SIZE)
-#endif
-
-/* No preempt: go for improved straight-line efficiency */
-#if !defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
-#endif
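
For a rough feel of what these chunk sizes mean, here is a throwaway user-space calculation, assuming 4 KiB pages and taking FREE_PTE_NR as 506 (the value I believe asm-generic/tlb.h used in this era; treat it as an assumption, not something taken from this diff):

    /* zap_block_sizes.c - back-of-the-envelope ZAP_BLOCK_SIZE values */
    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 4096;       /* assumed 4 KiB pages */
            unsigned long free_pte_nr = 506;      /* assumption, see lead-in */

            printf("SMP + PREEMPT : %lu KiB per chunk\n", free_pte_nr * page_size / 1024);
            printf("UP  + PREEMPT : %lu KiB per chunk\n", 256UL * page_size / 1024);
            printf("no PREEMPT    : %lu KiB per chunk\n", 1024UL * page_size / 1024);
            return 0;
    }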
-
-/**
- * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
- * @mm: the controlling mm_struct
- * @vma: the starting vma
- * @start_addr: virtual address at which to start unmapping
- * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
- *
- * Returns the number of vma's which were covered by the unmapping.
- *
- * Unmap all pages in the vma list.  Called under page_table_lock.
- *
- * We aim to not hold page_table_lock for too long (for scheduling latency
- * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
- * return the ending mmu_gather to the caller.
- *
- * Only addresses between `start' and `end' will be unmapped.
- *
- * The VMA list must be sorted in ascending virtual address order.
- *
- * unmap_vmas() assumes that the caller will flush the whole unmapped address
- * range after unmap_vmas() returns.  So the only responsibility here is to
- * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
- * drops the lock and schedules.
- */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long *nr_accounted,
-               struct zap_details *details)
-{
-       unsigned long zap_bytes = ZAP_BLOCK_SIZE;
-       unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
-       int tlb_start_valid = 0;
-       int ret = 0;
-       int atomic = details && details->atomic;
-
-       for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-               unsigned long start;
-               unsigned long end;
-
-               start = max(vma->vm_start, start_addr);
-               if (start >= vma->vm_end)
-                       continue;
-               end = min(vma->vm_end, end_addr);
-               if (end <= vma->vm_start)
-                       continue;
-
-               if (vma->vm_flags & VM_ACCOUNT)
-                       *nr_accounted += (end - start) >> PAGE_SHIFT;
-
-               ret++;
-               while (start != end) {
-                       unsigned long block;
-
-                       if (!tlb_start_valid) {
-                               tlb_start = start;
-                               tlb_start_valid = 1;
-                       }
-
-                       if (is_vm_hugetlb_page(vma)) {
-                               block = end - start;
-                               unmap_hugepage_range(vma, start, end);
-                       } else {
-                               block = min(zap_bytes, end - start);
-                               unmap_page_range(*tlbp, vma, start,
-                                               start + block, details);
-                       }
-
-                       start += block;
-                       zap_bytes -= block;
-                       if ((long)zap_bytes > 0)
-                               continue;
-                       if (!atomic && need_resched()) {
-                               int fullmm = tlb_is_full_mm(*tlbp);
-                               tlb_finish_mmu(*tlbp, tlb_start, start);
-                               cond_resched_lock(&mm->page_table_lock);
-                               *tlbp = tlb_gather_mmu(mm, fullmm);
-                               tlb_start_valid = 0;
-                       }
-                       zap_bytes = ZAP_BLOCK_SIZE;
-               }
-       }
-       return ret;
-}
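
The control flow above is easier to see stripped of the MM details: work is done in ZAP_BLOCK_SIZE slices, and after each full slice the gather state is flushed so the lock can be dropped for a reschedule. A minimal user-space sketch of that pattern (the names mirror the function above, but nothing here is kernel code):

    #include <stdio.h>

    #define ZAP_BLOCK_SIZE (256 * 4096UL)   /* pretend UP + PREEMPT, 4 KiB pages */

    int main(void)
    {
            unsigned long start = 0, end = 5 * 1024 * 1024;   /* a 5 MiB "vma" */
            unsigned long zap_bytes = ZAP_BLOCK_SIZE;

            while (start != end) {
                    unsigned long block = end - start;
                    if (block > zap_bytes)
                            block = zap_bytes;

                    /* ... unmap_page_range() would run here ... */
                    start += block;
                    zap_bytes -= block;
                    if ((long)zap_bytes > 0)
                            continue;

                    /* a full chunk is done: flush the gather, allow a resched */
                    printf("flushed up to %lu KiB; this is where we could resched\n",
                           start / 1024);
                    zap_bytes = ZAP_BLOCK_SIZE;
            }
            return 0;
    }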
-
-/**
- * zap_page_range - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
- * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
- */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
-               unsigned long size, struct zap_details *details)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       struct mmu_gather *tlb;
-       unsigned long end = address + size;
-       unsigned long nr_accounted = 0;
-
-       if (is_vm_hugetlb_page(vma)) {
-               zap_hugepage_range(vma, address, size);
-               return;
-       }
-
-       lru_add_drain();
-       spin_lock(&mm->page_table_lock);
-       tlb = tlb_gather_mmu(mm, 0);
-       unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
-       tlb_finish_mmu(tlb, address, end);
-       spin_unlock(&mm->page_table_lock);
-}
-
-/*
- * Do a quick page-table lookup for a single page.
- * mm->page_table_lock must be held.
- */
-struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write) 
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-       unsigned long pfn;
-       struct page *page;
-
-       page = follow_huge_addr(mm, address, write);
-       if (! IS_ERR(page))
-               return page;
-
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto out;
-
-       pmd = pmd_offset(pgd, address);
-       if (pmd_none(*pmd))
-               goto out;
-       if (pmd_huge(*pmd))
-               return follow_huge_pmd(mm, address, pmd, write);
-       if (unlikely(pmd_bad(*pmd)))
-               goto out;
-
-       ptep = pte_offset_map(pmd, address);
-       if (!ptep)
-               goto out;
-
-       pte = *ptep;
-       pte_unmap(ptep);
-       if (pte_present(pte)) {
-               if (write && !pte_write(pte))
-                       goto out;
-               pfn = pte_pfn(pte);
-               if (pfn_valid(pfn)) {
-                       page = pfn_to_page(pfn);
-                       if (write && !pte_dirty(pte) && !PageDirty(page))
-                               set_page_dirty(page);
-                       mark_page_accessed(page);
-                       return page;
-               }
-       }
-
-out:
-       return NULL;
-}
-
-/* 
- * Given a physical address, is there a useful struct page pointing to
- * it?  This may become more complex in the future if we start dealing
- * with IO-aperture pages for direct-IO.
- */
-
-static inline struct page *get_page_map(struct page *page)
-{
-       if (!pfn_valid(page_to_pfn(page)))
-               return NULL;
-       return page;
-}
-
-
-static inline int
-untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
-                        unsigned long address)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-
-       /* Check if the vma is for an anonymous mapping. */
-       if (vma->vm_ops && vma->vm_ops->nopage)
-               return 0;
-
-       /* Check if page directory entry exists. */
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return 1;
-
-       /* Check if page middle directory entry exists. */
-       pmd = pmd_offset(pgd, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               return 1;
-
-       /* There is a pte slot for 'address' in 'mm'. */
-       return 0;
-}
-
-
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
-               struct page **pages, struct vm_area_struct **vmas)
-{
-       int i;
-       unsigned int flags;
-
-       /* 
-        * Require read or write permissions.
-        * If 'force' is set, we only require the "MAY" flags.
-        */
-       flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-       i = 0;
-
-       do {
-               struct vm_area_struct * vma;
-
-               vma = find_extend_vma(mm, start);
-               if (!vma && in_gate_area(tsk, start)) {
-                       unsigned long pg = start & PAGE_MASK;
-                       struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-                       pgd_t *pgd;
-                       pmd_t *pmd;
-                       pte_t *pte;
-                       if (write) /* user gate pages are read-only */
-                               return i ? : -EFAULT;
-                       pgd = pgd_offset_gate(mm, pg);
-                       if (!pgd)
-                               return i ? : -EFAULT;
-                       pmd = pmd_offset(pgd, pg);
-                       if (!pmd)
-                               return i ? : -EFAULT;
-                       pte = pte_offset_map(pmd, pg);
-                       if (!pte)
-                               return i ? : -EFAULT;
-                       if (!pte_present(*pte)) {
-                               pte_unmap(pte);
-                               return i ? : -EFAULT;
-                       }
-                       if (pages) {
-                               pages[i] = pte_page(*pte);
-                               get_page(pages[i]);
-                       }
-                       pte_unmap(pte);
-                       if (vmas)
-                               vmas[i] = gate_vma;
-                       i++;
-                       start += PAGE_SIZE;
-                       len--;
-                       continue;
-               }
-
-               if (!vma || (pages && (vma->vm_flags & VM_IO))
-                               || !(flags & vma->vm_flags))
-                       return i ? : -EFAULT;
-
-               if (is_vm_hugetlb_page(vma)) {
-                       i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &len, i);
-                       continue;
-               }
-               spin_lock(&mm->page_table_lock);
-               do {
-                       struct page *map;
-                       int lookup_write = write;
-                       while (!(map = follow_page(mm, start, lookup_write))) {
-                               /*
-                                * Shortcut for anonymous pages. We don't want
-                                * to force the creation of page tables for
-                                * insanely big anonymously mapped areas that
-                                * nobody touched so far. This is important
-                                * for doing a core dump for these mappings.
-                                */
-                               if (!lookup_write &&
-                                   untouched_anonymous_page(mm,vma,start)) {
-                                       map = ZERO_PAGE(start);
-                                       break;
-                               }
-                               spin_unlock(&mm->page_table_lock);
-                               switch (handle_mm_fault(mm,vma,start,write)) {
-                               case VM_FAULT_MINOR:
-                                       tsk->min_flt++;
-                                       break;
-                               case VM_FAULT_MAJOR:
-                                       tsk->maj_flt++;
-                                       break;
-                               case VM_FAULT_SIGBUS:
-                                       return i ? i : -EFAULT;
-                               case VM_FAULT_OOM:
-                                       return i ? i : -ENOMEM;
-                               default:
-                                       BUG();
-                               }
-                               /*
-                                * Now that we have performed a write fault
-                                * and surely no longer have a shared page we
-                                * shouldn't write, we shouldn't ignore an
-                                * unwritable page in the page table if
-                                * we are forcing write access.
-                                */
-                               lookup_write = write && !force;
-                               spin_lock(&mm->page_table_lock);
-                       }
-                       if (pages) {
-                               pages[i] = get_page_map(map);
-                               if (!pages[i]) {
-                                       spin_unlock(&mm->page_table_lock);
-                                       while (i--)
-                                               page_cache_release(pages[i]);
-                                       i = -EFAULT;
-                                       goto out;
-                               }
-                               flush_dcache_page(pages[i]);
-                               if (!PageReserved(pages[i]))
-                                       page_cache_get(pages[i]);
-                       }
-                       if (vmas)
-                               vmas[i] = vma;
-                       i++;
-                       start += PAGE_SIZE;
-                       len--;
-               } while(len && start < vma->vm_end);
-               spin_unlock(&mm->page_table_lock);
-       } while(len);
-out:
-       return i;
-}
-
-EXPORT_SYMBOL(get_user_pages);
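
As a usage note (not part of this file), a 2.6-era caller pinning a user buffer for I/O would typically take mmap_sem for reading around the call and drop each page reference afterwards. A hedged sketch only; `buf`, `nr_pages`, `pages`, `ret` and `i` are placeholders, not names from this tree:

    /* sketch: pin a user buffer with the 2.6-era get_user_pages() API */
    down_read(&current->mm->mmap_sem);
    ret = get_user_pages(current, current->mm,
                         (unsigned long)buf & PAGE_MASK,  /* start, page aligned */
                         nr_pages,                        /* number of pages */
                         1,                               /* write access wanted */
                         0,                               /* no force */
                         pages,                           /* struct page *pages[] */
                         NULL);                           /* don't need the vmas */
    up_read(&current->mm->mmap_sem);

    if (ret > 0) {
            /* ... do the I/O against pages[0..ret-1] ... */
            for (i = 0; i < ret; i++)
                    page_cache_release(pages[i]);
    }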
-
-static void zeromap_pte_range(pte_t * pte, unsigned long address,
-                                     unsigned long size, pgprot_t prot)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
-               BUG_ON(!pte_none(*pte));
-               set_pte(pte, zero_pte);
-               address += PAGE_SIZE;
-               pte++;
-       } while (address && (address < end));
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
-                                    unsigned long size, pgprot_t prot)
-{
-       unsigned long base, end;
-
-       base = address & PGDIR_MASK;
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       do {
-               pte_t * pte = pte_alloc_map(mm, pmd, base + address);
-               if (!pte)
-                       return -ENOMEM;
-               zeromap_pte_range(pte, base + address, end - address, prot);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = address;
-       unsigned long end = address + size;
-       struct mm_struct *mm = vma->vm_mm;
-
-       dir = pgd_offset(mm, address);
-       flush_cache_range(vma, beg, end);
-       if (address >= end)
-               BUG();
-
-       spin_lock(&mm->page_table_lock);
-       do {
-               pmd_t *pmd = pmd_alloc(mm, dir, address);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
-               if (error)
-                       break;
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       /*
-        * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
-        */
-       flush_tlb_range(vma, beg, end);
-       spin_unlock(&mm->page_table_lock);
-       return error;
-}
-
-/*
- * maps a range of physical memory into the requested pages. the old
- * mappings are removed. any references to nonexistent pages result
- * in null mappings (currently treated as "copy-on-access")
- */
-static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
-       unsigned long phys_addr, pgprot_t prot)
-{
-       unsigned long end;
-       unsigned long pfn;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       pfn = phys_addr >> PAGE_SHIFT;
-       do {
-               BUG_ON(!pte_none(*pte));
-               if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
-                       set_pte(pte, pfn_pte(pfn, prot));
-               address += PAGE_SIZE;
-               pfn++;
-               pte++;
-       } while (address && (address < end));
-}
-
-static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long phys_addr, pgprot_t prot)
-{
-       unsigned long base, end;
-
-       base = address & PGDIR_MASK;
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       phys_addr -= address;
-       do {
-               pte_t * pte = pte_alloc_map(mm, pmd, base + address);
-               if (!pte)
-                       return -ENOMEM;
-               remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
-}
-
-/*  Note: this is only safe if the mm semaphore is held when called. */
-int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = vma->vm_mm;
-
-       phys_addr -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(vma, beg, end);
-       if (from >= end)
-               BUG();
-
-       spin_lock(&mm->page_table_lock);
-       do {
-               pmd_t *pmd = pmd_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (from && (from < end));
-       /*
-        * Why flush? remap_pte_range has a BUG_ON for !pte_none()
-        */
-       flush_tlb_range(vma, beg, end);
-       spin_unlock(&mm->page_table_lock);
-       return error;
-}
-
-EXPORT_SYMBOL(remap_page_range);
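
For context, the classic consumer of remap_page_range() in this kernel generation is a driver's mmap() method. A hedged sketch, using the five-argument signature shown above; the driver name and FOO_PHYS_BASE are made up for illustration:

    /* sketch: a 2.6.8-era character-driver mmap() that maps a fictional
     * device aperture at FOO_PHYS_BASE into user space */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            if (remap_page_range(vma, vma->vm_start,
                                 FOO_PHYS_BASE + (vma->vm_pgoff << PAGE_SHIFT),
                                 size, vma->vm_page_prot))
                    return -EAGAIN;
            return 0;
    }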
-
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
- * servicing faults for write access.  In the normal case, we always want
- * pte_mkwrite.  But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
-       if (likely(vma->vm_flags & VM_WRITE))
-               pte = pte_mkwrite(pte);
-       return pte;
-}
-
-/*
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
- */
-static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
-               pte_t *page_table)
-{
-       pte_t entry;
-
-       flush_cache_page(vma, address);
-       entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
-                             vma);
-       ptep_establish(vma, address, page_table, entry);
-       update_mmu_cache(vma, address, entry);
-}
-
-/*
- * This routine handles present pages, when users try to write
- * to a shared page. It is done by copying the page to a new address
- * and decrementing the shared-page counter for the old page.
- *
- * Goto-purists beware: the only reason for goto's here is that it results
- * in better assembly code.. The "default" path will see no jumps at all.
- *
- * Note that this routine assumes that the protection checks have been
- * done by the caller (the low-level page fault routine in most cases).
- * Thus we can safely just mark it writable once we've done any necessary
- * COW.
- *
- * We also mark the page dirty at this point even though the page will
- * change only once the write actually happens. This avoids a few races,
- * and potentially makes it more efficient.
- *
- * We hold the mm semaphore and the page_table_lock on entry and exit
- * with the page_table_lock released.
- */
-static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
-       unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
-{
-       struct page *old_page, *new_page;
-       unsigned long pfn = pte_pfn(pte);
-       pte_t entry;
-
-       if (unlikely(!pfn_valid(pfn))) {
-               /*
-                * This should really halt the system so it can be debugged, or
-                * at least stop the kernel before it corrupts data, but for the
-                * moment just pretend this is OOM.
-                */
-               pte_unmap(page_table);
-               printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
-                               address);
-               spin_unlock(&mm->page_table_lock);
-               return VM_FAULT_OOM;
-       }
-       old_page = pfn_to_page(pfn);
-
-       if (!TestSetPageLocked(old_page)) {
-               int reuse = can_share_swap_page(old_page);
-               unlock_page(old_page);
-               if (reuse) {
-                       flush_cache_page(vma, address);
-                       entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
-                                             vma);
-                       ptep_set_access_flags(vma, address, page_table, entry, 1);
-                       update_mmu_cache(vma, address, entry);
-                       pte_unmap(page_table);
-                       spin_unlock(&mm->page_table_lock);
-                       return VM_FAULT_MINOR;
-               }
-       }
-       pte_unmap(page_table);
-
-       /*
-        * Ok, we need to copy. Oh, well..
-        */
-       if (!PageReserved(old_page))
-               page_cache_get(old_page);
-       spin_unlock(&mm->page_table_lock);
-
-       if (unlikely(anon_vma_prepare(vma)))
-               goto no_new_page;
-       new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-       if (!new_page)
-               goto no_new_page;
-       copy_cow_page(old_page,new_page,address);
-
-       /*
-        * Re-check the pte - we dropped the lock
-        */
-       spin_lock(&mm->page_table_lock);
-       page_table = pte_offset_map(pmd, address);
-       if (likely(pte_same(*page_table, pte))) {
-               if (PageReserved(old_page))
-                       ++mm->rss;
-               else
-                       page_remove_rmap(old_page);
-               break_cow(vma, new_page, address, page_table);
-               lru_cache_add_active(new_page);
-               page_add_anon_rmap(new_page, vma, address);
-
-               /* Free the old page.. */
-               new_page = old_page;
-       }
-       pte_unmap(page_table);
-       page_cache_release(new_page);
-       page_cache_release(old_page);
-       spin_unlock(&mm->page_table_lock);
-       return VM_FAULT_MINOR;
-
-no_new_page:
-       page_cache_release(old_page);
-       return VM_FAULT_OOM;
-}
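
The effect do_wp_page() implements is observable from user space: after fork(), a MAP_PRIVATE page is shared read-only between parent and child until one side writes, at which point the writer gets its own copy. A small runnable illustration (plain user-space C, nothing kernel-specific assumed):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            strcpy(p, "parent data");
            if (fork() == 0) {
                    strcpy(p, "child data");    /* write fault -> COW copy */
                    printf("child sees : %s\n", p);
                    _exit(0);
            }
            wait(NULL);
            printf("parent sees: %s\n", p);     /* still "parent data" */
            munmap(p, 4096);
            return 0;
    }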
-
-/*
- * Helper function for unmap_mapping_range().
- */
-static inline void unmap_mapping_range_list(struct prio_tree_root *root,
-                                           struct zap_details *details)
-{
-       struct vm_area_struct *vma = NULL;
-       struct prio_tree_iter iter;
-       pgoff_t vba, vea, zba, zea;
-
-       while ((vma = vma_prio_tree_next(vma, root, &iter,
-                       details->first_index, details->last_index)) != NULL) {
-               vba = vma->vm_pgoff;
-               vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
-               /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
-               zba = details->first_index;
-               if (zba < vba)
-                       zba = vba;
-               zea = details->last_index;
-               if (zea > vea)
-                       zea = vea;
-               zap_page_range(vma,
-                       ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
-                       (zea - zba + 1) << PAGE_SHIFT, details);
-       }
-}
-
-/**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
- * @address_space: the address space containing mmaps to be unmapped.
- * @holebegin: byte in first page to unmap, relative to the start of
- * the underlying file.  This will be rounded down to a PAGE_SIZE
- * boundary.  Note that this is different from vmtruncate(), which
- * must keep the partial page.  In contrast, we must get rid of
- * partial pages.
- * @holelen: size of prospective hole in bytes.  This will be rounded
- * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
- * end of the file.
- * @even_cows: 1 when truncating a file, unmap even private COWed pages;
- * but 0 when invalidating pagecache, don't throw away private data.
- */
-void unmap_mapping_range(struct address_space *mapping,
-               loff_t const holebegin, loff_t const holelen, int even_cows)
-{
-       struct zap_details details;
-       pgoff_t hba = holebegin >> PAGE_SHIFT;
-       pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       /* Check for overflow. */
-       if (sizeof(holelen) > sizeof(hlen)) {
-               long long holeend =
-                       (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
-               if (holeend & ~(long long)ULONG_MAX)
-                       hlen = ULONG_MAX - hba + 1;
-       }
-
-       details.check_mapping = even_cows? NULL: mapping;
-       details.nonlinear_vma = NULL;
-       details.first_index = hba;
-       details.last_index = hba + hlen - 1;
-       details.atomic = 1;     /* A spinlock is held */
-       if (details.last_index < details.first_index)
-               details.last_index = ULONG_MAX;
-
-       spin_lock(&mapping->i_mmap_lock);
-       /* Protect against page fault */
-       atomic_inc(&mapping->truncate_count);
-
-       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
-               unmap_mapping_range_list(&mapping->i_mmap, &details);
-
-       /*
-        * In nonlinear VMAs there is no correspondence between virtual address
-        * offset and file offset.  So we must perform an exhaustive search
-        * across *all* the pages in each nonlinear VMA, not just the pages
-        * whose virtual address lies outside the file truncation point.
-        */
-       if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
-               struct vm_area_struct *vma;
-               list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-                                               shared.vm_set.list) {
-                       details.nonlinear_vma = vma;
-                       zap_page_range(vma, vma->vm_start,
-                               vma->vm_end - vma->vm_start, &details);
-               }
-       }
-       spin_unlock(&mapping->i_mmap_lock);
-}
-EXPORT_SYMBOL(unmap_mapping_range);
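
The index arithmetic above (round the hole start down to a page boundary, round the length up, then convert both to page-cache indices) is easy to sanity-check in isolation; a throwaway user-space computation assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long holebegin = 6000;     /* byte offsets into the file */
            unsigned long holelen   = 10000;

            unsigned long hba  = holebegin >> PAGE_SHIFT;                 /* rounded down */
            unsigned long hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* rounded up   */

            /* prints: first_index = 1, last_index = 3 */
            printf("first_index = %lu, last_index = %lu\n", hba, hba + hlen - 1);
            return 0;
    }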
-
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page.  Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
-       struct address_space *mapping = inode->i_mapping;
-       unsigned long limit;
-
-       if (inode->i_size < offset)
-               goto do_expand;
-       /*
-        * truncation of in-use swapfiles is disallowed - it would cause
-        * subsequent swapout to scribble on the now-freed blocks.
-        */
-       if (IS_SWAPFILE(inode))
-               goto out_busy;
-       i_size_write(inode, offset);
-       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       truncate_inode_pages(mapping, offset);
-       goto out_truncate;
-
-do_expand:
-       limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && offset > limit)
-               goto out_sig;
-       if (offset > inode->i_sb->s_maxbytes)
-               goto out_big;
-       i_size_write(inode, offset);
-
-out_truncate:
-       if (inode->i_op && inode->i_op->truncate)
-               inode->i_op->truncate(inode);
-       return 0;
-out_sig:
-       send_sig(SIGXFSZ, current, 0);
-out_big:
-       return -EFBIG;
-out_busy:
-       return -ETXTBSY;
-}
-
-EXPORT_SYMBOL(vmtruncate);
-
-/* 
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time.  We also make sure to queue
- * the 'original' request together with the readahead ones...  
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
-#ifdef CONFIG_NUMA
-       struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
-       int i, num;
-       struct page *new_page;
-       unsigned long offset;
-
-       /*
-        * Get the number of handles we should do readahead io to.
-        */
-       num = valid_swaphandles(entry, &offset);
-       for (i = 0; i < num; offset++, i++) {
-               /* Ok, do the async read-ahead now */
-               new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-                                                          offset), vma, addr);
-               if (!new_page)
-                       break;
-               page_cache_release(new_page);
-#ifdef CONFIG_NUMA
-               /*
-                * Find the next applicable VMA for the NUMA policy.
-                */
-               addr += PAGE_SIZE;
-               if (addr == 0)
-                       vma = NULL;
-               if (vma) {
-                       if (addr >= vma->vm_end) {
-                               vma = next_vma;
-                               next_vma = vma ? vma->vm_next : NULL;
-                       }
-                       if (vma && addr < vma->vm_start)
-                               vma = NULL;
-               } else {
-                       if (next_vma && addr >= next_vma->vm_start) {
-                               vma = next_vma;
-                               next_vma = vma->vm_next;
-                       }
-               }
-#endif
-       }
-       lru_add_drain();        /* Push any new pages onto the LRU now */
-}
-
-/*
- * We hold the mm semaphore and the page_table_lock on entry and
- * should release the pagetable lock on exit..
- */
-static int do_swap_page(struct mm_struct * mm,
-       struct vm_area_struct * vma, unsigned long address,
-       pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
-{
-       struct page *page;
-       swp_entry_t entry = pte_to_swp_entry(orig_pte);
-       pte_t pte;
-       int ret = VM_FAULT_MINOR;
-
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
-       page = lookup_swap_cache(entry);
-       if (!page) {
-               swapin_readahead(entry, address, vma);
-               page = read_swap_cache_async(entry, vma, address);
-               if (!page) {
-                       /*
-                        * Back out if somebody else faulted in this pte while
-                        * we released the page table lock.
-                        */
-                       spin_lock(&mm->page_table_lock);
-                       page_table = pte_offset_map(pmd, address);
-                       if (likely(pte_same(*page_table, orig_pte)))
-                               ret = VM_FAULT_OOM;
-                       else
-                               ret = VM_FAULT_MINOR;
-                       pte_unmap(page_table);
-                       spin_unlock(&mm->page_table_lock);
-                       goto out;
-               }
-
-               /* Had to read the page from swap area: Major fault */
-               ret = VM_FAULT_MAJOR;
-               inc_page_state(pgmajfault);
-       }
-
-       mark_page_accessed(page);
-       lock_page(page);
-
-       /*
-        * Back out if somebody else faulted in this pte while we
-        * released the page table lock.
-        */
-       spin_lock(&mm->page_table_lock);
-       page_table = pte_offset_map(pmd, address);
-       if (unlikely(!pte_same(*page_table, orig_pte))) {
-               pte_unmap(page_table);
-               spin_unlock(&mm->page_table_lock);
-               unlock_page(page);
-               page_cache_release(page);
-               ret = VM_FAULT_MINOR;
-               goto out;
-       }
-
-       /* The page isn't present yet, go ahead with the fault. */
-               
-       swap_free(entry);
-       if (vm_swap_full())
-               remove_exclusive_swap_page(page);
-
-       mm->rss++;
-       pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && can_share_swap_page(page)) {
-               pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-               write_access = 0;
-       }
-       unlock_page(page);
-
-       flush_icache_page(vma, page);
-       set_pte(page_table, pte);
-       page_add_anon_rmap(page, vma, address);
-
-       if (write_access) {
-               if (do_wp_page(mm, vma, address,
-                               page_table, pmd, pte) == VM_FAULT_OOM)
-                       ret = VM_FAULT_OOM;
-               goto out;
-       }
-
-       /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, pte);
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
-out:
-       return ret;
-}
-
-/*
- * We are called with the MM semaphore and page_table_lock
- * spinlock held to protect against concurrent faults in
- * multithreaded programs. 
- */
-static int
-do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
-               pte_t *page_table, pmd_t *pmd, int write_access,
-               unsigned long addr)
-{
-       pte_t entry;
-       struct page * page = ZERO_PAGE(addr);
-
-       /* Read-only mapping of ZERO_PAGE. */
-       entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
-
-       /* ..except if it's a write access */
-       if (write_access) {
-               /* Allocate our own private page. */
-               pte_unmap(page_table);
-               spin_unlock(&mm->page_table_lock);
-
-               if (unlikely(anon_vma_prepare(vma)))
-                       goto no_mem;
-               page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
-               if (!page)
-                       goto no_mem;
-               clear_user_highpage(page, addr);
-
-               spin_lock(&mm->page_table_lock);
-               page_table = pte_offset_map(pmd, addr);
-
-               if (!pte_none(*page_table)) {
-                       pte_unmap(page_table);
-                       page_cache_release(page);
-                       spin_unlock(&mm->page_table_lock);
-                       goto out;
-               }
-               mm->rss++;
-               entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
-                                                        vma->vm_page_prot)),
-                                     vma);
-               lru_cache_add_active(page);
-               mark_page_accessed(page);
-               page_add_anon_rmap(page, vma, addr);
-       }
-
-       ptep_establish_new(vma, addr, page_table, entry);
-       pte_unmap(page_table);
-
-       /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, addr, entry);
-       spin_unlock(&mm->page_table_lock);
-out:
-       return VM_FAULT_MINOR;
-no_mem:
-       return VM_FAULT_OOM;
-}
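
The behaviour do_anonymous_page() gives user space, namely that reads of untouched anonymous memory return zeroes via a read-only mapping of the zero page and only a write allocates a private page, can be seen with a trivial runnable program:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* one anonymous page; not written until the last step */
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            printf("first read : %d (zero page, read-only mapping)\n", p[0]);
            p[0] = 42;      /* write fault: the kernel now gives us a private page */
            printf("after write: %d\n", p[0]);
            munmap(p, 4096);
            return 0;
    }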
-
-/*
- * do_no_page() tries to create a new page mapping. It aggressively
- * tries to share with existing pages, but makes a separate copy if
- * the "write_access" parameter is true in order to avoid the next
- * page fault.
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * This is called with the MM semaphore held and the page table
- * spinlock held. Exit with the spinlock released.
- */
-static int
-do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-       unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
-{
-       struct page * new_page;
-       struct address_space *mapping = NULL;
-       pte_t entry;
-       int sequence = 0;
-       int ret = VM_FAULT_MINOR;
-       int anon = 0;
-
-       if (!vma->vm_ops || !vma->vm_ops->nopage)
-               return do_anonymous_page(mm, vma, page_table,
-                                       pmd, write_access, address);
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
-
-       if (vma->vm_file) {
-               mapping = vma->vm_file->f_mapping;
-               sequence = atomic_read(&mapping->truncate_count);
-       }
-       smp_rmb();  /* Prevent CPU from reordering lock-free ->nopage() */
-retry:
-       new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-
-       /* no page was available -- either SIGBUS or OOM */
-       if (new_page == NOPAGE_SIGBUS)
-               return VM_FAULT_SIGBUS;
-       if (new_page == NOPAGE_OOM)
-               return VM_FAULT_OOM;
-
-       /*
-        * Should we do an early C-O-W break?
-        */
-       if (write_access && !(vma->vm_flags & VM_SHARED)) {
-               struct page *page;
-
-               if (unlikely(anon_vma_prepare(vma)))
-                       goto oom;
-               page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-               if (!page)
-                       goto oom;
-               copy_user_highpage(page, new_page, address);
-               page_cache_release(new_page);
-               new_page = page;
-               anon = 1;
-       }
-
-       spin_lock(&mm->page_table_lock);
-       /*
-        * For a file-backed vma, someone could have truncated or otherwise
-        * invalidated this page.  If unmap_mapping_range got called,
-        * retry getting the page.
-        */
-       if (mapping &&
-             (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
-               sequence = atomic_read(&mapping->truncate_count);
-               spin_unlock(&mm->page_table_lock);
-               page_cache_release(new_page);
-               goto retry;
-       }
-       page_table = pte_offset_map(pmd, address);
-
-       /*
-        * This silly early PAGE_DIRTY setting removes a race
-        * due to the bad i386 page protection. But it's valid
-        * for other architectures too.
-        *
-        * Note that if write_access is true, we either now have
-        * an exclusive copy of the page, or this is a shared mapping,
-        * so we can make it writable and dirty to avoid having to
-        * handle that later.
-        */
-       /* Only go through if we didn't race with anybody else... */
-       if (pte_none(*page_table)) {
-               if (!PageReserved(new_page))
-                       ++mm->rss;
-               flush_icache_page(vma, new_page);
-               entry = mk_pte(new_page, vma->vm_page_prot);
-               if (write_access)
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               ptep_establish_new(vma, address, page_table, entry);
-               if (anon) {
-                       lru_cache_add_active(new_page);
-                       page_add_anon_rmap(new_page, vma, address);
-               } else
-                       page_add_file_rmap(new_page);
-               pte_unmap(page_table);
-       } else {
-               /* One of our sibling threads was faster, back out. */
-               pte_unmap(page_table);
-               page_cache_release(new_page);
-               spin_unlock(&mm->page_table_lock);
-               goto out;
-       }
-
-       /* no need to invalidate: a not-present page shouldn't be cached */
-       update_mmu_cache(vma, address, entry);
-       spin_unlock(&mm->page_table_lock);
-out:
-       return ret;
-oom:
-       page_cache_release(new_page);
-       ret = VM_FAULT_OOM;
-       goto out;
-}
-
-/*
- * Fault of a previously existing named mapping. Repopulate the pte
- * from the encoded file_pte if possible. This enables swappable
- * nonlinear vmas.
- */
-static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
-       unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
-{
-       unsigned long pgoff;
-       int err;
-
-       BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
-       /*
-        * Fall back to the linear mapping if the fs does not support
-        * ->populate:
-        */
-       if (!vma->vm_ops || !vma->vm_ops->populate || 
-                       (write_access && !(vma->vm_flags & VM_SHARED))) {
-               pte_clear(pte);
-               return do_no_page(mm, vma, address, write_access, pte, pmd);
-       }
-
-       pgoff = pte_to_pgoff(*pte);
-
-       pte_unmap(pte);
-       spin_unlock(&mm->page_table_lock);
-
-       err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
-       if (err == -ENOMEM)
-               return VM_FAULT_OOM;
-       if (err)
-               return VM_FAULT_SIGBUS;
-       return VM_FAULT_MAJOR;
-}
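
Nonlinear vmas are created from user space with remap_file_pages(2), which ends up in ->populate() and, on later faults of swapped-out file ptes, in do_file_page() above. A hedged, runnable illustration (Linux-specific, error handling omitted; on much newer kernels the call is emulated but the observable result is the same):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[8192];
            FILE *f = tmpfile();

            memset(buf, 'A', 4096);
            memset(buf + 4096, 'B', 4096);
            fwrite(buf, 1, sizeof(buf), f);
            fflush(f);

            /* map two file pages MAP_SHARED, then make the first window point
             * at file page 1 instead of page 0: a nonlinear mapping */
            char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fileno(f), 0);
            remap_file_pages(p, 4096, 0, 1, 0);

            printf("window 0 now shows file page 1: %c\n", p[0]);   /* 'B' */
            munmap(p, 8192);
            return 0;
    }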
-
-/*
- * These routines also need to handle stuff like marking pages dirty
- * and/or accessed for architectures that don't do it in hardware (most
- * RISC architectures).  The early dirtying is also good on the i386.
- *
- * There is also a hook called "update_mmu_cache()" that architectures
- * with external mmu caches can use to update those (ie the Sparc or
- * PowerPC hashed page tables that act as extended TLBs).
- *
- * Note the "page_table_lock". It is to protect against kswapd removing
- * pages from under us. Note that kswapd only ever _removes_ pages, never
- * adds them. As such, once we have noticed that the page is not present,
- * we can drop the lock early.
- *
- * The adding of pages is protected by the MM semaphore (which we hold),
- * so we don't need to worry about a page suddenly being added into
- * our VM.
- *
- * We enter with the pagetable spinlock held, we are supposed to
- * release it when done.
- */
-static inline int handle_pte_fault(struct mm_struct *mm,
-       struct vm_area_struct * vma, unsigned long address,
-       int write_access, pte_t *pte, pmd_t *pmd)
-{
-       pte_t entry;
-
-       entry = *pte;
-       if (!pte_present(entry)) {
-               /*
-                * If it truly wasn't present, we know that kswapd
-                * and the PTE updates will not touch it later. So
-                * drop the lock.
-                */
-               if (pte_none(entry))
-                       return do_no_page(mm, vma, address, write_access, pte, pmd);
-               if (pte_file(entry))
-                       return do_file_page(mm, vma, address, write_access, pte, pmd);
-               return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
-       }
-
-       if (write_access) {
-               if (!pte_write(entry))
-                       return do_wp_page(mm, vma, address, pte, pmd, entry);
-
-               entry = pte_mkdirty(entry);
-       }
-       entry = pte_mkyoung(entry);
-       ptep_set_access_flags(vma, address, pte, entry, write_access);
-       update_mmu_cache(vma, address, entry);
-       pte_unmap(pte);
-       spin_unlock(&mm->page_table_lock);
-       return VM_FAULT_MINOR;
-}
-
-/*
- * By the time we get here, we already hold the mm semaphore
- */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
-       unsigned long address, int write_access)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-
-       __set_current_state(TASK_RUNNING);
-       pgd = pgd_offset(mm, address);
-
-       inc_page_state(pgfault);
-
-       if (is_vm_hugetlb_page(vma))
-               return VM_FAULT_SIGBUS; /* mapping truncation does this. */
-
-       /*
-        * We need the page table lock to synchronize with kswapd
-        * and the SMP-safe atomic PTE updates.
-        */
-       spin_lock(&mm->page_table_lock);
-       pmd = pmd_alloc(mm, pgd, address);
-
-       if (pmd) {
-               pte_t * pte = pte_alloc_map(mm, pmd, address);
-               if (pte)
-                       return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
-       }
-       spin_unlock(&mm->page_table_lock);
-       return VM_FAULT_OOM;
-}
-
-/*
- * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
- */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
-       pmd_t *new;
-
-       spin_unlock(&mm->page_table_lock);
-       new = pmd_alloc_one(mm, address);
-       spin_lock(&mm->page_table_lock);
-       if (!new)
-               return NULL;
-
-       /*
-        * Because we dropped the lock, we should re-check the
-        * entry, as somebody else could have populated it..
-        */
-       if (pgd_present(*pgd)) {
-               pmd_free(new);
-               goto out;
-       }
-       pgd_populate(mm, pgd, new);
-out:
-       return pmd_offset(pgd, address);
-}
-
-int make_pages_present(unsigned long addr, unsigned long end)
-{
-       int ret, len, write;
-       struct vm_area_struct * vma;
-
-       vma = find_vma(current->mm, addr);
-       write = (vma->vm_flags & VM_WRITE) != 0;
-       if (addr >= end)
-               BUG();
-       if (end > vma->vm_end)
-               BUG();
-       len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
-       ret = get_user_pages(current, current->mm, addr,
-                       len, write, 0, NULL, NULL);
-       if (ret < 0)
-               return ret;
-       return ret == len ? 0 : -1;
-}
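
make_pages_present() is what mlock() uses in this tree to pre-fault a locked range; from user space the visible contract is simply that every page in the range is resident afterwards. A small runnable example:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* mlock() faults the whole range in (via make_pages_present()
             * -> get_user_pages() in this kernel) and pins it in RAM */
            if (mlock(p, len) == 0)
                    printf("locked %zu bytes; all pages are now present\n", len);

            munlock(p, len);
            munmap(p, len);
            return 0;
    }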
-
-/* 
- * Map a vmalloc()-space virtual address to the physical page.
- */
-struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-       unsigned long addr = (unsigned long) vmalloc_addr;
-       struct page *page = NULL;
-       pgd_t *pgd = pgd_offset_k(addr);
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-  
-       if (!pgd_none(*pgd)) {
-               pmd = pmd_offset(pgd, addr);
-               if (!pmd_none(*pmd)) {
-                       preempt_disable();
-                       ptep = pte_offset_map(pmd, addr);
-                       pte = *ptep;
-                       if (pte_present(pte))
-                               page = pte_page(pte);
-                       pte_unmap(ptep);
-                       preempt_enable();
-               }
-       }
-       return page;
-}
-
-EXPORT_SYMBOL(vmalloc_to_page);
-
-#if !defined(CONFIG_ARCH_GATE_AREA)
-
-#if defined(AT_SYSINFO_EHDR)
-struct vm_area_struct gate_vma;
-
-static int __init gate_vma_init(void)
-{
-       gate_vma.vm_mm = NULL;
-       gate_vma.vm_start = FIXADDR_USER_START;
-       gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_page_prot = PAGE_READONLY;
-       gate_vma.vm_flags = 0;
-       return 0;
-}
-__initcall(gate_vma_init);
-#endif
-
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-{
-#ifdef AT_SYSINFO_EHDR
-       return &gate_vma;
-#else
-       return NULL;
-#endif
-}
-
-int in_gate_area(struct task_struct *task, unsigned long addr)
-{
-#ifdef AT_SYSINFO_EHDR
-       if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
-               return 1;
-#endif
-       return 0;
-}
-
-#endif
diff --git a/linux-2.6.8.1-xen-sparse/mm/page_alloc.c b/linux-2.6.8.1-xen-sparse/mm/page_alloc.c
deleted file mode 100644 (file)
index 166246d..0000000
+++ /dev/null
@@ -1,2046 +0,0 @@
-/*
- *  linux/mm/page_alloc.c
- *
- *  Manages the free list; the system allocates free pages here.
- *  Note that kmalloc() lives in slab.c
- *
- *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
- *  Swap reorganised 29.12.95, Stephen Tweedie
- *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
- *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
- *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
- *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/suspend.h>
-#include <linux/pagevec.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include <linux/notifier.h>
-#include <linux/topology.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-
-#include <asm/tlbflush.h>
-
-DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
-struct pglist_data *pgdat_list;
-unsigned long totalram_pages;
-unsigned long totalhigh_pages;
-long nr_swap_pages;
-int numnodes = 1;
-int sysctl_lower_zone_protection = 0;
-
-EXPORT_SYMBOL(totalram_pages);
-EXPORT_SYMBOL(nr_swap_pages);
-
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-int min_free_kbytes = 1024;
-
-static unsigned long __initdata nr_kernel_pages;
-static unsigned long __initdata nr_all_pages;
-
-/*
- * Temporary debugging check for pages not lying within a given zone.
- */
-static int bad_range(struct zone *zone, struct page *page)
-{
-       if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
-               return 1;
-       if (page_to_pfn(page) < zone->zone_start_pfn)
-               return 1;
-       if (zone != page_zone(page))
-               return 1;
-       return 0;
-}
-
-static void bad_page(const char *function, struct page *page)
-{
-       printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-               function, current->comm, page);
-       printk(KERN_EMERG "flags:0x%08lx mapping:%p mapcount:%d count:%d\n",
-               (unsigned long)page->flags, page->mapping,
-               (int)page->mapcount, page_count(page));
-       printk(KERN_EMERG "Backtrace:\n");
-       dump_stack();
-       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
-       page->flags &= ~(1 << PG_private        |
-                       1 << PG_locked  |
-                       1 << PG_lru     |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_maplock |
-                       1 << PG_anon    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback);
-       set_page_count(page, 0);
-       page->mapping = NULL;
-       page->mapcount = 0;
-}
-
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
-/*
- * Higher-order pages are called "compound pages".  They are structured thusly:
- *
- * The first PAGE_SIZE page is called the "head page".
- *
- * The remaining PAGE_SIZE pages are called "tail pages".
- *
- * All pages have PG_compound set.  All pages have their ->private pointing at
- * the head page (even the head page has this).
- *
- * The first tail page's ->mapping, if non-zero, holds the address of the
- * compound page's put_page() function.
- *
- * The order of the allocation is stored in the first tail page's ->index
- * This is only for debug at present.  This usage means that zero-order pages
- * may not be compound.
- */
-static void prep_compound_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-
-       page[1].mapping = NULL;
-       page[1].index = order;
-       for (i = 0; i < nr_pages; i++) {
-               struct page *p = page + i;
-
-               SetPageCompound(p);
-               p->private = (unsigned long)page;
-       }
-}
-
-static void destroy_compound_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-
-       if (!PageCompound(page))
-               return;
-
-       if (page[1].index != order)
-               bad_page(__FUNCTION__, page);
-
-       for (i = 0; i < nr_pages; i++) {
-               struct page *p = page + i;
-
-               if (!PageCompound(p))
-                       bad_page(__FUNCTION__, page);
-               if (p->private != (unsigned long)page)
-                       bad_page(__FUNCTION__, page);
-               ClearPageCompound(p);
-       }
-}
-#endif         /* CONFIG_HUGETLB_PAGE */
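
For reference, a minimal user-space sketch of the linkage described in the comment above; struct toy_page, the order and the printed values are made up, and only the ->private field of the real struct page is modelled. Every constituent page of a compound allocation points at the head page, so any tail page can be mapped back to its head in O(1).

#include <stdio.h>

/* Toy stand-in for struct page: only the one field this example needs. */
struct toy_page { unsigned long private; };

int main(void)
{
        struct toy_page pages[8];               /* a hypothetical order-3 compound page */
        int i, order = 3, nr = 1 << order;

        for (i = 0; i < nr; i++)                /* what prep_compound_page() does */
                pages[i].private = (unsigned long)&pages[0];

        /* Any tail page leads straight back to the head page. */
        printf("tail 5 points at %p, head is %p\n",
               (void *)pages[5].private, (void *)&pages[0]);
        return 0;
}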
-
-/*
- * Freeing function for a buddy system allocator.
- *
- * The concept of a buddy system is to maintain direct-mapped table
- * (containing bit values) for memory blocks of various "orders".
- * The bottom level table contains the map for the smallest allocatable
- * units of memory (here, pages), and each level above it describes
- * pairs of units from the levels below, hence, "buddies".
- * At a high level, all that happens here is marking the table entry
- * at the bottom level available, and propagating the changes upward
- * as necessary, plus some accounting needed to play nicely with other
- * parts of the VM system.
- * At each level, we keep one bit for each pair of blocks, which
- * is set to 1 iff only one of the pair is allocated.  So when we
- * are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were   
- * free, the remainder of the region must be split into blocks.   
- * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.            
- *
- * -- wli
- */
-
-static inline void __free_pages_bulk (struct page *page, struct page *base,
-               struct zone *zone, struct free_area *area, unsigned int order)
-{
-       unsigned long page_idx, index, mask;
-
-       if (order)
-               destroy_compound_page(page, order);
-       mask = (~0UL) << order;
-       page_idx = page - base;
-       if (page_idx & ~mask)
-               BUG();
-       index = page_idx >> (1 + order);
-
-       zone->free_pages += 1 << order;
-       while (order < MAX_ORDER-1) {
-               struct page *buddy1, *buddy2;
-
-               BUG_ON(area >= zone->free_area + MAX_ORDER);
-               if (!__test_and_change_bit(index, area->map))
-                       /*
-                        * the buddy page is still allocated.
-                        */
-                       break;
-
-               /* Move the buddy up one level. */
-               buddy1 = base + (page_idx ^ (1 << order));
-               buddy2 = base + page_idx;
-               BUG_ON(bad_range(zone, buddy1));
-               BUG_ON(bad_range(zone, buddy2));
-               list_del(&buddy1->lru);
-               mask <<= 1;
-               order++;
-               area++;
-               index >>= 1;
-               page_idx &= mask;
-       }
-       list_add(&(base + page_idx)->lru, &area->free_list);
-}
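
A small user-space sketch of the index arithmetic __free_pages_bulk() relies on; page_idx, the order and the printed values are made up. The buddy of a block at zone-relative index page_idx and order k sits at page_idx ^ (1 << k), the pair shares the map bit at page_idx >> (1 + k) (toggled on every allocate and free, so it reads 1 iff exactly one of the pair is allocated), and when a merge happens the combined block starts at the index rounded down to the next order's alignment.

#include <stdio.h>

int main(void)
{
        unsigned long page_idx = 24;    /* hypothetical block index, order 3 */
        unsigned int order = 3;

        unsigned long buddy = page_idx ^ (1UL << order);       /* 16 */
        unsigned long bit   = page_idx >> (1 + order);          /* 1 */

        printf("block %lu (order %u): buddy %lu, pair bit %lu\n",
               page_idx, order, buddy, bit);

        /* If the pair bit tested 1 before the toggle, the buddy was free and
         * the two merge into one order-4 block: */
        printf("merged block starts at index %lu\n",
               page_idx & ~((1UL << (order + 1)) - 1));
        return 0;
}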
-
-static inline void free_pages_check(const char *function, struct page *page)
-{
-       if (    page_mapped(page) ||
-               page->mapping != NULL ||
-               page_count(page) != 0 ||
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_reclaim |
-                       1 << PG_slab    |
-                       1 << PG_maplock |
-                       1 << PG_anon    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback )))
-               bad_page(function, page);
-       if (PageDirty(page))
-               ClearPageDirty(page);
-}
-
-/*
- * Frees a list of pages. 
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free, or 0 for all on the list.
- *
- * If the zone was previously in an "all pages pinned" state then look to
- * see if this freeing clears that state.
- *
- * And clear the zone's pages_scanned counter, to hold off the "all pages are
- * pinned" detection logic.
- */
-static int
-free_pages_bulk(struct zone *zone, int count,
-               struct list_head *list, unsigned int order)
-{
-       unsigned long flags;
-       struct free_area *area;
-       struct page *base, *page = NULL;
-       int ret = 0;
-
-       base = zone->zone_mem_map;
-       area = zone->free_area + order;
-       spin_lock_irqsave(&zone->lock, flags);
-       zone->all_unreclaimable = 0;
-       zone->pages_scanned = 0;
-       while (!list_empty(list) && count--) {
-               page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_pages_bulk list manipulates */
-               list_del(&page->lru);
-               __free_pages_bulk(page, base, zone, area, order);
-               ret++;
-       }
-       spin_unlock_irqrestore(&zone->lock, flags);
-       return ret;
-}
-
-void __free_pages_ok(struct page *page, unsigned int order)
-{
-       LIST_HEAD(list);
-       int i;
-
-       mod_page_state(pgfree, 1 << order);
-       for (i = 0 ; i < (1 << order) ; ++i)
-               free_pages_check(__FUNCTION__, page + i);
-       list_add(&page->lru, &list);
-       kernel_map_pages(page, 1<<order, 0);
-       free_pages_bulk(page_zone(page), 1, &list, order);
-}
-
-#define MARK_USED(index, order, area) \
-       __change_bit((index) >> (1+(order)), (area)->map)
-
-/*
- * The order of subdivision here is critical for the IO subsystem.
- * Please do not alter this order without good reasons and regression
- * testing. Specifically, as large blocks of memory are subdivided,
- * the order in which smaller blocks are delivered depends on the order
- * they're subdivided in this function. This is the primary factor
- * influencing the order in which pages are delivered to the IO
- * subsystem according to empirical testing, and this is also justified
- * by considering the behavior of a buddy system containing a single
- * large block of memory acted on by a series of small allocations.
- * This behavior is a critical factor in sglist merging's success.
- *
- * -- wli
- */
-static inline struct page *
-expand(struct zone *zone, struct page *page,
-        unsigned long index, int low, int high, struct free_area *area)
-{
-       unsigned long size = 1 << high;
-
-       while (high > low) {
-               area--;
-               high--;
-               size >>= 1;
-               BUG_ON(bad_range(zone, &page[size]));
-               list_add(&page[size].lru, &area->free_list);
-               MARK_USED(index + size, high, area);
-       }
-       return page;
-}
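
A sketch of the splitting direction, assuming an order-0 request carved out of an order-3 free block at zone-relative index 0 (all numbers hypothetical): each halving hands the upper half back to the next-lower free list, which is exactly the delivery order the comment above asks not to disturb.

#include <stdio.h>

int main(void)
{
        unsigned long index = 0;        /* hypothetical order-3 free block */
        int high = 3, low = 0;
        unsigned long size = 1UL << high;

        while (high > low) {            /* mirrors the loop in expand() */
                high--;
                size >>= 1;
                printf("pages %lu-%lu go back on the order-%d free list\n",
                       index + size, index + 2 * size - 1, high);
        }
        printf("pages %lu-%lu are returned to the caller (order %d)\n",
               index, index + (1UL << low) - 1, low);
        return 0;
}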
-
-static inline void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
-       set_page_count(page, 1);
-#else
-       int i;
-
-       /*
-        * We need to reference all the pages for this order, otherwise if
-        * anyone accesses one of the pages with (get/put) it will be freed.
-        */
-       for (i = 0; i < (1 << order); i++)
-               set_page_count(page+i, 1);
-#endif /* CONFIG_MMU */
-}
-
-/*
- * This page is about to be returned from the page allocator
- */
-static void prep_new_page(struct page *page, int order)
-{
-       if (page->mapping || page_mapped(page) ||
-           (page->flags & (
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_lru     |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_reclaim |
-                       1 << PG_maplock |
-                       1 << PG_anon    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback )))
-               bad_page(__FUNCTION__, page);
-
-       page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-                       1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_checked | 1 << PG_mappedtodisk);
-       page->private = 0;
-       set_page_refs(page, order);
-}
-
-/* 
- * Do the hard work of removing an element from the buddy allocator.
- * Call me with the zone->lock already held.
- */
-static struct page *__rmqueue(struct zone *zone, unsigned int order)
-{
-       struct free_area * area;
-       unsigned int current_order;
-       struct page *page;
-       unsigned int index;
-
-       for (current_order = order; current_order < MAX_ORDER; ++current_order) {
-               area = zone->free_area + current_order;
-               if (list_empty(&area->free_list))
-                       continue;
-
-               page = list_entry(area->free_list.next, struct page, lru);
-               list_del(&page->lru);
-               index = page - zone->zone_mem_map;
-               if (current_order != MAX_ORDER-1)
-                       MARK_USED(index, current_order, area);
-               zone->free_pages -= 1UL << order;
-               return expand(zone, page, index, order, current_order, area);
-       }
-
-       return NULL;
-}
-
-/* 
- * Obtain a specified number of elements from the buddy allocator, all under
- * a single hold of the lock, for efficiency.  Add them to the supplied list.
- * Returns the number of new pages which were placed at *list.
- */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
-                       unsigned long count, struct list_head *list)
-{
-       unsigned long flags;
-       int i;
-       int allocated = 0;
-       struct page *page;
-       
-       spin_lock_irqsave(&zone->lock, flags);
-       for (i = 0; i < count; ++i) {
-               page = __rmqueue(zone, order);
-               if (page == NULL)
-                       break;
-               allocated++;
-               list_add_tail(&page->lru, list);
-       }
-       spin_unlock_irqrestore(&zone->lock, flags);
-       return allocated;
-}
-
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
-static void __drain_pages(unsigned int cpu)
-{
-       struct zone *zone;
-       int i;
-
-       for_each_zone(zone) {
-               struct per_cpu_pageset *pset;
-
-               pset = &zone->pageset[cpu];
-               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &pset->pcp[i];
-                       pcp->count -= free_pages_bulk(zone, pcp->count,
-                                               &pcp->list, 0);
-               }
-       }
-}
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
-
-#ifdef CONFIG_PM
-int is_head_of_free_region(struct page *page)
-{
-        struct zone *zone = page_zone(page);
-        unsigned long flags;
-       int order;
-       struct list_head *curr;
-
-       /*
-        * Should not matter as we need quiescent system for
-        * suspend anyway, but...
-        */
-       spin_lock_irqsave(&zone->lock, flags);
-       for (order = MAX_ORDER - 1; order >= 0; --order)
-               list_for_each(curr, &zone->free_area[order].free_list)
-                       if (page == list_entry(curr, struct page, lru)) {
-                               spin_unlock_irqrestore(&zone->lock, flags);
-                               return 1 << order;
-                       }
-       spin_unlock_irqrestore(&zone->lock, flags);
-        return 0;
-}
-
-/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);  
-       __drain_pages(smp_processor_id());
-       local_irq_restore(flags);       
-}
-#endif /* CONFIG_PM */
-
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
-{
-#ifdef CONFIG_NUMA
-       unsigned long flags;
-       int cpu;
-       pg_data_t *pg = z->zone_pgdat;
-       pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
-       struct per_cpu_pageset *p;
-
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-       p = &z->pageset[cpu];
-       if (pg == orig) {
-               z->pageset[cpu].numa_hit++;
-       } else {
-               p->numa_miss++;
-               zonelist->zones[0]->pageset[cpu].numa_foreign++;
-       }
-       if (pg == NODE_DATA(numa_node_id()))
-               p->local_node++;
-       else
-               p->other_node++;
-       local_irq_restore(flags);
-#endif
-}
-
-/*
- * Free a 0-order page
- */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void fastcall free_hot_cold_page(struct page *page, int cold)
-{
-       struct zone *zone = page_zone(page);
-       struct per_cpu_pages *pcp;
-       unsigned long flags;
-
-       if (PageForeign(page))
-               return (PageForeignDestructor(page))(page);
-
-       kernel_map_pages(page, 1, 0);
-       inc_page_state(pgfree);
-       free_pages_check(__FUNCTION__, page);
-       pcp = &zone->pageset[get_cpu()].pcp[cold];
-       local_irq_save(flags);
-       if (pcp->count >= pcp->high)
-               pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
-       list_add(&page->lru, &pcp->list);
-       pcp->count++;
-       local_irq_restore(flags);
-       put_cpu();
-}
-
-void fastcall free_hot_page(struct page *page)
-{
-       free_hot_cold_page(page, 0);
-}
-       
-void fastcall free_cold_page(struct page *page)
-{
-       free_hot_cold_page(page, 1);
-}
-
-/*
- * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
- * we cheat by calling it from here, in the order > 0 path.  Saves a branch
- * or two.
- */
-
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
-{
-       unsigned long flags;
-       struct page *page = NULL;
-       int cold = !!(gfp_flags & __GFP_COLD);
-
-       if (order == 0) {
-               struct per_cpu_pages *pcp;
-
-               pcp = &zone->pageset[get_cpu()].pcp[cold];
-               local_irq_save(flags);
-               if (pcp->count <= pcp->low)
-                       pcp->count += rmqueue_bulk(zone, 0,
-                                               pcp->batch, &pcp->list);
-               if (pcp->count) {
-                       page = list_entry(pcp->list.next, struct page, lru);
-                       list_del(&page->lru);
-                       pcp->count--;
-               }
-               local_irq_restore(flags);
-               put_cpu();
-       }
-
-       if (page == NULL) {
-               spin_lock_irqsave(&zone->lock, flags);
-               page = __rmqueue(zone, order);
-               spin_unlock_irqrestore(&zone->lock, flags);
-       }
-
-       if (page != NULL) {
-               BUG_ON(bad_range(zone, page));
-               mod_page_state_zone(zone, pgalloc, 1 << order);
-               prep_new_page(page, order);
-               if (order && (gfp_flags & __GFP_COMP))
-                       prep_compound_page(page, order);
-       }
-       return page;
-}
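
A rough sketch of the per-cpu pool thresholds that free_hot_cold_page() and buffered_rmqueue() cooperate on; struct toy_pcp and the low/high/batch values are invented for illustration. The list is refilled by `batch` pages from the buddy lists once it drains to `low`, and `batch` pages are pushed back once it grows past `high`, so most order-0 traffic never has to take zone->lock.

#include <stdio.h>

struct toy_pcp { int count, low, high, batch; };

static void toy_alloc(struct toy_pcp *pcp)
{
        if (pcp->count <= pcp->low)
                pcp->count += pcp->batch;       /* rmqueue_bulk() refill */
        if (pcp->count)
                pcp->count--;                   /* hand one page out */
}

static void toy_free(struct toy_pcp *pcp)
{
        if (pcp->count >= pcp->high)
                pcp->count -= pcp->batch;       /* free_pages_bulk() drain */
        pcp->count++;                           /* park the freed page */
}

int main(void)
{
        struct toy_pcp pcp = { .count = 0, .low = 2, .high = 6, .batch = 2 };
        int i;

        for (i = 0; i < 4; i++)
                toy_alloc(&pcp);
        for (i = 0; i < 8; i++)
                toy_free(&pcp);
        printf("pages parked in the per-cpu list: %d\n", pcp.count);
        return 0;
}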
-
-/*
- * This is the 'heart' of the zoned buddy allocator.
- *
- * Herein lies the mysterious "incremental min".  That's the
- *
- *     local_low = z->pages_low;
- *     min += local_low;
- *
- * thing.  The intent here is to provide additional protection to low zones for
- * allocation requests which _could_ use higher zones.  So a GFP_HIGHMEM
- * request is not allowed to dip as deeply into the normal zone as a GFP_KERNEL
- * request.  This preserves additional space in those lower zones for requests
- * which really do need memory from those zones.  It means that on a decent
- * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
- * zone untouched.
- */
-struct page * fastcall
-__alloc_pages(unsigned int gfp_mask, unsigned int order,
-               struct zonelist *zonelist)
-{
-       const int wait = gfp_mask & __GFP_WAIT;
-       unsigned long min;
-       struct zone **zones;
-       struct page *page;
-       struct reclaim_state reclaim_state;
-       struct task_struct *p = current;
-       int i;
-       int alloc_type;
-       int do_retry;
-
-       might_sleep_if(wait);
-
-       zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
-       if (zones[0] == NULL)     /* no zones in the zonelist */
-               return NULL;
-
-       alloc_type = zone_idx(zones[0]);
-
-       /* Go through the zonelist once, looking for a zone with enough free */
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *z = zones[i];
-
-               min = (1<<order) + z->protection[alloc_type];
-
-               /*
-                * We let real-time tasks dip their real-time paws a little
-                * deeper into reserves.
-                */
-               if (rt_task(p))
-                       min -= z->pages_low >> 1;
-
-               if (z->free_pages >= min ||
-                               (!wait && z->free_pages >= z->pages_high)) {
-                       page = buffered_rmqueue(z, order, gfp_mask);
-                       if (page) {
-                               zone_statistics(zonelist, z);
-                               goto got_pg;
-                       }
-               }
-       }
-
-       /* we're somewhat low on memory, failed to find what we needed */
-       for (i = 0; zones[i] != NULL; i++)
-               wakeup_kswapd(zones[i]);
-
-       /* Go through the zonelist again, taking __GFP_HIGH into account */
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *z = zones[i];
-
-               min = (1<<order) + z->protection[alloc_type];
-
-               if (gfp_mask & __GFP_HIGH)
-                       min -= z->pages_low >> 2;
-               if (rt_task(p))
-                       min -= z->pages_low >> 1;
-
-               if (z->free_pages >= min ||
-                               (!wait && z->free_pages >= z->pages_high)) {
-                       page = buffered_rmqueue(z, order, gfp_mask);
-                       if (page) {
-                               zone_statistics(zonelist, z);
-                               goto got_pg;
-                       }
-               }
-       }
-
-       /* here we're in the low on memory slow path */
-
-rebalance:
-       if ((p->flags & (PF_MEMALLOC | PF_MEMDIE)) && !in_interrupt()) {
-               /* go through the zonelist yet again, ignoring mins */
-               for (i = 0; zones[i] != NULL; i++) {
-                       struct zone *z = zones[i];
-
-                       page = buffered_rmqueue(z, order, gfp_mask);
-                       if (page) {
-                               zone_statistics(zonelist, z);
-                               goto got_pg;
-                       }
-               }
-               goto nopage;
-       }
-
-       /* Atomic allocations - we can't balance anything */
-       if (!wait)
-               goto nopage;
-
-       p->flags |= PF_MEMALLOC;
-       reclaim_state.reclaimed_slab = 0;
-       p->reclaim_state = &reclaim_state;
-
-       try_to_free_pages(zones, gfp_mask, order);
-
-       p->reclaim_state = NULL;
-       p->flags &= ~PF_MEMALLOC;
-
-       /* go through the zonelist yet one more time */
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *z = zones[i];
-
-               min = (1UL << order) + z->protection[alloc_type];
-
-               if (z->free_pages >= min ||
-                               (!wait && z->free_pages >= z->pages_high)) {
-                       page = buffered_rmqueue(z, order, gfp_mask);
-                       if (page) {
-                               zone_statistics(zonelist, z);
-                               goto got_pg;
-                       }
-               }
-       }
-
-       /*
-        * Don't let big-order allocations loop unless the caller explicitly
-        * requests that.  Wait for some write requests to complete then retry.
-        *
-        * In this implementation, __GFP_REPEAT means __GFP_NOFAIL, but that
-        * may not be true in other implementations.
-        */
-       do_retry = 0;
-       if (!(gfp_mask & __GFP_NORETRY)) {
-               if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
-                       do_retry = 1;
-               if (gfp_mask & __GFP_NOFAIL)
-                       do_retry = 1;
-       }
-       if (do_retry) {
-               blk_congestion_wait(WRITE, HZ/50);
-               goto rebalance;
-       }
-
-nopage:
-       if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
-               printk(KERN_WARNING "%s: page allocation failure."
-                       " order:%d, mode:0x%x\n",
-                       p->comm, order, gfp_mask);
-               dump_stack();
-       }
-       return NULL;
-got_pg:
-       kernel_map_pages(page, 1 << order, 1);
-       return page;
-}
-
-EXPORT_SYMBOL(__alloc_pages);
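
A condensed sketch of the watermark test applied on each pass through the zonelist; the zone numbers below are hypothetical. The "incremental min" is the request size plus the zone's protection value for the allocation type, and __GFP_HIGH (on the second pass) and real-time callers are allowed to dip somewhat below it.

#include <stdio.h>

/* Hypothetical zone state, for illustration only. */
static unsigned long free_pages = 760;
static unsigned long protection_for_type = 768; /* z->protection[alloc_type] */
static unsigned long pages_low = 256;

static int watermark_ok(unsigned int order, int gfp_high, int rt)
{
        unsigned long min = (1UL << order) + protection_for_type;

        if (gfp_high)
                min -= pages_low >> 2;
        if (rt)
                min -= pages_low >> 1;
        return free_pages >= min;
}

int main(void)
{
        printf("order-0 GFP_KERNEL : %s\n", watermark_ok(0, 0, 0) ? "ok" : "defer");
        printf("order-0 __GFP_HIGH : %s\n", watermark_ok(0, 1, 0) ? "ok" : "defer");
        printf("order-3 __GFP_HIGH : %s\n", watermark_ok(3, 1, 0) ? "ok" : "defer");
        return 0;
}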
-
-/*
- * Common helper functions.
- */
-fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
-{
-       struct page * page;
-       page = alloc_pages(gfp_mask, order);
-       if (!page)
-               return 0;
-       return (unsigned long) page_address(page);
-}
-
-EXPORT_SYMBOL(__get_free_pages);
-
-fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
-{
-       struct page * page;
-
-       /*
-        * get_zeroed_page() returns a 32-bit address, which cannot represent
-        * a highmem page
-        */
-       BUG_ON(gfp_mask & __GFP_HIGHMEM);
-
-       page = alloc_pages(gfp_mask, 0);
-       if (page) {
-               void *address = page_address(page);
-               clear_page(address);
-               return (unsigned long) address;
-       }
-       return 0;
-}
-
-EXPORT_SYMBOL(get_zeroed_page);
-
-void __pagevec_free(struct pagevec *pvec)
-{
-       int i = pagevec_count(pvec);
-
-       while (--i >= 0)
-               free_hot_cold_page(pvec->pages[i], pvec->cold);
-}
-
-fastcall void __free_pages(struct page *page, unsigned int order)
-{
-       if (!PageReserved(page) && put_page_testzero(page)) {
-               if (order == 0)
-                       free_hot_page(page);
-               else
-                       __free_pages_ok(page, order);
-       }
-}
-
-EXPORT_SYMBOL(__free_pages);
-
-fastcall void free_pages(unsigned long addr, unsigned int order)
-{
-       if (addr != 0) {
-               BUG_ON(!virt_addr_valid(addr));
-               __free_pages(virt_to_page(addr), order);
-       }
-}
-
-EXPORT_SYMBOL(free_pages);
-
-/*
- * Total amount of free (allocatable) RAM:
- */
-unsigned int nr_free_pages(void)
-{
-       unsigned int sum = 0;
-       struct zone *zone;
-
-       for_each_zone(zone)
-               sum += zone->free_pages;
-
-       return sum;
-}
-
-EXPORT_SYMBOL(nr_free_pages);
-
-#ifdef CONFIG_NUMA
-unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
-{
-       unsigned int i, sum = 0;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               sum += pgdat->node_zones[i].free_pages;
-
-       return sum;
-}
-#endif
-
-static unsigned int nr_free_zone_pages(int offset)
-{
-       pg_data_t *pgdat;
-       unsigned int sum = 0;
-
-       for_each_pgdat(pgdat) {
-               struct zonelist *zonelist = pgdat->node_zonelists + offset;
-               struct zone **zonep = zonelist->zones;
-               struct zone *zone;
-
-               for (zone = *zonep++; zone; zone = *zonep++) {
-                       unsigned long size = zone->present_pages;
-                       unsigned long high = zone->pages_high;
-                       if (size > high)
-                               sum += size - high;
-               }
-       }
-
-       return sum;
-}
-
-/*
- * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
- */
-unsigned int nr_free_buffer_pages(void)
-{
-       return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
-}
-
-/*
- * Amount of free RAM allocatable within all zones
- */
-unsigned int nr_free_pagecache_pages(void)
-{
-       return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
-}
-
-#ifdef CONFIG_HIGHMEM
-unsigned int nr_free_highpages (void)
-{
-       pg_data_t *pgdat;
-       unsigned int pages = 0;
-
-       for_each_pgdat(pgdat)
-               pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
-
-       return pages;
-}
-#endif
-
-#ifdef CONFIG_NUMA
-static void show_node(struct zone *zone)
-{
-       printk("Node %d ", zone->zone_pgdat->node_id);
-}
-#else
-#define show_node(zone)        do { } while (0)
-#endif
-
-/*
- * Accumulate the page_state information across all CPUs.
- * The result is unavoidably approximate - it can change
- * during and after execution of this function.
- */
-DEFINE_PER_CPU(struct page_state, page_states) = {0};
-EXPORT_PER_CPU_SYMBOL(page_states);
-
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
-void __get_page_state(struct page_state *ret, int nr)
-{
-       int cpu = 0;
-
-       memset(ret, 0, sizeof(*ret));
-       while (cpu < NR_CPUS) {
-               unsigned long *in, *out, off;
-
-               if (!cpu_possible(cpu)) {
-                       cpu++;
-                       continue;
-               }
-
-               in = (unsigned long *)&per_cpu(page_states, cpu);
-               cpu++;
-               if (cpu < NR_CPUS && cpu_possible(cpu))
-                       prefetch(&per_cpu(page_states, cpu));
-               out = (unsigned long *)ret;
-               for (off = 0; off < nr; off++)
-                       *out++ += *in++;
-       }
-}
-
-void get_page_state(struct page_state *ret)
-{
-       int nr;
-
-       nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
-       nr /= sizeof(unsigned long);
-
-       __get_page_state(ret, nr + 1);
-}
-
-void get_full_page_state(struct page_state *ret)
-{
-       __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
-}
-
-unsigned long __read_page_state(unsigned offset)
-{
-       unsigned long ret = 0;
-       int cpu;
-
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               unsigned long in;
-
-               if (!cpu_possible(cpu))
-                       continue;
-
-               in = (unsigned long)&per_cpu(page_states, cpu) + offset;
-               ret += *((unsigned long *)in);
-       }
-       return ret;
-}
-
-void get_zone_counts(unsigned long *active,
-               unsigned long *inactive, unsigned long *free)
-{
-       struct zone *zone;
-
-       *active = 0;
-       *inactive = 0;
-       *free = 0;
-       for_each_zone(zone) {
-               *active += zone->nr_active;
-               *inactive += zone->nr_inactive;
-               *free += zone->free_pages;
-       }
-}
-
-void si_meminfo(struct sysinfo *val)
-{
-       val->totalram = totalram_pages;
-       val->sharedram = 0;
-       val->freeram = nr_free_pages();
-       val->bufferram = nr_blockdev_pages();
-#ifdef CONFIG_HIGHMEM
-       val->totalhigh = totalhigh_pages;
-       val->freehigh = nr_free_highpages();
-#else
-       val->totalhigh = 0;
-       val->freehigh = 0;
-#endif
-       val->mem_unit = PAGE_SIZE;
-}
-
-EXPORT_SYMBOL(si_meminfo);
-
-#ifdef CONFIG_NUMA
-void si_meminfo_node(struct sysinfo *val, int nid)
-{
-       pg_data_t *pgdat = NODE_DATA(nid);
-
-       val->totalram = pgdat->node_present_pages;
-       val->freeram = nr_free_pages_pgdat(pgdat);
-       val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
-       val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
-       val->mem_unit = PAGE_SIZE;
-}
-#endif
-
-#define K(x) ((x) << (PAGE_SHIFT-10))
-
-/*
- * Show free area list (used inside shift_scroll-lock stuff)
- * We also calculate the percentage fragmentation. We do this by counting the
- * memory on each free list with the exception of the first item on the list.
- */
-void show_free_areas(void)
-{
-       struct page_state ps;
-       int cpu, temperature;
-       unsigned long active;
-       unsigned long inactive;
-       unsigned long free;
-       struct zone *zone;
-
-       for_each_zone(zone) {
-               show_node(zone);
-               printk("%s per-cpu:", zone->name);
-
-               if (!zone->present_pages) {
-                       printk(" empty\n");
-                       continue;
-               } else
-                       printk("\n");
-
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-                       struct per_cpu_pageset *pageset;
-
-                       if (!cpu_possible(cpu))
-                               continue;
-
-                       pageset = zone->pageset + cpu;
-
-                       for (temperature = 0; temperature < 2; temperature++)
-                               printk("cpu %d %s: low %d, high %d, batch %d\n",
-                                       cpu,
-                                       temperature ? "cold" : "hot",
-                                       pageset->pcp[temperature].low,
-                                       pageset->pcp[temperature].high,
-                                       pageset->pcp[temperature].batch);
-               }
-       }
-
-       get_page_state(&ps);
-       get_zone_counts(&active, &inactive, &free);
-
-       printk("\nFree pages: %11ukB (%ukB HighMem)\n",
-               K(nr_free_pages()),
-               K(nr_free_highpages()));
-
-       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
-               "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
-               active,
-               inactive,
-               ps.nr_dirty,
-               ps.nr_writeback,
-               ps.nr_unstable,
-               nr_free_pages(),
-               ps.nr_slab,
-               ps.nr_mapped,
-               ps.nr_page_table_pages);
-
-       for_each_zone(zone) {
-               int i;
-
-               show_node(zone);
-               printk("%s"
-                       " free:%lukB"
-                       " min:%lukB"
-                       " low:%lukB"
-                       " high:%lukB"
-                       " active:%lukB"
-                       " inactive:%lukB"
-                       " present:%lukB"
-                       "\n",
-                       zone->name,
-                       K(zone->free_pages),
-                       K(zone->pages_min),
-                       K(zone->pages_low),
-                       K(zone->pages_high),
-                       K(zone->nr_active),
-                       K(zone->nr_inactive),
-                       K(zone->present_pages)
-                       );
-               printk("protections[]:");
-               for (i = 0; i < MAX_NR_ZONES; i++)
-                       printk(" %lu", zone->protection[i]);
-               printk("\n");
-       }
-
-       for_each_zone(zone) {
-               struct list_head *elem;
-               unsigned long nr, flags, order, total = 0;
-
-               show_node(zone);
-               printk("%s: ", zone->name);
-               if (!zone->present_pages) {
-                       printk("empty\n");
-                       continue;
-               }
-
-               spin_lock_irqsave(&zone->lock, flags);
-               for (order = 0; order < MAX_ORDER; order++) {
-                       nr = 0;
-                       list_for_each(elem, &zone->free_area[order].free_list)
-                               ++nr;
-                       total += nr << order;
-                       printk("%lu*%lukB ", nr, K(1UL) << order);
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
-               printk("= %lukB\n", K(total));
-       }
-
-       show_swap_cache_info();
-}
-
-/*
- * Builds allocation fallback zone lists.
- */
-static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
-{
-       switch (k) {
-               struct zone *zone;
-       default:
-               BUG();
-       case ZONE_HIGHMEM:
-               zone = pgdat->node_zones + ZONE_HIGHMEM;
-               if (zone->present_pages) {
-#ifndef CONFIG_HIGHMEM
-                       BUG();
-#endif
-                       zonelist->zones[j++] = zone;
-               }
-       case ZONE_NORMAL:
-               zone = pgdat->node_zones + ZONE_NORMAL;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       case ZONE_DMA:
-               zone = pgdat->node_zones + ZONE_DMA;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       }
-
-       return j;
-}
-
-#ifdef CONFIG_NUMA
-#define MAX_NODE_LOAD (numnodes)
-static int __initdata node_load[MAX_NUMNODES];
-/**
- * find_next_best_node - find the next node that should appear in a given
- *    node's fallback list
- * @node: node whose fallback list we're appending
- * @used_node_mask: pointer to the bitmap of already used nodes
- *
- * We use a number of factors to determine which is the next node that should
- * appear on a given node's fallback list.  The node should not have appeared
- * already in @node's fallback list, and it should be the next closest node
- * according to the distance array (which contains arbitrary distance values
- * from each node to each node in the system), and should also prefer nodes
- * with no CPUs, since presumably they'll have very little allocation pressure
- * on them otherwise.
- * It returns -1 if no node is found.
- */
-static int __init find_next_best_node(int node, void *used_node_mask)
-{
-       int i, n, val;
-       int min_val = INT_MAX;
-       int best_node = -1;
-
-       for (i = 0; i < numnodes; i++) {
-               cpumask_t tmp;
-
-               /* Start from local node */
-               n = (node+i)%numnodes;
-
-               /* Don't want a node to appear more than once */
-               if (test_bit(n, used_node_mask))
-                       continue;
-
-               /* Use the distance array to find the distance */
-               val = node_distance(node, n);
-
-               /* Give preference to headless and unused nodes */
-               tmp = node_to_cpumask(n);
-               if (!cpus_empty(tmp))
-                       val += PENALTY_FOR_NODE_WITH_CPUS;
-
-               /* Slight preference for less loaded node */
-               val *= (MAX_NODE_LOAD*MAX_NUMNODES);
-               val += node_load[n];
-
-               if (val < min_val) {
-                       min_val = val;
-                       best_node = n;
-               }
-       }
-
-       if (best_node >= 0)
-               set_bit(best_node, used_node_mask);
-
-       return best_node;
-}
-
-static void __init build_zonelists(pg_data_t *pgdat)
-{
-       int i, j, k, node, local_node;
-       int prev_node, load;
-       struct zonelist *zonelist;
-       DECLARE_BITMAP(used_mask, MAX_NUMNODES);
-
-       /* initialize zonelists */
-       for (i = 0; i < GFP_ZONETYPES; i++) {
-               zonelist = pgdat->node_zonelists + i;
-               memset(zonelist, 0, sizeof(*zonelist));
-               zonelist->zones[0] = NULL;
-       }
-
-       /* NUMA-aware ordering of nodes */
-       local_node = pgdat->node_id;
-       load = numnodes;
-       prev_node = local_node;
-       bitmap_zero(used_mask, MAX_NUMNODES);
-       while ((node = find_next_best_node(local_node, used_mask)) >= 0) {
-               /*
-                * We don't want to pressure a particular node.
-                * So adding penalty to the first node in same
-                * distance group to make it round-robin.
-                */
-               if (node_distance(local_node, node) !=
-                               node_distance(local_node, prev_node))
-                       node_load[node] += load;
-               prev_node = node;
-               load--;
-               for (i = 0; i < GFP_ZONETYPES; i++) {
-                       zonelist = pgdat->node_zonelists + i;
-                       for (j = 0; zonelist->zones[j] != NULL; j++);
-
-                       k = ZONE_NORMAL;
-                       if (i & __GFP_HIGHMEM)
-                               k = ZONE_HIGHMEM;
-                       if (i & __GFP_DMA)
-                               k = ZONE_DMA;
-
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
-                       zonelist->zones[j] = NULL;
-               }
-       }
-}
-
-#else  /* CONFIG_NUMA */
-
-static void __init build_zonelists(pg_data_t *pgdat)
-{
-       int i, j, k, node, local_node;
-
-       local_node = pgdat->node_id;
-       for (i = 0; i < GFP_ZONETYPES; i++) {
-               struct zonelist *zonelist;
-
-               zonelist = pgdat->node_zonelists + i;
-               memset(zonelist, 0, sizeof(*zonelist));
-
-               j = 0;
-               k = ZONE_NORMAL;
-               if (i & __GFP_HIGHMEM)
-                       k = ZONE_HIGHMEM;
-               if (i & __GFP_DMA)
-                       k = ZONE_DMA;
-
-               j = build_zonelists_node(pgdat, zonelist, j, k);
-               /*
-                * Now we build the zonelist so that it contains the zones
-                * of all the other nodes.
-                * We don't want to pressure a particular node, so when
-                * building the zones for node N, we make sure that the
-                * zones coming right after the local ones are those from
-                * node N+1 (modulo N)
-                */
-               for (node = local_node + 1; node < numnodes; node++)
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
-               for (node = 0; node < local_node; node++)
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
-               zonelist->zones[j] = NULL;
-       }
-}
-
-#endif /* CONFIG_NUMA */
-
-void __init build_all_zonelists(void)
-{
-       int i;
-
-       for(i = 0 ; i < numnodes ; i++)
-               build_zonelists(NODE_DATA(i));
-       printk("Built %i zonelists\n", numnodes);
-}
-
-/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE    256
-
-static inline unsigned long wait_table_size(unsigned long pages)
-{
-       unsigned long size = 1;
-
-       pages /= PAGES_PER_WAITQUEUE;
-
-       while (size < pages)
-               size <<= 1;
-
-       /*
-        * Once we have dozens or even hundreds of threads sleeping
-        * on IO we've got bigger problems than wait queue collision.
-        * Limit the size of the wait table to a reasonable size.
-        */
-       size = min(size, 4096UL);
-
-       return max(size, 4UL);
-}
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
-       return ffz(~size);
-}
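
Worked numbers for the two helpers above, assuming a zone of 131072 pages (512MB with 4kB pages): 131072 / PAGES_PER_WAITQUEUE = 512 is already a power of two and inside the [4, 4096] clamp, so wait_table_size() returns 512 and wait_table_bits() returns 9. The sketch below reimplements both in user space; the toy_* names are not the kernel's.

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256

static unsigned long toy_wait_table_size(unsigned long pages)
{
        unsigned long size = 1;

        pages /= PAGES_PER_WAITQUEUE;
        while (size < pages)
                size <<= 1;
        if (size > 4096UL)
                size = 4096UL;
        return size < 4UL ? 4UL : size;
}

static unsigned long toy_wait_table_bits(unsigned long size)
{
        unsigned long bits = 0;

        while (size > 1) {              /* log2 of a power of two */
                size >>= 1;
                bits++;
        }
        return bits;
}

int main(void)
{
        unsigned long pages = 131072;   /* hypothetical 512MB zone */
        unsigned long size = toy_wait_table_size(pages);

        printf("%lu pages -> %lu waitqueues, %lu hash bits\n",
               pages, size, toy_wait_table_bits(size));
        return 0;
}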
-
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
-static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
-               unsigned long *zones_size, unsigned long *zholes_size)
-{
-       unsigned long realtotalpages, totalpages = 0;
-       int i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               totalpages += zones_size[i];
-       pgdat->node_spanned_pages = totalpages;
-
-       realtotalpages = totalpages;
-       if (zholes_size)
-               for (i = 0; i < MAX_NR_ZONES; i++)
-                       realtotalpages -= zholes_size[i];
-       pgdat->node_present_pages = realtotalpages;
-       printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
-}
-
-
-/*
- * Initially all pages are reserved - free ones are freed
- * up by free_all_bootmem() once the early boot process is
- * done. Non-atomic initialization, single-pass.
- */
-void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
-               unsigned long zone, unsigned long start_pfn)
-{
-       struct page *page;
-
-       for (page = start; page < (start + size); page++) {
-               set_page_zone(page, NODEZONE(nid, zone));
-               set_page_count(page, 0);
-               SetPageReserved(page);
-               INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-               /* The shift won't overflow because ZONE_NORMAL is below 4G. */
-               if (!is_highmem_idx(zone))
-                       set_page_address(page, __va(start_pfn << PAGE_SHIFT));
-#endif
-               start_pfn++;
-       }
-}
-
-#ifndef __HAVE_ARCH_MEMMAP_INIT
-#define memmap_init(start, size, nid, zone, start_pfn) \
-       memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
-#endif
-
-/*
- * Set up the zone data structures:
- *   - mark all pages reserved
- *   - mark all memory queues empty
- *   - clear the memory bitmaps
- */
-static void __init free_area_init_core(struct pglist_data *pgdat,
-               unsigned long *zones_size, unsigned long *zholes_size)
-{
-       unsigned long i, j;
-       const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
-       int cpu, nid = pgdat->node_id;
-       struct page *lmem_map = pgdat->node_mem_map;
-       unsigned long zone_start_pfn = pgdat->node_start_pfn;
-
-       pgdat->nr_zones = 0;
-       init_waitqueue_head(&pgdat->kswapd_wait);
-       
-       for (j = 0; j < MAX_NR_ZONES; j++) {
-               struct zone *zone = pgdat->node_zones + j;
-               unsigned long size, realsize;
-               unsigned long batch;
-
-               zone_table[NODEZONE(nid, j)] = zone;
-               realsize = size = zones_size[j];
-               if (zholes_size)
-                       realsize -= zholes_size[j];
-
-               if (j == ZONE_DMA || j == ZONE_NORMAL)
-                       nr_kernel_pages += realsize;
-               nr_all_pages += realsize;
-
-               zone->spanned_pages = size;
-               zone->present_pages = realsize;
-               zone->name = zone_names[j];
-               spin_lock_init(&zone->lock);
-               spin_lock_init(&zone->lru_lock);
-               zone->zone_pgdat = pgdat;
-               zone->free_pages = 0;
-
-               zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
-
-               /*
-                * The per-cpu-pages pools are set to around 1000th of the
-                * size of the zone.  But no more than 1/4 of a meg - there's
-                * no point in going beyond the size of L2 cache.
-                *
-                * OK, so we don't know how big the cache is.  So guess.
-                */
-               batch = zone->present_pages / 1024;
-               if (batch * PAGE_SIZE > 256 * 1024)
-                       batch = (256 * 1024) / PAGE_SIZE;
-               batch /= 4;             /* We effectively *= 4 below */
-               if (batch < 1)
-                       batch = 1;
-
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &zone->pageset[cpu].pcp[0];       /* hot */
-                       pcp->count = 0;
-                       pcp->low = 2 * batch;
-                       pcp->high = 6 * batch;
-                       pcp->batch = 1 * batch;
-                       INIT_LIST_HEAD(&pcp->list);
-
-                       pcp = &zone->pageset[cpu].pcp[1];       /* cold */
-                       pcp->count = 0;
-                       pcp->low = 0;
-                       pcp->high = 2 * batch;
-                       pcp->batch = 1 * batch;
-                       INIT_LIST_HEAD(&pcp->list);
-               }
-               printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
-                               zone_names[j], realsize, batch);
-               INIT_LIST_HEAD(&zone->active_list);
-               INIT_LIST_HEAD(&zone->inactive_list);
-               zone->nr_scan_active = 0;
-               zone->nr_scan_inactive = 0;
-               zone->nr_active = 0;
-               zone->nr_inactive = 0;
-               if (!size)
-                       continue;
-
-               /*
-                * The per-page waitqueue mechanism uses hashed waitqueues
-                * per zone.
-                */
-               zone->wait_table_size = wait_table_size(size);
-               zone->wait_table_bits =
-                       wait_table_bits(zone->wait_table_size);
-               zone->wait_table = (wait_queue_head_t *)
-                       alloc_bootmem_node(pgdat, zone->wait_table_size
-                                               * sizeof(wait_queue_head_t));
-
-               for(i = 0; i < zone->wait_table_size; ++i)
-                       init_waitqueue_head(zone->wait_table + i);
-
-               pgdat->nr_zones = j+1;
-
-               zone->zone_mem_map = lmem_map;
-               zone->zone_start_pfn = zone_start_pfn;
-
-               if ((zone_start_pfn) & (zone_required_alignment-1))
-                       printk("BUG: wrong zone alignment, it will crash\n");
-
-               memmap_init(lmem_map, size, nid, j, zone_start_pfn);
-
-               zone_start_pfn += size;
-               lmem_map += size;
-
-               for (i = 0; ; i++) {
-                       unsigned long bitmap_size;
-
-                       INIT_LIST_HEAD(&zone->free_area[i].free_list);
-                       if (i == MAX_ORDER-1) {
-                               zone->free_area[i].map = NULL;
-                               break;
-                       }
-
-                       /*
-                        * Page buddy system uses "index >> (i+1)",
-                        * where "index" is at most "size-1".
-                        *
-                        * The extra "+3" is to round down to byte
-                        * size (8 bits per byte assumption). Thus
-                        * we get "(size-1) >> (i+4)" as the last byte
-                        * we can access.
-                        *
-                        * The "+1" is because we want to round the
-                        * byte allocation up rather than down. So
-                        * we should have had a "+7" before we shifted
-                        * down by three. Also, we have to add one as
-                        * we actually _use_ the last bit (it's [0,n]
-                        * inclusive, not [0,n[).
-                        *
-                        * So we actually had +7+1 before we shift
-                        * down by 3. But (n+8) >> 3 == (n >> 3) + 1
-                        * (modulo overflows, which we do not have).
-                        *
-                        * Finally, we LONG_ALIGN because all bitmap
-                        * operations are on longs.
-                        */
-                       bitmap_size = (size-1) >> (i+4);
-                       bitmap_size = LONG_ALIGN(bitmap_size+1);
-                       zone->free_area[i].map = 
-                         (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
-               }
-       }
-}
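
Worked numbers for two of the sizing rules inside free_area_init_core(), assuming a hypothetical zone of 262144 present pages (1GB with 4kB pages): the per-cpu batch starts as 262144/1024 = 256, is capped at 256kB worth of pages (64) and divided by four, giving 16; and the order-0 buddy bitmap needs LONG_ALIGN(((262144-1) >> 4) + 1) = 16384 bytes, i.e. one bit per pair of pages rounded up to whole longs, exactly as the long comment above derives.

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL
#define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

int main(void)
{
        unsigned long present = 262144;         /* hypothetical 1GB zone */
        unsigned long batch, bitmap0;

        /* per-cpu batch: ~1/1000 of the zone, capped at 256kB, then /4 */
        batch = present / 1024;
        if (batch * TOY_PAGE_SIZE > 256 * 1024)
                batch = (256 * 1024) / TOY_PAGE_SIZE;
        batch /= 4;
        if (batch < 1)
                batch = 1;

        /* order-0 buddy bitmap: one bit per pair of pages, rounded to longs */
        bitmap0 = LONG_ALIGN(((present - 1) >> 4) + 1);

        printf("pcp batch: %lu pages, order-0 bitmap: %lu bytes\n",
               batch, bitmap0);
        return 0;
}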
-
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
-               struct page *node_mem_map, unsigned long *zones_size,
-               unsigned long node_start_pfn, unsigned long *zholes_size)
-{
-       unsigned long size;
-
-       pgdat->node_id = nid;
-       pgdat->node_start_pfn = node_start_pfn;
-       calculate_zone_totalpages(pgdat, zones_size, zholes_size);
-       if (!node_mem_map) {
-               size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
-               node_mem_map = alloc_bootmem_node(pgdat, size);
-       }
-       pgdat->node_mem_map = node_mem_map;
-
-       free_area_init_core(pgdat, zones_size, zholes_size);
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
-EXPORT_SYMBOL(contig_page_data);
-
-void __init free_area_init(unsigned long *zones_size)
-{
-       free_area_init_node(0, &contig_page_data, NULL, zones_size,
-                       __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-       mem_map = contig_page_data.node_mem_map;
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-static void *frag_start(struct seq_file *m, loff_t *pos)
-{
-       pg_data_t *pgdat;
-       loff_t node = *pos;
-
-       for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
-               --node;
-
-       return pgdat;
-}
-
-static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
-{
-       pg_data_t *pgdat = (pg_data_t *)arg;
-
-       (*pos)++;
-       return pgdat->pgdat_next;
-}
-
-static void frag_stop(struct seq_file *m, void *arg)
-{
-}
-
-/* 
- * This walks the freelist for each zone. Whilst this is slow, I'd rather 
- * be slow here than slow down the fast path by keeping stats - mjbligh
- */
-static int frag_show(struct seq_file *m, void *arg)
-{
-       pg_data_t *pgdat = (pg_data_t *)arg;
-       struct zone *zone;
-       struct zone *node_zones = pgdat->node_zones;
-       unsigned long flags;
-       int order;
-
-       for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
-               if (!zone->present_pages)
-                       continue;
-
-               spin_lock_irqsave(&zone->lock, flags);
-               seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-               for (order = 0; order < MAX_ORDER; ++order) {
-                       unsigned long nr_bufs = 0;
-                       struct list_head *elem;
-
-                       list_for_each(elem, &(zone->free_area[order].free_list))
-                               ++nr_bufs;
-                       seq_printf(m, "%6lu ", nr_bufs);
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
-               seq_putc(m, '\n');
-       }
-       return 0;
-}
-
-struct seq_operations fragmentation_op = {
-       .start  = frag_start,
-       .next   = frag_next,
-       .stop   = frag_stop,
-       .show   = frag_show,
-};
-
-static char *vmstat_text[] = {
-       "nr_dirty",
-       "nr_writeback",
-       "nr_unstable",
-       "nr_page_table_pages",
-       "nr_mapped",
-       "nr_slab",
-
-       "pgpgin",
-       "pgpgout",
-       "pswpin",
-       "pswpout",
-       "pgalloc_high",
-
-       "pgalloc_normal",
-       "pgalloc_dma",
-       "pgfree",
-       "pgactivate",
-       "pgdeactivate",
-
-       "pgfault",
-       "pgmajfault",
-       "pgrefill_high",
-       "pgrefill_normal",
-       "pgrefill_dma",
-
-       "pgsteal_high",
-       "pgsteal_normal",
-       "pgsteal_dma",
-       "pgscan_kswapd_high",
-       "pgscan_kswapd_normal",
-
-       "pgscan_kswapd_dma",
-       "pgscan_direct_high",
-       "pgscan_direct_normal",
-       "pgscan_direct_dma",
-       "pginodesteal",
-
-       "slabs_scanned",
-       "kswapd_steal",
-       "kswapd_inodesteal",
-       "pageoutrun",
-       "allocstall",
-
-       "pgrotated",
-};
-
-static void *vmstat_start(struct seq_file *m, loff_t *pos)
-{
-       struct page_state *ps;
-
-       if (*pos >= ARRAY_SIZE(vmstat_text))
-               return NULL;
-
-       ps = kmalloc(sizeof(*ps), GFP_KERNEL);
-       m->private = ps;
-       if (!ps)
-               return ERR_PTR(-ENOMEM);
-       get_full_page_state(ps);
-       ps->pgpgin /= 2;                /* sectors -> kbytes */
-       ps->pgpgout /= 2;
-       return (unsigned long *)ps + *pos;
-}
-
-static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
-{
-       (*pos)++;
-       if (*pos >= ARRAY_SIZE(vmstat_text))
-               return NULL;
-       return (unsigned long *)m->private + *pos;
-}
-
-static int vmstat_show(struct seq_file *m, void *arg)
-{
-       unsigned long *l = arg;
-       unsigned long off = l - (unsigned long *)m->private;
-
-       seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
-       return 0;
-}
-
-static void vmstat_stop(struct seq_file *m, void *arg)
-{
-       kfree(m->private);
-       m->private = NULL;
-}
-
-struct seq_operations vmstat_op = {
-       .start  = vmstat_start,
-       .next   = vmstat_next,
-       .stop   = vmstat_stop,
-       .show   = vmstat_show,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int page_alloc_cpu_notify(struct notifier_block *self,
-                                unsigned long action, void *hcpu)
-{
-       int cpu = (unsigned long)hcpu;
-       long *count;
-
-       if (action == CPU_DEAD) {
-               /* Drain local pagecache count. */
-               count = &per_cpu(nr_pagecache_local, cpu);
-               atomic_add(*count, &nr_pagecache);
-               *count = 0;
-               local_irq_disable();
-               __drain_pages(cpu);
-               local_irq_enable();
-       }
-       return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void __init page_alloc_init(void)
-{
-       hotcpu_notifier(page_alloc_cpu_notify, 0);
-}
-
-static unsigned long higherzone_val(struct zone *z, int max_zone,
-                                       int alloc_type)
-{
-       int z_idx = zone_idx(z);
-       struct zone *higherzone;
-       unsigned long pages;
-
-       /* there is no higher zone to get a contribution from */
-       if (z_idx == MAX_NR_ZONES-1)
-               return 0;
-
-       higherzone = &z->zone_pgdat->node_zones[z_idx+1];
-
-       /* We always start with the higher zone's protection value */
-       pages = higherzone->protection[alloc_type];
-
-       /*
-        * We get a lower-zone-protection contribution only if there are
-        * pages in the higher zone and if we're not the highest zone
-        * in the current zonelist.  e.g., never happens for GFP_DMA. Happens
-        * only for ZONE_DMA in a GFP_KERNEL allocation and happens for ZONE_DMA
-        * and ZONE_NORMAL for a GFP_HIGHMEM allocation.
-        */
-       if (higherzone->present_pages && z_idx < alloc_type)
-               pages += higherzone->pages_low * sysctl_lower_zone_protection;
-
-       return pages;
-}
-
-/*
- * setup_per_zone_protection - called whenever min_free_kbytes or
- *     sysctl_lower_zone_protection changes.  Ensures that each zone
- *     has a correct pages_protected value, so an adequate number of
- *     pages are left in the zone after a successful __alloc_pages().
- *
- *	This algorithm is way confusing.  It tries to keep the same behavior
- *     as we had with the incremental min iterative algorithm.
- */
-static void setup_per_zone_protection(void)
-{
-       struct pglist_data *pgdat;
-       struct zone *zones, *zone;
-       int max_zone;
-       int i, j;
-
-       for_each_pgdat(pgdat) {
-               zones = pgdat->node_zones;
-
-               for (i = 0, max_zone = 0; i < MAX_NR_ZONES; i++)
-                       if (zones[i].present_pages)
-                               max_zone = i;
-
-               /*
-                * For each of the different allocation types:
-                * GFP_DMA -> GFP_KERNEL -> GFP_HIGHMEM
-                */
-               for (i = 0; i < GFP_ZONETYPES; i++) {
-                       /*
-                        * For each of the zones:
-                        * ZONE_HIGHMEM -> ZONE_NORMAL -> ZONE_DMA
-                        */
-                       for (j = MAX_NR_ZONES-1; j >= 0; j--) {
-                               zone = &zones[j];
-
-                               /*
-                                * We never protect zones that don't have memory
-                                * in them (j>max_zone) or zones that aren't in
-                                * the zonelists for a certain type of
-                                * allocation (j>i).  We have to assign these to
-                                * zero because the lower zones take
-                                * contributions from the higher zones.
-                                */
-                               if (j > max_zone || j > i) {
-                                       zone->protection[i] = 0;
-                                       continue;
-                               }
-                               /*
-                                * The contribution of the next higher zone
-                                */
-                               zone->protection[i] = higherzone_val(zone,
-                                                               max_zone, i);
-                               zone->protection[i] += zone->pages_low;
-                       }
-               }
-       }
-}
-
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures 
- *     that the pages_{min,low,high} values for each zone are set correctly 
- *     with respect to min_free_kbytes.
- */
-static void setup_per_zone_pages_min(void)
-{
-       unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
-       unsigned long lowmem_pages = 0;
-       struct zone *zone;
-       unsigned long flags;
-
-       /* Calculate total number of !ZONE_HIGHMEM pages */
-       for_each_zone(zone) {
-               if (!is_highmem(zone))
-                       lowmem_pages += zone->present_pages;
-       }
-
-       for_each_zone(zone) {
-               spin_lock_irqsave(&zone->lru_lock, flags);
-               if (is_highmem(zone)) {
-                       /*
-                        * Often, highmem doesn't need to reserve any pages.
-                        * But the pages_min/low/high values are also used for
-                        * batching up page reclaim activity so we need a
-                        * decent value here.
-                        */
-                       int min_pages;
-
-                       min_pages = zone->present_pages / 1024;
-                       if (min_pages < SWAP_CLUSTER_MAX)
-                               min_pages = SWAP_CLUSTER_MAX;
-                       if (min_pages > 128)
-                               min_pages = 128;
-                       zone->pages_min = min_pages;
-               } else {
-                       /* if it's a lowmem zone, reserve a number of pages 
-                        * proportionate to the zone's size.
-                        */
-                       zone->pages_min = (pages_min * zone->present_pages) / 
-                                          lowmem_pages;
-               }
-
-               zone->pages_low = zone->pages_min * 2;
-               zone->pages_high = zone->pages_min * 3;
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
-       }
-}
-
-/*
- * Initialise min_free_kbytes.
- *
- * For small machines we want it small (128k min).  For large machines
- * we want it large (16MB max).  But it is not linear, because network
- * bandwidth does not increase linearly with machine size.  We use
- *
- *     min_free_kbytes = sqrt(lowmem_kbytes)
- *
- * which yields
- *
- * 16MB:       128k
- * 32MB:       181k
- * 64MB:       256k
- * 128MB:      362k
- * 256MB:      512k
- * 512MB:      724k
- * 1024MB:     1024k
- * 2048MB:     1448k
- * 4096MB:     2048k
- * 8192MB:     2896k
- * 16384MB:    4096k
- */
-static int __init init_per_zone_pages_min(void)
-{
-       unsigned long lowmem_kbytes;
-
-       lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
-
-       min_free_kbytes = int_sqrt(lowmem_kbytes);
-       if (min_free_kbytes < 128)
-               min_free_kbytes = 128;
-       if (min_free_kbytes > 16384)
-               min_free_kbytes = 16384;
-       setup_per_zone_pages_min();
-       setup_per_zone_protection();
-       return 0;
-}
-module_init(init_per_zone_pages_min)
-
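As a worked check of the sqrt() formula and table above, a small user-space sketch (the 1024MB lowmem figure is an arbitrary assumption; the kernel itself uses int_sqrt() on nr_free_buffer_pages()):

#include <stdio.h>

/* Simple integer square root by linear search (stand-in for int_sqrt()). */
static unsigned long isqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        unsigned long lowmem_kbytes = 1024 * 1024;      /* assume 1024MB of lowmem */
        unsigned long min_free_kbytes = isqrt(lowmem_kbytes);

        if (min_free_kbytes < 128)
                min_free_kbytes = 128;
        if (min_free_kbytes > 16384)
                min_free_kbytes = 16384;

        /* Prints 1024k, matching the 1024MB row of the table above. */
        printf("min_free_kbytes = %luk\n", min_free_kbytes);
        return 0;
}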
-/*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
- *     that we can call two helper functions whenever min_free_kbytes
- *     changes.
- */
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
-               struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
-{
-       proc_dointvec(table, write, file, buffer, length, ppos);
-       setup_per_zone_pages_min();
-       setup_per_zone_protection();
-       return 0;
-}
-
-/*
- * lower_zone_protection_sysctl_handler - just a wrapper around
- *     proc_dointvec() so that we can call setup_per_zone_protection()
- *     whenever sysctl_lower_zone_protection changes.
- */
-int lower_zone_protection_sysctl_handler(ctl_table *table, int write,
-                struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
-{
-       proc_dointvec_minmax(table, write, file, buffer, length, ppos);
-       setup_per_zone_protection();
-       return 0;
-}
-
-/*
- * allocate a large system hash table from bootmem
- * - it is assumed that the hash table must contain an exact power-of-2
- *   quantity of entries
- */
-void *__init alloc_large_system_hash(const char *tablename,
-                                    unsigned long bucketsize,
-                                    unsigned long numentries,
-                                    int scale,
-                                    int consider_highmem,
-                                    unsigned int *_hash_shift,
-                                    unsigned int *_hash_mask)
-{
-       unsigned long mem, max, log2qty, size;
-       void *table;
-
-       /* round applicable memory size up to nearest megabyte */
-       mem = consider_highmem ? nr_all_pages : nr_kernel_pages;
-       mem += (1UL << (20 - PAGE_SHIFT)) - 1;
-       mem >>= 20 - PAGE_SHIFT;
-       mem <<= 20 - PAGE_SHIFT;
-
-       /* limit to 1 bucket per 2^scale bytes of low memory (rounded up to
-        * nearest power of 2 in size) */
-       if (scale > PAGE_SHIFT)
-               mem >>= (scale - PAGE_SHIFT);
-       else
-               mem <<= (PAGE_SHIFT - scale);
-
-       mem = 1UL << (long_log2(mem) + 1);
-
-       /* limit allocation size */
-       max = (1UL << (PAGE_SHIFT + MAX_SYS_HASH_TABLE_ORDER)) / bucketsize;
-       if (max > mem)
-               max = mem;
-
-       /* allow the kernel cmdline to have a say */
-       if (!numentries || numentries > max)
-               numentries = max;
-
-       log2qty = long_log2(numentries);
-
-       do {
-               size = bucketsize << log2qty;
-
-               table = (void *) alloc_bootmem(size);
-
-       } while (!table && size > PAGE_SIZE);
-
-       if (!table)
-               panic("Failed to allocate %s hash table\n", tablename);
-
-       printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
-              tablename,
-              (1U << log2qty),
-              long_log2(size) - PAGE_SHIFT,
-              size);
-
-       if (_hash_shift)
-               *_hash_shift = log2qty;
-       if (_hash_mask)
-               *_hash_mask = (1 << log2qty) - 1;
-
-       return table;
-}
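For orientation, a hedged sketch of a hypothetical boot-time caller of the helper above; the table name, bucket type and scale value are invented for illustration and do not appear in this tree:

#include <linux/list.h>
#include <linux/bootmem.h>

static struct hlist_head *example_hash;         /* hypothetical table */
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

void __init example_hash_init(void)
{
        /* Roughly one bucket per 2^16 bytes of low memory; the helper
         * rounds the entry count to a power of two and panics on failure. */
        example_hash = alloc_large_system_hash("Example-cache",
                                               sizeof(struct hlist_head),
                                               0,      /* no cmdline override */
                                               16,     /* scale */
                                               0,      /* low memory only */
                                               &example_hash_shift,
                                               &example_hash_mask);

        /* A hash value is later reduced to a bucket index with
         * (hash & example_hash_mask). */
}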
diff --git a/linux-2.6.8.1-xen-sparse/net/core/skbuff.c b/linux-2.6.8.1-xen-sparse/net/core/skbuff.c
deleted file mode 100644 (file)
index 420d885..0000000
+++ /dev/null
@@ -1,1521 +0,0 @@
-/*
- *     Routines having to do with the 'struct sk_buff' memory handlers.
- *
- *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
- *                     Florian La Roche <rzsfl@rz.uni-sb.de>
- *
- *     Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
- *
- *     Fixes:
- *             Alan Cox        :       Fixed the worst of the load
- *                                     balancer bugs.
- *             Dave Platt      :       Interrupt stacking fix.
- *     Richard Kooijman        :       Timestamp fixes.
- *             Alan Cox        :       Changed buffer format.
- *             Alan Cox        :       destructor hook for AF_UNIX etc.
- *             Linus Torvalds  :       Better skb_clone.
- *             Alan Cox        :       Added skb_copy.
- *             Alan Cox        :       Added all the changed routines Linus
- *                                     only put in the headers
- *             Ray VanTassle   :       Fixed --skb->lock in free
- *             Alan Cox        :       skb_copy copy arp field
- *             Andi Kleen      :       slabified it.
- *             Robert Olsson   :       Removed skb_head_pool
- *
- *     NOTE:
- *             The __skb_ routines should be called with interrupts
- *     disabled, or you better be *real* sure that the operation is atomic
- *     with respect to whatever list is being frobbed (e.g. via lock_sock()
- *     or via disabling bottom half handlers, etc).
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-/*
- *     The functions in this file will not compile correctly with gcc 2.4.x
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#ifdef CONFIG_NET_CLS_ACT
-#include <net/pkt_sched.h>
-#endif
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/cache.h>
-#include <linux/rtnetlink.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-
-#include <net/protocol.h>
-#include <net/dst.h>
-#include <net/sock.h>
-#include <net/checksum.h>
-#include <net/xfrm.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
-static kmem_cache_t *skbuff_head_cache;
-
-/*
- *     Keep out-of-line to prevent kernel bloat.
- *     __builtin_return_address is not used because it is not always
- *     reliable.
- */
-
-/**
- *     skb_over_panic  -       private function
- *     @skb: buffer
- *     @sz: size
- *     @here: address
- *
- *     Out of line support code for skb_put(). Not user callable.
- */
-void skb_over_panic(struct sk_buff *skb, int sz, void *here)
-{
-       printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
-               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
-       BUG();
-}
-
-/**
- *     skb_under_panic -       private function
- *     @skb: buffer
- *     @sz: size
- *     @here: address
- *
- *     Out of line support code for skb_push(). Not user callable.
- */
-
-void skb_under_panic(struct sk_buff *skb, int sz, void *here)
-{
-       printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
-               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
-       BUG();
-}
-
-/*     Allocate a new skbuff. We do this ourselves so we can fill in a few
- *     'private' fields and also do memory statistics to find all the
- *     [BEEP] leaks.
- *
- */
-
-/**
- *     alloc_skb       -       allocate a network buffer
- *     @size: size to allocate
- *     @gfp_mask: allocation mask
- *
- *     Allocate a new &sk_buff. The returned buffer has no headroom and a
- *     tail room of size bytes. The object has a reference count of one.
- *     The return is the buffer. On a failure the return is %NULL.
- *
- *     Buffers may only be allocated from interrupts using a @gfp_mask of
- *     %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
-{
-       struct sk_buff *skb;
-       u8 *data;
-
-       /* Get the HEAD */
-       skb = kmem_cache_alloc(skbuff_head_cache,
-                              gfp_mask & ~__GFP_DMA);
-       if (!skb)
-               goto out;
-
-       /* Get the DATA. Size must match skb_add_mtu(). */
-       size = SKB_DATA_ALIGN(size);
-       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-       if (!data)
-               goto nodata;
-
-       memset(skb, 0, offsetof(struct sk_buff, truesize));
-       skb->truesize = size + sizeof(struct sk_buff);
-       atomic_set(&skb->users, 1);
-       skb->head = data;
-       skb->data = data;
-       skb->tail = data;
-       skb->end  = data + size;
-
-       atomic_set(&(skb_shinfo(skb)->dataref), 1);
-       skb_shinfo(skb)->nr_frags  = 0;
-       skb_shinfo(skb)->tso_size = 0;
-       skb_shinfo(skb)->tso_segs = 0;
-       skb_shinfo(skb)->frag_list = NULL;
-out:
-       return skb;
-nodata:
-       kmem_cache_free(skbuff_head_cache, skb);
-       skb = NULL;
-       goto out;
-}
-
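A minimal usage sketch for the allocator above (the 16-byte headroom and 128-byte payload are arbitrary assumptions; skb_reserve() and skb_put() are used the same way later in this file):

#include <linux/skbuff.h>

/* Sketch: build an skb carrying a 128-byte payload with 16 bytes of headroom. */
static struct sk_buff *example_build_skb(const void *payload)
{
        struct sk_buff *skb = alloc_skb(16 + 128, GFP_ATOMIC);

        if (!skb)
                return NULL;                            /* allocation failed */

        skb_reserve(skb, 16);                           /* headroom for headers pushed later */
        memcpy(skb_put(skb, 128), payload, 128);        /* payload goes into the tail room */
        return skb;
}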
-/**
- *     alloc_skb_from_cache    -       allocate a network buffer
- *     @cp: kmem_cache from which to allocate the data area
- *           (object size must be big enough for @size bytes + skb overheads)
- *     @size: size to allocate
- *     @gfp_mask: allocation mask
- *
- *     Allocate a new &sk_buff. The returned buffer has no headroom and a
- *     tail room of size bytes. The object has a reference count of one.
- *     The return is the buffer. On a failure the return is %NULL.
- *
- *     Buffers may only be allocated from interrupts using a @gfp_mask of
- *     %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-                                    unsigned int size, int gfp_mask)
-{
-       struct sk_buff *skb;
-       u8 *data;
-
-       /* Get the HEAD */
-       skb = kmem_cache_alloc(skbuff_head_cache,
-                              gfp_mask & ~__GFP_DMA);
-       if (!skb)
-               goto out;
-
-       /* Get the DATA. */
-       size = SKB_DATA_ALIGN(size);
-       data = kmem_cache_alloc(cp, gfp_mask);
-       if (!data)
-               goto nodata;
-
-       memset(skb, 0, offsetof(struct sk_buff, truesize));
-       skb->truesize = size + sizeof(struct sk_buff);
-       atomic_set(&skb->users, 1);
-       skb->head = data;
-       skb->data = data;
-       skb->tail = data;
-       skb->end  = data + size;
-
-       atomic_set(&(skb_shinfo(skb)->dataref), 1);
-       skb_shinfo(skb)->nr_frags  = 0;
-       skb_shinfo(skb)->tso_size = 0;
-       skb_shinfo(skb)->tso_segs = 0;
-       skb_shinfo(skb)->frag_list = NULL;
-out:
-       return skb;
-nodata:
-       kmem_cache_free(skbuff_head_cache, skb);
-       skb = NULL;
-       goto out;
-}
-
-
-static void skb_drop_fraglist(struct sk_buff *skb)
-{
-       struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-       skb_shinfo(skb)->frag_list = NULL;
-
-       do {
-               struct sk_buff *this = list;
-               list = list->next;
-               kfree_skb(this);
-       } while (list);
-}
-
-static void skb_clone_fraglist(struct sk_buff *skb)
-{
-       struct sk_buff *list;
-
-       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
-               skb_get(list);
-}
-
-void skb_release_data(struct sk_buff *skb)
-{
-       if (!skb->cloned ||
-           atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
-               if (skb_shinfo(skb)->nr_frags) {
-                       int i;
-                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-                               put_page(skb_shinfo(skb)->frags[i].page);
-               }
-
-               if (skb_shinfo(skb)->frag_list)
-                       skb_drop_fraglist(skb);
-
-               kfree(skb->head);
-       }
-}
-
-/*
- *     Free an skbuff's memory without cleaning the state.
- */
-void kfree_skbmem(struct sk_buff *skb)
-{
-       skb_release_data(skb);
-       kmem_cache_free(skbuff_head_cache, skb);
-}
-
-/**
- *     __kfree_skb - private function
- *     @skb: buffer
- *
- *     Free an sk_buff. Release anything attached to the buffer.
- *     Clean the state. This is an internal helper function. Users should
- *     always call kfree_skb instead.
- */
-
-void __kfree_skb(struct sk_buff *skb)
-{
-       if (skb->list) {
-               printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
-                      "on a list (from %p).\n", NET_CALLER(skb));
-               BUG();
-       }
-
-       dst_release(skb->dst);
-#ifdef CONFIG_XFRM
-       secpath_put(skb->sp);
-#endif
-       if(skb->destructor) {
-               if (in_irq())
-                       printk(KERN_WARNING "Warning: kfree_skb on "
-                                           "hard IRQ %p\n", NET_CALLER(skb));
-               skb->destructor(skb);
-       }
-#ifdef CONFIG_NETFILTER
-       nf_conntrack_put(skb->nfct);
-#ifdef CONFIG_BRIDGE_NETFILTER
-       nf_bridge_put(skb->nf_bridge);
-#endif
-#endif
-/* XXX: IS this still necessary? - JHS */
-#ifdef CONFIG_NET_SCHED
-       skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = 0;
-       skb->tc_classid = 0;
-#endif
-#endif
-
-       kfree_skbmem(skb);
-}
-
-/**
- *     skb_clone       -       duplicate an sk_buff
- *     @skb: buffer to clone
- *     @gfp_mask: allocation priority
- *
- *     Duplicate an &sk_buff. The new one is not owned by a socket. Both
- *     copies share the same packet data but not structure. The new
- *     buffer has a reference count of 1. If the allocation fails the
- *     function returns %NULL otherwise the new buffer is returned.
- *
- *     If this function is called from an interrupt, @gfp_mask must be
- *     %GFP_ATOMIC.
- */
-
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
-{
-       struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
-
-       if (!n) 
-               return NULL;
-
-#define C(x) n->x = skb->x
-
-       n->next = n->prev = NULL;
-       n->list = NULL;
-       n->sk = NULL;
-       C(stamp);
-       C(dev);
-       C(real_dev);
-       C(h);
-       C(nh);
-       C(mac);
-       C(dst);
-       dst_clone(skb->dst);
-       C(sp);
-#ifdef CONFIG_INET
-       secpath_get(skb->sp);
-#endif
-       memcpy(n->cb, skb->cb, sizeof(skb->cb));
-       C(len);
-       C(data_len);
-       C(csum);
-       C(local_df);
-       n->cloned = 1;
-       C(pkt_type);
-       C(ip_summed);
-       C(priority);
-       C(protocol);
-       C(security);
-       n->destructor = NULL;
-#ifdef CONFIG_NETFILTER
-       C(nfmark);
-       C(nfcache);
-       C(nfct);
-       nf_conntrack_get(skb->nfct);
-#ifdef CONFIG_NETFILTER_DEBUG
-       C(nf_debug);
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       C(nf_bridge);
-       nf_bridge_get(skb->nf_bridge);
-#endif
-#endif /*CONFIG_NETFILTER*/
-#if defined(CONFIG_HIPPI)
-       C(private);
-#endif
-#ifdef CONFIG_NET_SCHED
-       C(tc_index);
-#ifdef CONFIG_NET_CLS_ACT
-       n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-       n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd);
-       n->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
-       C(input_dev);
-       C(tc_classid);
-#endif
-
-#endif
-       C(truesize);
-       atomic_set(&n->users, 1);
-       C(head);
-       C(data);
-       C(tail);
-       C(end);
-
-       atomic_inc(&(skb_shinfo(skb)->dataref));
-       skb->cloned = 1;
-
-       return n;
-}
-
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
-{
-       /*
-        *      Shift between the two data areas in bytes
-        */
-       unsigned long offset = new->data - old->data;
-
-       new->list       = NULL;
-       new->sk         = NULL;
-       new->dev        = old->dev;
-       new->real_dev   = old->real_dev;
-       new->priority   = old->priority;
-       new->protocol   = old->protocol;
-       new->dst        = dst_clone(old->dst);
-#ifdef CONFIG_INET
-       new->sp         = secpath_get(old->sp);
-#endif
-       new->h.raw      = old->h.raw + offset;
-       new->nh.raw     = old->nh.raw + offset;
-       new->mac.raw    = old->mac.raw + offset;
-       memcpy(new->cb, old->cb, sizeof(old->cb));
-       new->local_df   = old->local_df;
-       new->pkt_type   = old->pkt_type;
-       new->stamp      = old->stamp;
-       new->destructor = NULL;
-       new->security   = old->security;
-#ifdef CONFIG_NETFILTER
-       new->nfmark     = old->nfmark;
-       new->nfcache    = old->nfcache;
-       new->nfct       = old->nfct;
-       nf_conntrack_get(old->nfct);
-#ifdef CONFIG_NETFILTER_DEBUG
-       new->nf_debug   = old->nf_debug;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       new->nf_bridge  = old->nf_bridge;
-       nf_bridge_get(old->nf_bridge);
-#endif
-#endif
-#ifdef CONFIG_NET_SCHED
-#ifdef CONFIG_NET_CLS_ACT
-       new->tc_verd = old->tc_verd;
-#endif
-       new->tc_index   = old->tc_index;
-#endif
-       atomic_set(&new->users, 1);
-}
-
-/**
- *     skb_copy        -       create private copy of an sk_buff
- *     @skb: buffer to copy
- *     @gfp_mask: allocation priority
- *
- *     Make a copy of both an &sk_buff and its data. This is used when the
- *     caller wishes to modify the data and needs a private copy of the
- *     data to alter. Returns %NULL on failure or the pointer to the buffer
- *     on success. The returned buffer has a reference count of 1.
- *
- *     As a by-product this function converts a non-linear &sk_buff to a
- *     linear one, so that the &sk_buff becomes completely private and the
- *     caller is allowed to modify all the data of the returned buffer. This
- *     means the function is not recommended in circumstances where only the
- *     header is going to be modified. Use pskb_copy() instead.
- */
-
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
-{
-       int headerlen = skb->data - skb->head;
-       /*
-        *      Allocate the copy buffer
-        */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-                                     gfp_mask);
-       if (!n)
-               return NULL;
-
-       /* Set the data pointer */
-       skb_reserve(n, headerlen);
-       /* Set the tail pointer and length */
-       skb_put(n, skb->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
-
-       if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
-               BUG();
-
-       copy_skb_header(n, skb);
-       return n;
-}
-
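To illustrate the distinction the two helpers draw, a hedged sketch: a clone shares the packet data and must be treated as read-only, while skb_copy() returns a fully private, linear buffer that may be modified.

#include <linux/skbuff.h>

/* Sketch: pick between sharing and copying depending on whether we will write. */
static struct sk_buff *example_get_buffer(struct sk_buff *skb, int will_modify)
{
        if (!will_modify)
                return skb_clone(skb, GFP_ATOMIC);      /* new struct, shared data */

        return skb_copy(skb, GFP_ATOMIC);               /* private copy of all data */
}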
-
-/**
- *     pskb_copy       -       create copy of an sk_buff with private head.
- *     @skb: buffer to copy
- *     @gfp_mask: allocation priority
- *
- *     Make a copy of both an &sk_buff and part of its data, located
- *     in the header. Fragmented data remain shared. This is used when
- *     the caller wishes to modify only the header of an &sk_buff and
- *     needs a private copy of the header to alter. Returns %NULL on
- *     failure or the pointer to the buffer on success.
- *     The returned buffer has a reference count of 1.
- */
-
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
-{
-       /*
-        *      Allocate the copy buffer
-        */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
-       if (!n)
-               goto out;
-
-       /* Set the data pointer */
-       skb_reserve(n, skb->data - skb->head);
-       /* Set the tail pointer and length */
-       skb_put(n, skb_headlen(skb));
-       /* Copy the bytes */
-       memcpy(n->data, skb->data, n->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
-
-       n->data_len  = skb->data_len;
-       n->len       = skb->len;
-
-       if (skb_shinfo(skb)->nr_frags) {
-               int i;
-
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-                       get_page(skb_shinfo(n)->frags[i].page);
-               }
-               skb_shinfo(n)->nr_frags = i;
-       }
-       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
-       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
-
-       if (skb_shinfo(skb)->frag_list) {
-               skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
-               skb_clone_fraglist(n);
-       }
-
-       copy_skb_header(n, skb);
-out:
-       return n;
-}
-
-/**
- *     pskb_expand_head - reallocate header of &sk_buff
- *     @skb: buffer to reallocate
- *     @nhead: room to add at head
- *     @ntail: room to add at tail
- *     @gfp_mask: allocation priority
- *
- *     Expands (or creates an identical copy, if &nhead and &ntail are zero)
- *     the header of the skb. The &sk_buff itself is not changed. The &sk_buff
- *     MUST have a reference count of 1. Returns zero on success, or an error
- *     if expansion failed. In the latter case, the &sk_buff is not changed.
- *
- *     All the pointers pointing into skb header may change and must be
- *     reloaded after call to this function.
- */
-
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
-{
-       int i;
-       u8 *data;
-       int size = nhead + (skb->end - skb->head) + ntail;
-       long off;
-
-       if (skb_shared(skb))
-               BUG();
-
-       size = SKB_DATA_ALIGN(size);
-
-       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-       if (!data)
-               goto nodata;
-
-       /* Copy only real data... and, alas, the header. This should be
-        * optimized for the case where the header is empty. */
-       memcpy(data + nhead, skb->head, skb->tail - skb->head);
-       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-               get_page(skb_shinfo(skb)->frags[i].page);
-
-       if (skb_shinfo(skb)->frag_list)
-               skb_clone_fraglist(skb);
-
-       skb_release_data(skb);
-
-       off = (data + nhead) - skb->head;
-
-       skb->head     = data;
-       skb->end      = data + size;
-       skb->data    += off;
-       skb->tail    += off;
-       skb->mac.raw += off;
-       skb->h.raw   += off;
-       skb->nh.raw  += off;
-       skb->cloned   = 0;
-       atomic_set(&skb_shinfo(skb)->dataref, 1);
-       return 0;
-
-nodata:
-       return -ENOMEM;
-}
-
-/* Make private copy of skb with writable head and some headroom */
-
-struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
-{
-       struct sk_buff *skb2;
-       int delta = headroom - skb_headroom(skb);
-
-       if (delta <= 0)
-               skb2 = pskb_copy(skb, GFP_ATOMIC);
-       else {
-               skb2 = skb_clone(skb, GFP_ATOMIC);
-               if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
-                                            GFP_ATOMIC)) {
-                       kfree_skb(skb2);
-                       skb2 = NULL;
-               }
-       }
-       return skb2;
-}
-
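A hedged sketch of the usual pattern for the helper above (the 10-byte header is an invented example): obtain a private copy with enough headroom, drop the original, then push the new header.

#include <linux/skbuff.h>

/* Sketch: prepend a hypothetical 10-byte header, growing headroom if needed. */
static struct sk_buff *example_push_header(struct sk_buff *skb)
{
        struct sk_buff *nskb = skb_realloc_headroom(skb, 10);

        if (!nskb)
                return NULL;                    /* caller still owns skb */

        kfree_skb(skb);                         /* work on the private copy only */
        memset(skb_push(nskb, 10), 0, 10);      /* zeroed placeholder header */
        return nskb;
}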
-
-/**
- *     skb_copy_expand -       copy and expand sk_buff
- *     @skb: buffer to copy
- *     @newheadroom: new free bytes at head
- *     @newtailroom: new free bytes at tail
- *     @gfp_mask: allocation priority
- *
- *     Make a copy of both an &sk_buff and its data and while doing so
- *     allocate additional space.
- *
- *     This is used when the caller wishes to modify the data and needs a
- *     private copy of the data to alter as well as more space for new fields.
- *     Returns %NULL on failure or the pointer to the buffer
- *     on success. The returned buffer has a reference count of 1.
- *
- *     You must pass %GFP_ATOMIC as the allocation priority if this function
- *     is called from an interrupt.
- *
- *     BUG ALERT: ip_summed is not copied. Why does this work? Is it used
- *     only by netfilter in the cases when checksum is recalculated? --ANK
- */
-struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                               int newheadroom, int newtailroom, int gfp_mask)
-{
-       /*
-        *      Allocate the copy buffer
-        */
-       struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
-                                     gfp_mask);
-       int head_copy_len, head_copy_off;
-
-       if (!n)
-               return NULL;
-
-       skb_reserve(n, newheadroom);
-
-       /* Set the tail pointer and length */
-       skb_put(n, skb->len);
-
-       head_copy_len = skb_headroom(skb);
-       head_copy_off = 0;
-       if (newheadroom <= head_copy_len)
-               head_copy_len = newheadroom;
-       else
-               head_copy_off = newheadroom - head_copy_len;
-
-       /* Copy the linear header and data. */
-       if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
-                         skb->len + head_copy_len))
-               BUG();
-
-       copy_skb_header(n, skb);
-       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
-       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
-
-       return n;
-}
-
-/**
- *     skb_pad                 -       zero pad the tail of an skb
- *     @skb: buffer to pad
- *     @pad: space to pad
- *
- *     Ensure that a buffer is followed by a padding area that is zero
- *     filled. Used by network drivers which may DMA or transfer data
- *     beyond the buffer end onto the wire.
- *
- *     May return NULL in out of memory cases.
- */
-struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
-{
-       struct sk_buff *nskb;
-       
-       /* If the skbuff is non-linear, tailroom is always zero. */
-       if (skb_tailroom(skb) >= pad) {
-               memset(skb->data+skb->len, 0, pad);
-               return skb;
-       }
-       
-       nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
-       kfree_skb(skb);
-       if (nskb)
-               memset(nskb->data+nskb->len, 0, pad);
-       return nskb;
-}      
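A hedged sketch of how a driver might use the padding helper above (60 bytes stands in for the Ethernet minimum frame length); note that in this version skb_pad() frees the original buffer whenever it has to copy:

#include <linux/skbuff.h>

/* Sketch: make sure a frame is at least 60 bytes long before transmission. */
static struct sk_buff *example_pad_frame(struct sk_buff *skb)
{
        if (skb->len < 60) {
                skb = skb_pad(skb, 60 - skb->len);
                if (!skb)
                        return NULL;            /* padding failed, buffer gone */
        }
        return skb;
}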
-/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
- * If realloc==0 and trimming is impossible without changing the data,
- * it is a BUG().
- */
-
-int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
-{
-       int offset = skb_headlen(skb);
-       int nfrags = skb_shinfo(skb)->nr_frags;
-       int i;
-
-       for (i = 0; i < nfrags; i++) {
-               int end = offset + skb_shinfo(skb)->frags[i].size;
-               if (end > len) {
-                       if (skb_cloned(skb)) {
-                               if (!realloc)
-                                       BUG();
-                               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                                       return -ENOMEM;
-                       }
-                       if (len <= offset) {
-                               put_page(skb_shinfo(skb)->frags[i].page);
-                               skb_shinfo(skb)->nr_frags--;
-                       } else {
-                               skb_shinfo(skb)->frags[i].size = len - offset;
-                       }
-               }
-               offset = end;
-       }
-
-       if (offset < len) {
-               skb->data_len -= skb->len - len;
-               skb->len       = len;
-       } else {
-               if (len <= skb_headlen(skb)) {
-                       skb->len      = len;
-                       skb->data_len = 0;
-                       skb->tail     = skb->data + len;
-                       if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-                               skb_drop_fraglist(skb);
-               } else {
-                       skb->data_len -= skb->len - len;
-                       skb->len       = len;
-               }
-       }
-
-       return 0;
-}
-
-/**
- *     __pskb_pull_tail - advance tail of skb header
- *     @skb: buffer to reallocate
- *     @delta: number of bytes to advance tail
- *
- *     The function makes sense only on a fragmented &sk_buff: it expands
- *     the header, moving its tail forward and copying the necessary data
- *     from the fragmented part.
- *
- *     &sk_buff MUST have reference count of 1.
- *
- *     Returns %NULL (and the &sk_buff does not change) if the pull failed,
- *     or the value of the new tail of the skb on success.
- *
- *     All the pointers pointing into skb header may change and must be
- *     reloaded after call to this function.
- */
-
-/* Moves the tail of the skb head forward, copying data from the
- * fragmented part when necessary.
- * 1. It may fail due to malloc failure.
- * 2. It may change skb pointers.
- *
- * It is pretty complicated. Luckily, it is called only in exceptional cases.
- */
-unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
-{
-       /* If the skb does not have enough free space at the tail, get a new
-        * one plus 128 bytes for future expansions. If we have enough room
-        * at the tail, reallocate without expansion only if the skb is cloned.
-        */
-       int i, k, eat = (skb->tail + delta) - skb->end;
-
-       if (eat > 0 || skb_cloned(skb)) {
-               if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
-                                    GFP_ATOMIC))
-                       return NULL;
-       }
-
-       if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
-               BUG();
-
-       /* Optimization: no fragments, no reason to pre-estimate the
-        * size of the pulled pages. Superb.
-        */
-       if (!skb_shinfo(skb)->frag_list)
-               goto pull_pages;
-
-       /* Estimate size of pulled pages. */
-       eat = delta;
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               if (skb_shinfo(skb)->frags[i].size >= eat)
-                       goto pull_pages;
-               eat -= skb_shinfo(skb)->frags[i].size;
-       }
-
-       /* If we need to update the frag list, we are in trouble.
-        * Certainly, it is possible to add an offset to the skb data,
-        * but taking into account that pulling is expected to be a
-        * very rare operation, it is worth fighting further bloat of
-        * the skb head and crucifying ourselves here instead.
-        * Pure masochism, indeed. 8)8)
-        */
-       if (eat) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-               struct sk_buff *clone = NULL;
-               struct sk_buff *insp = NULL;
-
-               do {
-                       if (!list)
-                               BUG();
-
-                       if (list->len <= eat) {
-                               /* Eaten as whole. */
-                               eat -= list->len;
-                               list = list->next;
-                               insp = list;
-                       } else {
-                               /* Eaten partially. */
-
-                               if (skb_shared(list)) {
-                                       /* Sucks! We need to fork list. :-( */
-                                       clone = skb_clone(list, GFP_ATOMIC);
-                                       if (!clone)
-                                               return NULL;
-                                       insp = list->next;
-                                       list = clone;
-                               } else {
-                                       /* This may be pulled without
-                                        * problems. */
-                                       insp = list;
-                               }
-                               if (!pskb_pull(list, eat)) {
-                                       if (clone)
-                                               kfree_skb(clone);
-                                       return NULL;
-                               }
-                               break;
-                       }
-               } while (eat);
-
-               /* Free pulled out fragments. */
-               while ((list = skb_shinfo(skb)->frag_list) != insp) {
-                       skb_shinfo(skb)->frag_list = list->next;
-                       kfree_skb(list);
-               }
-               /* And insert new clone at head. */
-               if (clone) {
-                       clone->next = list;
-                       skb_shinfo(skb)->frag_list = clone;
-               }
-       }
-       /* Success! Now we may commit changes to skb data. */
-
-pull_pages:
-       eat = delta;
-       k = 0;
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               if (skb_shinfo(skb)->frags[i].size <= eat) {
-                       put_page(skb_shinfo(skb)->frags[i].page);
-                       eat -= skb_shinfo(skb)->frags[i].size;
-               } else {
-                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
-                       if (eat) {
-                               skb_shinfo(skb)->frags[k].page_offset += eat;
-                               skb_shinfo(skb)->frags[k].size -= eat;
-                               eat = 0;
-                       }
-                       k++;
-               }
-       }
-       skb_shinfo(skb)->nr_frags = k;
-
-       skb->tail     += delta;
-       skb->data_len -= delta;
-
-       return skb->tail;
-}
-
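A hedged sketch of the typical reason to call the function above (a hypothetical 20-byte protocol header): make sure the header bytes are contiguous in the linear area before parsing them. The sketch assumes the buffer holds at least 20 bytes and is not shared.

#include <linux/skbuff.h>

/* Sketch: linearize the first 20 bytes of a possibly fragmented skb. */
static int example_parse_header(struct sk_buff *skb)
{
        if (skb_headlen(skb) < 20 &&
            !__pskb_pull_tail(skb, 20 - skb_headlen(skb)))
                return -ENOMEM;                 /* could not pull the header in */

        /* skb->data now points at 20 contiguous, readable header bytes. */
        return 0;
}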
-/* Copy some data bits from skb to kernel buffer. */
-
-int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
-{
-       int i, copy;
-       int start = skb_headlen(skb);
-
-       if (offset > (int)skb->len - len)
-               goto fault;
-
-       /* Copy header. */
-       if ((copy = start - offset) > 0) {
-               if (copy > len)
-                       copy = len;
-               memcpy(to, skb->data + offset, copy);
-               if ((len -= copy) == 0)
-                       return 0;
-               offset += copy;
-               to     += copy;
-       }
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
-               if ((copy = end - offset) > 0) {
-                       u8 *vaddr;
-
-                       if (copy > len)
-                               copy = len;
-
-                       vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-                       memcpy(to,
-                              vaddr + skb_shinfo(skb)->frags[i].page_offset+
-                              offset - start, copy);
-                       kunmap_skb_frag(vaddr);
-
-                       if ((len -= copy) == 0)
-                               return 0;
-                       offset += copy;
-                       to     += copy;
-               }
-               start = end;
-       }
-
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               if (skb_copy_bits(list, offset - start,
-                                                 to, copy))
-                                       goto fault;
-                               if ((len -= copy) == 0)
-                                       return 0;
-                               offset += copy;
-                               to     += copy;
-                       }
-                       start = end;
-               }
-       }
-       if (!len)
-               return 0;
-
-fault:
-       return -EFAULT;
-}
-
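A small hedged sketch of the copy helper above: gather the first bytes of a possibly fragmented buffer into a private array (the 8-byte peek size is arbitrary).

#include <linux/skbuff.h>

/* Sketch: peek at the first 8 bytes of an skb regardless of fragmentation. */
static int example_peek(const struct sk_buff *skb)
{
        u8 buf[8];

        if (skb_copy_bits(skb, 0, buf, sizeof(buf)) < 0)
                return -EFAULT;                 /* fewer than 8 bytes available */

        /* buf[] now holds a private copy of the first 8 bytes. */
        return 0;
}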
-/* Keep iterating until skb_iter_next returns false. */
-void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
-{
-       i->len = skb_headlen(skb);
-       i->data = (unsigned char *)skb->data;
-       i->nextfrag = 0;
-       i->fraglist = NULL;
-}
-
-int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
-{
-       /* Unmap previous, if not head fragment. */
-       if (i->nextfrag)
-               kunmap_skb_frag(i->data);
-
-       if (i->fraglist) {
-       fraglist:
-               /* We're iterating through fraglist. */
-               if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
-                       i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
-                                               ->frags[i->nextfrag]);
-                       i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
-                               .size;
-                       i->nextfrag++;
-                       return 1;
-               }
-               /* Fragments with fragments?  Too hard! */
-               BUG_ON(skb_shinfo(i->fraglist)->frag_list);
-               i->fraglist = i->fraglist->next;
-               if (!i->fraglist)
-                       goto end;
-
-               i->len = skb_headlen(i->fraglist);
-               i->data = i->fraglist->data;
-               i->nextfrag = 0;
-               return 1;
-       }
-
-       if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
-               i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
-               i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
-               i->nextfrag++;
-               return 1;
-       }
-
-       i->fraglist = skb_shinfo(skb)->frag_list;
-       if (i->fraglist)
-               goto fraglist;
-
-end:
-       /* Bug trap for callers */
-       i->data = NULL;
-       return 0;
-}
-
-void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
-{
-       /* Unmap previous, if not head fragment. */
-       if (i->data && i->nextfrag)
-               kunmap_skb_frag(i->data);
-       /* Bug trap for callers */
-       i->data = NULL;
-}
-
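The three functions above form a small iterator over the linear head, the page fragments and the frag list; a hedged sketch of walking every data area (here just summing lengths) looks like this. A caller that stops early would call skb_iter_abort() instead of running to the end.

#include <linux/skbuff.h>

/* Sketch: visit each mapped data area of an skb with the iterator above. */
static unsigned int example_total_len(const struct sk_buff *skb)
{
        struct skb_iter i;
        unsigned int total = 0;

        skb_iter_first(skb, &i);
        do {
                total += i.len;         /* i.data points at i.len mapped bytes */
        } while (skb_iter_next(skb, &i));

        return total;                   /* the final next() already cleaned up */
}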
-/* Checksum skb data. */
-
-unsigned int skb_checksum(const struct sk_buff *skb, int offset,
-                         int len, unsigned int csum)
-{
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
-       int pos = 0;
-
-       /* Checksum header. */
-       if (copy > 0) {
-               if (copy > len)
-                       copy = len;
-               csum = csum_partial(skb->data + offset, copy, csum);
-               if ((len -= copy) == 0)
-                       return csum;
-               offset += copy;
-               pos     = copy;
-       }
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
-               if ((copy = end - offset) > 0) {
-                       unsigned int csum2;
-                       u8 *vaddr;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-                       if (copy > len)
-                               copy = len;
-                       vaddr = kmap_skb_frag(frag);
-                       csum2 = csum_partial(vaddr + frag->page_offset +
-                                            offset - start, copy, 0);
-                       kunmap_skb_frag(vaddr);
-                       csum = csum_block_add(csum, csum2, pos);
-                       if (!(len -= copy))
-                               return csum;
-                       offset += copy;
-                       pos    += copy;
-               }
-               start = end;
-       }
-
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               unsigned int csum2;
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_checksum(list, offset - start,
-                                                    copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               pos    += copy;
-                       }
-                       start = end;
-               }
-       }
-       if (len)
-               BUG();
-
-       return csum;
-}
-
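A hedged sketch of how the walker above is commonly combined with csum_fold() (used further down in this file) to produce a final 16-bit Internet checksum over the whole buffer:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Sketch: fold a full-buffer partial checksum into a 16-bit result. */
static unsigned short example_csum(const struct sk_buff *skb)
{
        unsigned int csum = skb_checksum(skb, 0, skb->len, 0);

        return csum_fold(csum);         /* one's-complement fold to 16 bits */
}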
-/* Both of above in one bottle. */
-
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
-                                   u8 *to, int len, unsigned int csum)
-{
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
-       int pos = 0;
-
-       /* Copy header. */
-       if (copy > 0) {
-               if (copy > len)
-                       copy = len;
-               csum = csum_partial_copy_nocheck(skb->data + offset, to,
-                                                copy, csum);
-               if ((len -= copy) == 0)
-                       return csum;
-               offset += copy;
-               to     += copy;
-               pos     = copy;
-       }
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
-               if ((copy = end - offset) > 0) {
-                       unsigned int csum2;
-                       u8 *vaddr;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-                       if (copy > len)
-                               copy = len;
-                       vaddr = kmap_skb_frag(frag);
-                       csum2 = csum_partial_copy_nocheck(vaddr +
-                                                         frag->page_offset +
-                                                         offset - start, to,
-                                                         copy, 0);
-                       kunmap_skb_frag(vaddr);
-                       csum = csum_block_add(csum, csum2, pos);
-                       if (!(len -= copy))
-                               return csum;
-                       offset += copy;
-                       to     += copy;
-                       pos    += copy;
-               }
-               start = end;
-       }
-
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list; list = list->next) {
-                       unsigned int csum2;
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_copy_and_csum_bits(list,
-                                                              offset - start,
-                                                              to, copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               to     += copy;
-                               pos    += copy;
-                       }
-                       start = end;
-               }
-       }
-       if (len)
-               BUG();
-       return csum;
-}
-
-void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
-{
-       unsigned int csum;
-       long csstart;
-
-       if (skb->ip_summed == CHECKSUM_HW)
-               csstart = skb->h.raw - skb->data;
-       else
-               csstart = skb_headlen(skb);
-
-       if (csstart > skb_headlen(skb))
-               BUG();
-
-       memcpy(to, skb->data, csstart);
-
-       csum = 0;
-       if (csstart != skb->len)
-               csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
-                                             skb->len - csstart, 0);
-
-       if (skb->ip_summed == CHECKSUM_HW) {
-               long csstuff = csstart + skb->csum;
-
-               *((unsigned short *)(to + csstuff)) = csum_fold(csum);
-       }
-}
-
-/**
- *     skb_dequeue - remove from the head of the queue
- *     @list: list to dequeue from
- *
- *     Remove the head of the list. The list lock is taken so the function
- *     may be used safely with other locking list functions. The head item is
- *     returned or %NULL if the list is empty.
- */
-
-struct sk_buff *skb_dequeue(struct sk_buff_head *list)
-{
-       unsigned long flags;
-       struct sk_buff *result;
-
-       spin_lock_irqsave(&list->lock, flags);
-       result = __skb_dequeue(list);
-       spin_unlock_irqrestore(&list->lock, flags);
-       return result;
-}
-
-/**
- *     skb_dequeue_tail - remove from the tail of the queue
- *     @list: list to dequeue from
- *
- *     Remove the tail of the list. The list lock is taken so the function
- *     may be used safely with other locking list functions. The tail item is
- *     returned or %NULL if the list is empty.
- */
-struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
-{
-       unsigned long flags;
-       struct sk_buff *result;
-
-       spin_lock_irqsave(&list->lock, flags);
-       result = __skb_dequeue_tail(list);
-       spin_unlock_irqrestore(&list->lock, flags);
-       return result;
-}
-
-/**
- *     skb_queue_purge - empty a list
- *     @list: list to empty
- *
- *     Delete all buffers on an &sk_buff list. Each buffer is removed from
- *     the list and one reference dropped. This function takes the list
- *     lock and is atomic with respect to other list locking functions.
- */
-void skb_queue_purge(struct sk_buff_head *list)
-{
-       struct sk_buff *skb;
-       while ((skb = skb_dequeue(list)) != NULL)
-               kfree_skb(skb);
-}
-
-/**
- *     skb_queue_head - queue a buffer at the list head
- *     @list: list to use
- *     @newsk: buffer to queue
- *
- *     Queue a buffer at the start of the list. This function takes the
- *     list lock and can be used safely with other locking &sk_buff
- *     functions.
- *
- *     A buffer cannot be placed on two lists at the same time.
- */
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&list->lock, flags);
-       __skb_queue_head(list, newsk);
-       spin_unlock_irqrestore(&list->lock, flags);
-}
-
-/**
- *     skb_queue_tail - queue a buffer at the list tail
- *     @list: list to use
- *     @newsk: buffer to queue
- *
- *     Queue a buffer at the tail of the list. This function takes the
- *     list lock and can be used safely with other locking &sk_buff
- *     functions.
- *
- *     A buffer cannot be placed on two lists at the same time.
- */
-void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&list->lock, flags);
-       __skb_queue_tail(list, newsk);
-       spin_unlock_irqrestore(&list->lock, flags);
-}
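A hedged sketch of the usual producer/consumer pattern around the queue helpers above; skb_queue_head_init() is assumed from the core sk_buff API (it is not defined in this file).

#include <linux/skbuff.h>

static struct sk_buff_head example_queue;       /* hypothetical receive queue */

static void example_init(void)
{
        skb_queue_head_init(&example_queue);    /* assumed core API: lock + empty list */
}

static void example_rx(struct sk_buff *skb)
{
        skb_queue_tail(&example_queue, skb);    /* takes the list lock internally */
}

static void example_drain(void)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&example_queue)) != NULL)
                kfree_skb(skb);                 /* drop each queued buffer */
}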
-/**
- *     skb_unlink      -       remove a buffer from a list
- *     @skb: buffer to remove
- *
- *     Remove a packet from the list it is on. The list locks are taken
- *     and this function is atomic with respect to other list locked calls.
- *
- *     Works even without knowing the list it is sitting on, which can be
- *     handy at times. It also means that THE LIST MUST EXIST when you
- *     unlink. Thus a list must have its contents unlinked before it is
- *     destroyed.
- */
-void skb_unlink(struct sk_buff *skb)
-{
-       struct sk_buff_head *list = skb->list;
-
-       if (list) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&list->lock, flags);
-               if (skb->list == list)
-                       __skb_unlink(skb, skb->list);
-               spin_unlock_irqrestore(&list->lock, flags);
-       }
-}
-
-
-/**
- *     skb_append      -       append a buffer
- *     @old: buffer to insert after
- *     @newsk: buffer to insert
- *
- *     Place a packet after a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls.
- *     A buffer cannot be placed on two lists at the same time.
- */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_append(old, newsk);
-       spin_unlock_irqrestore(&old->list->lock, flags);
-}
-
-
-/**
- *     skb_insert      -       insert a buffer
- *     @old: buffer to insert before
- *     @newsk: buffer to insert
- *
- *     Place a packet before a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls.
- *     A buffer cannot be placed on two lists at the same time.
- */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_insert(newsk, old->prev, old, old->list);
-       spin_unlock_irqrestore(&old->list->lock, flags);
-}
-
-#if 0
-/*
- *     Tune the memory allocator for a new MTU size.
- */
-void skb_add_mtu(int mtu)
-{
-       /* Must match allocation in alloc_skb */
-       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
-
-       kmem_add_cache_size(mtu);
-}
-#endif
-
-static void inline skb_split_inside_header(struct sk_buff *skb,
-                                          struct sk_buff* skb1,
-                                          const u32 len, const int pos)
-{
-       int i;
-
-       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
-
-       /* And move data appendix as is. */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-               skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
-
-       skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
-       skb_shinfo(skb)->nr_frags  = 0;
-       skb1->data_len             = skb->data_len;
-       skb1->len                  += skb1->data_len;
-       skb->data_len              = 0;
-       skb->len                   = len;
-       skb->tail                  = skb->data + len;
-}
-
-static void inline skb_split_no_header(struct sk_buff *skb,
-                                      struct sk_buff* skb1,
-                                      const u32 len, int pos)
-{
-       int i, k = 0;
-       const int nfrags = skb_shinfo(skb)->nr_frags;
-
-       skb_shinfo(skb)->nr_frags = 0;
-       skb1->len                 = skb1->data_len = skb->len - len;
-       skb->len                  = len;
-       skb->data_len             = len - pos;
-
-       for (i = 0; i < nfrags; i++) {
-               int size = skb_shinfo(skb)->frags[i].size;
-
-               if (pos + size > len) {
-                       skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
-
-                       if (pos < len) {
-                               /* Split frag.
-                                * We have two variants in this case:
-                                * 1. Move the whole frag to the second
-                                *    part, if possible. E.g. this
-                                *    approach is mandatory for TUX,
-                                *    where splitting is expensive.
-                                * 2. Split accurately, which is what we do here.
-                                */
-                               get_page(skb_shinfo(skb)->frags[i].page);
-                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
-                               skb_shinfo(skb1)->frags[0].size -= len - pos;
-                               skb_shinfo(skb)->frags[i].size  = len - pos;
-                               skb_shinfo(skb)->nr_frags++;
-                       }
-                       k++;
-               } else
-                       skb_shinfo(skb)->nr_frags++;
-               pos += size;
-       }
-       skb_shinfo(skb1)->nr_frags = k;
-}
-
-/**
- * skb_split - Split fragmented skb to two parts at length len.
- */
-void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
-{
-       int pos = skb_headlen(skb);
-
-       if (len < pos)  /* Split line is inside header. */
-               skb_split_inside_header(skb, skb1, len, pos);
-       else            /* Second chunk has no header, nothing to copy. */
-               skb_split_no_header(skb, skb1, len, pos);
-}
-
-void __init skb_init(void)
-{
-       skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
-                                             sizeof(struct sk_buff),
-                                             0,
-                                             SLAB_HWCACHE_ALIGN,
-                                             NULL, NULL);
-       if (!skbuff_head_cache)
-               panic("cannot create skbuff cache");
-}
-
-EXPORT_SYMBOL(___pskb_trim);
-EXPORT_SYMBOL(__kfree_skb);
-EXPORT_SYMBOL(__pskb_pull_tail);
-EXPORT_SYMBOL(alloc_skb);
-EXPORT_SYMBOL(pskb_copy);
-EXPORT_SYMBOL(pskb_expand_head);
-EXPORT_SYMBOL(skb_checksum);
-EXPORT_SYMBOL(skb_clone);
-EXPORT_SYMBOL(skb_clone_fraglist);
-EXPORT_SYMBOL(skb_copy);
-EXPORT_SYMBOL(skb_copy_and_csum_bits);
-EXPORT_SYMBOL(skb_copy_and_csum_dev);
-EXPORT_SYMBOL(skb_copy_bits);
-EXPORT_SYMBOL(skb_copy_expand);
-EXPORT_SYMBOL(skb_over_panic);
-EXPORT_SYMBOL(skb_pad);
-EXPORT_SYMBOL(skb_realloc_headroom);
-EXPORT_SYMBOL(skb_under_panic);
-EXPORT_SYMBOL(skb_dequeue);
-EXPORT_SYMBOL(skb_dequeue_tail);
-EXPORT_SYMBOL(skb_insert);
-EXPORT_SYMBOL(skb_queue_purge);
-EXPORT_SYMBOL(skb_queue_head);
-EXPORT_SYMBOL(skb_queue_tail);
-EXPORT_SYMBOL(skb_unlink);
-EXPORT_SYMBOL(skb_append);
-EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_iter_first);
-EXPORT_SYMBOL(skb_iter_next);
-EXPORT_SYMBOL(skb_iter_abort);
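
The skb_split() removed above picks between the two helpers on whether the
split point falls inside the linear header (skb_headlen) or inside the paged
frags. A minimal caller-side sketch, not taken from this tree, of how a driver
might use it to cap an skb at a hypothetical max_len and keep the remainder
(protocol headers and checksum state of the returned skb are left to the
caller):

    #include <linux/skbuff.h>

    /* Hedged example: split 'skb' so it carries at most 'max_len' bytes and
     * return the remainder, or NULL if no split was needed or the allocation
     * failed.  'max_len' is an assumed caller-supplied limit. */
    static struct sk_buff *split_at(struct sk_buff *skb, u32 max_len)
    {
            struct sk_buff *rest;

            if (max_len >= skb->len)
                    return NULL;                    /* already short enough */
            rest = alloc_skb(0, GFP_ATOMIC);        /* frags-only second half */
            if (rest)
                    skb_split(skb, rest, max_len);  /* skb keeps [0, max_len) */
            return rest;
    }
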
diff --git a/linux-2.6.8.1-xen-sparse/net/ipv4/raw.c b/linux-2.6.8.1-xen-sparse/net/ipv4/raw.c
deleted file mode 100644 (file)
index 8cc0ea6..0000000
+++ /dev/null
@@ -1,837 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             RAW - implementation of IP "raw" sockets.
- *
- * Version:    $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
- *
- * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
- *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * Fixes:
- *             Alan Cox        :       verify_area() fixed up
- *             Alan Cox        :       ICMP error handling
- *             Alan Cox        :       EMSGSIZE if you send too big a packet
- *             Alan Cox        :       Now uses generic datagrams and shared
- *                                     skbuff library. No more peek crashes,
- *                                     no more backlogs
- *             Alan Cox        :       Checks sk->broadcast.
- *             Alan Cox        :       Uses skb_free_datagram/skb_copy_datagram
- *             Alan Cox        :       Raw passes ip options too
- *             Alan Cox        :       Setsocketopt added
- *             Alan Cox        :       Fixed error return for broadcasts
- *             Alan Cox        :       Removed wake_up calls
- *             Alan Cox        :       Use ttl/tos
- *             Alan Cox        :       Cleaned up old debugging
- *             Alan Cox        :       Use new kernel side addresses
- *     Arnt Gulbrandsen        :       Fixed MSG_DONTROUTE in raw sockets.
- *             Alan Cox        :       BSD style RAW socket demultiplexing.
- *             Alan Cox        :       Beginnings of mrouted support.
- *             Alan Cox        :       Added IP_HDRINCL option.
- *             Alan Cox        :       Skip broadcast check if BSDism set.
- *             David S. Miller :       New socket lookup architecture.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h> 
-#include <asm/atomic.h>
-#include <asm/byteorder.h>
-#include <asm/current.h>
-#include <asm/uaccess.h>
-#include <asm/ioctls.h>
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/aio.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/sockios.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/mroute.h>
-#include <linux/netdevice.h>
-#include <linux/in_route.h>
-#include <linux/route.h>
-#include <linux/tcp.h>
-#include <linux/skbuff.h>
-#include <net/dst.h>
-#include <net/sock.h>
-#include <linux/gfp.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <net/ip.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/raw.h>
-#include <net/snmp.h>
-#include <net/inet_common.h>
-#include <net/checksum.h>
-#include <net/xfrm.h>
-#include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-
-struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE];
-rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
-
-static void raw_v4_hash(struct sock *sk)
-{
-       struct hlist_head *head = &raw_v4_htable[inet_sk(sk)->num &
-                                                (RAWV4_HTABLE_SIZE - 1)];
-
-       write_lock_bh(&raw_v4_lock);
-       sk_add_node(sk, head);
-       sock_prot_inc_use(sk->sk_prot);
-       write_unlock_bh(&raw_v4_lock);
-}
-
-static void raw_v4_unhash(struct sock *sk)
-{
-       write_lock_bh(&raw_v4_lock);
-       if (sk_del_node_init(sk))
-               sock_prot_dec_use(sk->sk_prot);
-       write_unlock_bh(&raw_v4_lock);
-}
-
-struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
-                            unsigned long raddr, unsigned long laddr,
-                            int dif)
-{
-       struct hlist_node *node;
-
-       sk_for_each_from(sk, node) {
-               struct inet_opt *inet = inet_sk(sk);
-
-               if (inet->num == num                                    &&
-                   !(inet->daddr && inet->daddr != raddr)              &&
-                   !(inet->rcv_saddr && inet->rcv_saddr != laddr)      &&
-                   !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
-                       goto found; /* gotcha */
-       }
-       sk = NULL;
-found:
-       return sk;
-}
-
-/*
- *     0 - deliver
- *     1 - block
- */
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
-{
-       int type;
-
-       if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
-               return 1;
-
-       type = skb->h.icmph->type;
-       if (type < 32) {
-               __u32 data = raw4_sk(sk)->filter.data;
-
-               return ((1 << type) & data) != 0;
-       }
-
-       /* Do not block unknown ICMP types */
-       return 0;
-}
-
-/* IP input processing comes here for RAW socket delivery.
- * Caller owns SKB, so we must make clones.
- *
- * RFC 1122: SHOULD pass TOS value up to the transport layer.
- * -> It does. And not only TOS, but the whole IP header.
- */
-void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
-{
-       struct sock *sk;
-       struct hlist_head *head;
-
-       read_lock(&raw_v4_lock);
-       head = &raw_v4_htable[hash];
-       if (hlist_empty(head))
-               goto out;
-       sk = __raw_v4_lookup(__sk_head(head), iph->protocol,
-                            iph->saddr, iph->daddr,
-                            skb->dev->ifindex);
-
-       while (sk) {
-               if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
-                       struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-                       /* Not releasing hash table! */
-                       if (clone)
-                               raw_rcv(sk, clone);
-               }
-               sk = __raw_v4_lookup(sk_next(sk), iph->protocol,
-                                    iph->saddr, iph->daddr,
-                                    skb->dev->ifindex);
-       }
-out:
-       read_unlock(&raw_v4_lock);
-}
-
-void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
-{
-       struct inet_opt *inet = inet_sk(sk);
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
-       int err = 0;
-       int harderr = 0;
-
-       /* Report error on raw socket, if:
-          1. User requested ip_recverr.
-          2. Socket is connected (otherwise the error indication
-             is useless without ip_recverr and the error is hard).
-        */
-       if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
-               return;
-
-       switch (type) {
-       default:
-       case ICMP_TIME_EXCEEDED:
-               err = EHOSTUNREACH;
-               break;
-       case ICMP_SOURCE_QUENCH:
-               return;
-       case ICMP_PARAMETERPROB:
-               err = EPROTO;
-               harderr = 1;
-               break;
-       case ICMP_DEST_UNREACH:
-               err = EHOSTUNREACH;
-               if (code > NR_ICMP_UNREACH)
-                       break;
-               err = icmp_err_convert[code].errno;
-               harderr = icmp_err_convert[code].fatal;
-               if (code == ICMP_FRAG_NEEDED) {
-                       harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
-                       err = EMSGSIZE;
-               }
-       }
-
-       if (inet->recverr) {
-               struct iphdr *iph = (struct iphdr*)skb->data;
-               u8 *payload = skb->data + (iph->ihl << 2);
-
-               if (inet->hdrincl)
-                       payload = skb->data;
-               ip_icmp_error(sk, skb, err, 0, info, payload);
-       }
-
-       if (inet->recverr || harderr) {
-               sk->sk_err = err;
-               sk->sk_error_report(sk);
-       }
-}
-
-static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
-{
-       /* Charge it to the socket. */
-       
-       if (sock_queue_rcv_skb(sk, skb) < 0) {
-               /* FIXME: increment a raw drops counter here */
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       return NET_RX_SUCCESS;
-}
-
-int raw_rcv(struct sock *sk, struct sk_buff *skb)
-{
-       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       skb_push(skb, skb->data - skb->nh.raw);
-
-       raw_rcv_skb(sk, skb);
-       return 0;
-}
-
-static int raw_send_hdrinc(struct sock *sk, void *from, int length,
-                       struct rtable *rt, 
-                       unsigned int flags)
-{
-       struct inet_opt *inet = inet_sk(sk);
-       int hh_len;
-       struct iphdr *iph;
-       struct sk_buff *skb;
-       int err;
-
-       if (length > rt->u.dst.dev->mtu) {
-               ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
-                              rt->u.dst.dev->mtu);
-               return -EMSGSIZE;
-       }
-       if (flags&MSG_PROBE)
-               goto out;
-
-       hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
-       skb = sock_alloc_send_skb(sk, length+hh_len+15,
-                                 flags&MSG_DONTWAIT, &err);
-       if (skb == NULL)
-               goto error; 
-       skb_reserve(skb, hh_len);
-
-       skb->priority = sk->sk_priority;
-       skb->dst = dst_clone(&rt->u.dst);
-
-       skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
-
-       skb->ip_summed = CHECKSUM_NONE;
-
-       skb->h.raw = skb->nh.raw;
-       err = memcpy_fromiovecend((void *)iph, from, 0, length);
-       if (err)
-               goto error_fault;
-
-       /* We don't modify invalid header */
-       if (length >= sizeof(*iph) && iph->ihl * 4 <= length) {
-               if (!iph->saddr)
-                       iph->saddr = rt->rt_src;
-               iph->check   = 0;
-               iph->tot_len = htons(length);
-               if (!iph->id)
-                       ip_select_ident(iph, &rt->u.dst, NULL);
-
-               iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-       }
-
-       err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
-                     dst_output);
-       if (err > 0)
-               err = inet->recverr ? net_xmit_errno(err) : 0;
-       if (err)
-               goto error;
-out:
-       return 0;
-
-error_fault:
-       err = -EFAULT;
-       kfree_skb(skb);
-error:
-       IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
-       return err; 
-}
-
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len)
-{
-       struct inet_opt *inet = inet_sk(sk);
-       struct ipcm_cookie ipc;
-       struct rtable *rt = NULL;
-       int free = 0;
-       u32 daddr;
-       u32 saddr;
-       u8  tos;
-       int err;
-
-       err = -EMSGSIZE;
-       if (len < 0 || len > 0xFFFF)
-               goto out;
-
-       /*
-        *      Check the flags.
-        */
-
-       err = -EOPNOTSUPP;
-       if (msg->msg_flags & MSG_OOB)   /* Mirror BSD error message */
-               goto out;               /* compatibility */
-                        
-       /*
-        *      Get and verify the address. 
-        */
-
-       if (msg->msg_namelen) {
-               struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
-               err = -EINVAL;
-               if (msg->msg_namelen < sizeof(*usin))
-                       goto out;
-               if (usin->sin_family != AF_INET) {
-                       static int complained;
-                       if (!complained++)
-                               printk(KERN_INFO "%s forgot to set AF_INET in "
-                                                "raw sendmsg. Fix it!\n",
-                                                current->comm);
-                       err = -EINVAL;
-                       if (usin->sin_family)
-                               goto out;
-               }
-               daddr = usin->sin_addr.s_addr;
-               /* ANK: I did not forget to get protocol from port field.
- *             I just do not know who uses this weirdness.
-                * IP_HDRINCL is much more convenient.
-                */
-       } else {
-               err = -EDESTADDRREQ;
-               if (sk->sk_state != TCP_ESTABLISHED) 
-                       goto out;
-               daddr = inet->daddr;
-       }
-
-       ipc.addr = inet->saddr;
-       ipc.opt = NULL;
-       ipc.oif = sk->sk_bound_dev_if;
-
-       if (msg->msg_controllen) {
-               err = ip_cmsg_send(msg, &ipc);
-               if (err)
-                       goto out;
-               if (ipc.opt)
-                       free = 1;
-       }
-
-       saddr = ipc.addr;
-       ipc.addr = daddr;
-
-       if (!ipc.opt)
-               ipc.opt = inet->opt;
-
-       if (ipc.opt) {
-               err = -EINVAL;
-               /* Linux does not mangle headers on raw sockets,
-                * so IP options + IP_HDRINCL makes no sense.
-                */
-               if (inet->hdrincl)
-                       goto done;
-               if (ipc.opt->srr) {
-                       if (!daddr)
-                               goto done;
-                       daddr = ipc.opt->faddr;
-               }
-       }
-       tos = RT_TOS(inet->tos) | sk->sk_localroute;
-       if (msg->msg_flags & MSG_DONTROUTE)
-               tos |= RTO_ONLINK;
-
-       if (MULTICAST(daddr)) {
-               if (!ipc.oif)
-                       ipc.oif = inet->mc_index;
-               if (!saddr)
-                       saddr = inet->mc_addr;
-       }
-
-       {
-               struct flowi fl = { .oif = ipc.oif,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = saddr,
-                                               .tos = tos } },
-                                   .proto = inet->hdrincl ? IPPROTO_RAW :
-                                                            sk->sk_protocol,
-                                 };
-               err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
-       }
-       if (err)
-               goto done;
-
-       err = -EACCES;
-       if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
-               goto done;
-
-       if (msg->msg_flags & MSG_CONFIRM)
-               goto do_confirm;
-back_from_confirm:
-
-       if (inet->hdrincl)
-               err = raw_send_hdrinc(sk, msg->msg_iov, len, 
-                                       rt, msg->msg_flags);
-       
-        else {
-               if (!ipc.addr)
-                       ipc.addr = rt->rt_dst;
-               lock_sock(sk);
-               err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
-                                       &ipc, rt, msg->msg_flags);
-               if (err)
-                       ip_flush_pending_frames(sk);
-               else if (!(msg->msg_flags & MSG_MORE))
-                       err = ip_push_pending_frames(sk);
-               release_sock(sk);
-       }
-done:
-       if (free)
-               kfree(ipc.opt);
-       ip_rt_put(rt);
-
-out:   return err < 0 ? err : len;
-
-do_confirm:
-       dst_confirm(&rt->u.dst);
-       if (!(msg->msg_flags & MSG_PROBE) || len)
-               goto back_from_confirm;
-       err = 0;
-       goto done;
-}
-
-static void raw_close(struct sock *sk, long timeout)
-{
-        /*
-        * Raw sockets may have direct kernel references. Kill them.
-        */
-       ip_ra_control(sk, 0, NULL);
-
-       sk_common_release(sk);
-}
-
-/* This gets rid of all the nasties in af_inet. -DaveM */
-static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
-       struct inet_opt *inet = inet_sk(sk);
-       struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
-       int ret = -EINVAL;
-       int chk_addr_ret;
-
-       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
-               goto out;
-       chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
-       ret = -EADDRNOTAVAIL;
-       if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
-           chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
-               goto out;
-       inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
-       if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
-               inet->saddr = 0;  /* Use device */
-       sk_dst_reset(sk);
-       ret = 0;
-out:   return ret;
-}
-
-/*
- *     This should be easy: if there is something there,
- *     we return it; otherwise we block.
- */
-
-int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int noblock, int flags, int *addr_len)
-{
-       struct inet_opt *inet = inet_sk(sk);
-       size_t copied = 0;
-       int err = -EOPNOTSUPP;
-       struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
-       struct sk_buff *skb;
-
-       if (flags & MSG_OOB)
-               goto out;
-
-       if (addr_len)
-               *addr_len = sizeof(*sin);
-
-       if (flags & MSG_ERRQUEUE) {
-               err = ip_recv_error(sk, msg, len);
-               goto out;
-       }
-
-       skb = skb_recv_datagram(sk, flags, noblock, &err);
-       if (!skb)
-               goto out;
-
-       copied = skb->len;
-       if (len < copied) {
-               msg->msg_flags |= MSG_TRUNC;
-               copied = len;
-       }
-
-       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
-       if (err)
-               goto done;
-
-       sock_recv_timestamp(msg, sk, skb);
-
-       /* Copy the address. */
-       if (sin) {
-               sin->sin_family = AF_INET;
-               sin->sin_addr.s_addr = skb->nh.iph->saddr;
-               memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-       }
-       if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
-       if (flags & MSG_TRUNC)
-               copied = skb->len;
-done:
-       skb_free_datagram(sk, skb);
-out:   return err ? err : copied;
-}
-
-static int raw_init(struct sock *sk)
-{
-       struct raw_opt *tp = raw4_sk(sk);
-       if (inet_sk(sk)->num == IPPROTO_ICMP)
-               memset(&tp->filter, 0, sizeof(tp->filter));
-       return 0;
-}
-
-static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
-{
-       if (optlen > sizeof(struct icmp_filter))
-               optlen = sizeof(struct icmp_filter);
-       if (copy_from_user(&raw4_sk(sk)->filter, optval, optlen))
-               return -EFAULT;
-       return 0;
-}
-
-static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
-{
-       int len, ret = -EFAULT;
-
-       if (get_user(len, optlen))
-               goto out;
-       ret = -EINVAL;
-       if (len < 0)
-               goto out;
-       if (len > sizeof(struct icmp_filter))
-               len = sizeof(struct icmp_filter);
-       ret = -EFAULT;
-       if (put_user(len, optlen) ||
-           copy_to_user(optval, &raw4_sk(sk)->filter, len))
-               goto out;
-       ret = 0;
-out:   return ret;
-}
-
-static int raw_setsockopt(struct sock *sk, int level, int optname, 
-                         char __user *optval, int optlen)
-{
-       if (level != SOL_RAW)
-               return ip_setsockopt(sk, level, optname, optval, optlen);
-
-       if (optname == ICMP_FILTER) {
-               if (inet_sk(sk)->num != IPPROTO_ICMP)
-                       return -EOPNOTSUPP;
-               else
-                       return raw_seticmpfilter(sk, optval, optlen);
-       }
-       return -ENOPROTOOPT;
-}
-
-static int raw_getsockopt(struct sock *sk, int level, int optname, 
-                         char __user *optval, int __user *optlen)
-{
-       if (level != SOL_RAW)
-               return ip_getsockopt(sk, level, optname, optval, optlen);
-
-       if (optname == ICMP_FILTER) {
-               if (inet_sk(sk)->num != IPPROTO_ICMP)
-                       return -EOPNOTSUPP;
-               else
-                       return raw_geticmpfilter(sk, optval, optlen);
-       }
-       return -ENOPROTOOPT;
-}
-
-static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
-{
-       switch (cmd) {
-               case SIOCOUTQ: {
-                       int amount = atomic_read(&sk->sk_wmem_alloc);
-                       return put_user(amount, (int __user *)arg);
-               }
-               case SIOCINQ: {
-                       struct sk_buff *skb;
-                       int amount = 0;
-
-                       spin_lock_irq(&sk->sk_receive_queue.lock);
-                       skb = skb_peek(&sk->sk_receive_queue);
-                       if (skb != NULL)
-                               amount = skb->len;
-                       spin_unlock_irq(&sk->sk_receive_queue.lock);
-                       return put_user(amount, (int __user *)arg);
-               }
-
-               default:
-#ifdef CONFIG_IP_MROUTE
-                       return ipmr_ioctl(sk, cmd, (void __user *)arg);
-#else
-                       return -ENOIOCTLCMD;
-#endif
-       }
-}
-
-struct proto raw_prot = {
-       .name =         "RAW",
-       .close =        raw_close,
-       .connect =      ip4_datagram_connect,
-       .disconnect =   udp_disconnect,
-       .ioctl =        raw_ioctl,
-       .init =         raw_init,
-       .setsockopt =   raw_setsockopt,
-       .getsockopt =   raw_getsockopt,
-       .sendmsg =      raw_sendmsg,
-       .recvmsg =      raw_recvmsg,
-       .bind =         raw_bind,
-       .backlog_rcv =  raw_rcv_skb,
-       .hash =         raw_v4_hash,
-       .unhash =       raw_v4_unhash,
-};
-
-#ifdef CONFIG_PROC_FS
-struct raw_iter_state {
-       int bucket;
-};
-
-#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private)
-
-static struct sock *raw_get_first(struct seq_file *seq)
-{
-       struct sock *sk;
-       struct raw_iter_state* state = raw_seq_private(seq);
-
-       for (state->bucket = 0; state->bucket < RAWV4_HTABLE_SIZE; ++state->bucket) {
-               struct hlist_node *node;
-
-               sk_for_each(sk, node, &raw_v4_htable[state->bucket])
-                       if (sk->sk_family == PF_INET)
-                               goto found;
-       }
-       sk = NULL;
-found:
-       return sk;
-}
-
-static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
-{
-       struct raw_iter_state* state = raw_seq_private(seq);
-
-       do {
-               sk = sk_next(sk);
-try_again:
-               ;
-       } while (sk && sk->sk_family != PF_INET);
-
-       if (!sk && ++state->bucket < RAWV4_HTABLE_SIZE) {
-               sk = sk_head(&raw_v4_htable[state->bucket]);
-               goto try_again;
-       }
-       return sk;
-}
-
-static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
-{
-       struct sock *sk = raw_get_first(seq);
-
-       if (sk)
-               while (pos && (sk = raw_get_next(seq, sk)) != NULL)
-                       --pos;
-       return pos ? NULL : sk;
-}
-
-static void *raw_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       read_lock(&raw_v4_lock);
-       return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       struct sock *sk;
-
-       if (v == SEQ_START_TOKEN)
-               sk = raw_get_first(seq);
-       else
-               sk = raw_get_next(seq, v);
-       ++*pos;
-       return sk;
-}
-
-static void raw_seq_stop(struct seq_file *seq, void *v)
-{
-       read_unlock(&raw_v4_lock);
-}
-
-static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
-{
-       struct inet_opt *inet = inet_sk(sp);
-       unsigned int dest = inet->daddr,
-                    src = inet->rcv_saddr;
-       __u16 destp = 0,
-             srcp  = inet->num;
-
-       sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
-               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
-               i, src, srcp, dest, destp, sp->sk_state, 
-               atomic_read(&sp->sk_wmem_alloc),
-               atomic_read(&sp->sk_rmem_alloc),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp);
-       return tmpbuf;
-}
-
-static int raw_seq_show(struct seq_file *seq, void *v)
-{
-       char tmpbuf[129];
-
-       if (v == SEQ_START_TOKEN)
-               seq_printf(seq, "%-127s\n",
-                              "  sl  local_address rem_address   st tx_queue "
-                              "rx_queue tr tm->when retrnsmt   uid  timeout "
-                              "inode");
-       else {
-               struct raw_iter_state *state = raw_seq_private(seq);
-
-               seq_printf(seq, "%-127s\n",
-                          get_raw_sock(v, tmpbuf, state->bucket));
-       }
-       return 0;
-}
-
-static struct seq_operations raw_seq_ops = {
-       .start = raw_seq_start,
-       .next  = raw_seq_next,
-       .stop  = raw_seq_stop,
-       .show  = raw_seq_show,
-};
-
-static int raw_seq_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq;
-       int rc = -ENOMEM;
-       struct raw_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
-       if (!s)
-               goto out;
-       rc = seq_open(file, &raw_seq_ops);
-       if (rc)
-               goto out_kfree;
-
-       seq = file->private_data;
-       seq->private = s;
-       memset(s, 0, sizeof(*s));
-out:
-       return rc;
-out_kfree:
-       kfree(s);
-       goto out;
-}
-
-static struct file_operations raw_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = raw_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release_private,
-};
-
-int __init raw_proc_init(void)
-{
-       if (!proc_net_fops_create("raw", S_IRUGO, &raw_seq_fops))
-               return -ENOMEM;
-       return 0;
-}
-
-void __init raw_proc_exit(void)
-{
-       proc_net_remove("raw");
-}
-#endif /* CONFIG_PROC_FS */
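
In the icmp_filter() path removed above, raw4_sk(sk)->filter.data is a 32-bit
mask indexed by ICMP type, where a set bit means "block"; raw_setsockopt()
installs it for level SOL_RAW, option ICMP_FILTER. A minimal user-space sketch
(not part of this tree) of how such a filter is typically installed on a raw
ICMP socket, assuming the usual Linux <linux/icmp.h> definitions and
CAP_NET_RAW privileges:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/icmp.h>
    #include <unistd.h>

    /* Hedged example: open a raw ICMP socket that only delivers echo replies. */
    static int open_echo_only_socket(void)
    {
            struct icmp_filter filt;
            int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

            if (fd < 0)
                    return -1;
            filt.data = ~(1U << ICMP_ECHOREPLY);    /* set bit == block that type */
            if (setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
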
diff --git a/linux-2.6.9-patches/agpgart.patch b/linux-2.6.9-patches/agpgart.patch
new file mode 100644 (file)
index 0000000..aba8b20
--- /dev/null
@@ -0,0 +1,346 @@
+--- linux-2.6.8.1/drivers/char/agp/ali-agp.c   2004-08-14 11:55:35.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/ali-agp.c      2004-09-05 05:55:58.876495340 +0100
+@@ -150,7 +150,7 @@
+       pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+       pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+                       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+-                        virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
++                        virt_to_bus(addr)) | ALI_CACHE_FLUSH_EN ));
+       return addr;
+ }
+@@ -174,7 +174,7 @@
+       pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+       pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+                       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+-                        virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
++                        virt_to_bus(addr)) | ALI_CACHE_FLUSH_EN));
+       agp_generic_destroy_page(addr);
+ }
+--- linux-2.6.8.1/drivers/char/agp/amd-k7-agp.c        2004-08-14 11:56:24.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/amd-k7-agp.c   2004-09-05 05:55:58.877495108 +0100
+@@ -43,7 +43,7 @@
+       SetPageReserved(virt_to_page(page_map->real));
+       global_cache_flush();
+-      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
++      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real),
+                                           PAGE_SIZE);
+       if (page_map->remapped == NULL) {
+               ClearPageReserved(virt_to_page(page_map->real));
+@@ -152,7 +152,7 @@
+       agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+       agp_bridge->gatt_table = (u32 *)page_dir.remapped;
+-      agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
++      agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
+       /* Get the address for the gart region.
+        * This is a bus address even on the alpha, b/c its
+@@ -166,7 +166,7 @@
+       /* Calculate the agp offset */
+       for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+               page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
+-                      virt_to_phys(amd_irongate_private.gatt_pages[i]->real);
++                      virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
+               page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
+       }
+--- linux-2.6.8.1/drivers/char/agp/amd64-agp.c 2004-08-14 11:55:47.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/amd64-agp.c    2004-09-05 05:55:58.877495108 +0100
+@@ -212,7 +212,7 @@
+ static int amd_8151_configure(void)
+ {
+-      unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
++      unsigned long gatt_bus = virt_to_bus(agp_bridge->gatt_table_real);
+       /* Configure AGP regs in each x86-64 host bridge. */
+       for_each_nb() {
+@@ -521,7 +521,7 @@
+ {
+       struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+-      release_mem_region(virt_to_phys(bridge->gatt_table_real),
++      release_mem_region(virt_to_bus(bridge->gatt_table_real),
+                          amd64_aperture_sizes[bridge->aperture_size_idx].size);
+       agp_remove_bridge(bridge);
+       agp_put_bridge(bridge);
+--- linux-2.6.8.1/drivers/char/agp/ati-agp.c   2004-08-14 11:55:48.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/ati-agp.c      2004-09-05 05:55:58.877495108 +0100
+@@ -64,7 +64,7 @@
+       /* CACHE_FLUSH(); */
+       global_cache_flush();
+-      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
++      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real),
+                                           PAGE_SIZE);
+       if (page_map->remapped == NULL || err) {
+               ClearPageReserved(virt_to_page(page_map->real));
+--- linux-2.6.8.1/drivers/char/agp/backend.c   2004-08-14 11:55:47.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/backend.c      2004-09-05 05:55:58.878494876 +0100
+@@ -142,7 +142,7 @@
+                       return -ENOMEM;
+               }
+-              bridge->scratch_page_real = virt_to_phys(addr);
++              bridge->scratch_page_real = virt_to_bus(addr);
+               bridge->scratch_page =
+                   bridge->driver->mask_memory(bridge->scratch_page_real, 0);
+       }
+@@ -186,7 +186,7 @@
+ err_out:
+       if (bridge->driver->needs_scratch_page)
+               bridge->driver->agp_destroy_page(
+-                              phys_to_virt(bridge->scratch_page_real));
++                              bus_to_virt(bridge->scratch_page_real));
+       if (got_gatt)
+               bridge->driver->free_gatt_table();
+       if (got_keylist) {
+@@ -211,7 +211,7 @@
+       if (bridge->driver->agp_destroy_page &&
+           bridge->driver->needs_scratch_page)
+               bridge->driver->agp_destroy_page(
+-                              phys_to_virt(bridge->scratch_page_real));
++                              bus_to_virt(bridge->scratch_page_real));
+ }
+ static const drm_agp_t drm_agp = {
+--- linux-2.6.8.1/drivers/char/agp/generic.c   2004-08-14 11:55:10.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/generic.c      2004-09-05 05:55:58.879494644 +0100
+@@ -127,7 +127,7 @@
+       }
+       if (curr->page_count != 0) {
+               for (i = 0; i < curr->page_count; i++) {
+-                      agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
++                      agp_bridge->driver->agp_destroy_page(bus_to_virt(curr->memory[i]));
+               }
+       }
+       agp_free_key(curr->key);
+@@ -181,7 +181,7 @@
+                       return NULL;
+               }
+               new->memory[i] =
+-                      agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
++                      agp_bridge->driver->mask_memory(virt_to_bus(addr), type);
+               new->page_count++;
+       }
+@@ -636,6 +636,7 @@
+       int i;
+       void *temp;
+       struct page *page;
++      dma_addr_t dma;
+       /* The generic routines can't handle 2 level gatt's */
+       if (agp_bridge->driver->size_type == LVL2_APER_SIZE)
+@@ -674,8 +675,10 @@
+                               break;
+                       }
+-                      table = (char *) __get_free_pages(GFP_KERNEL,
+-                                                        page_order);
++                      table = dma_alloc_coherent(
++                                      &agp_bridge->dev->dev,
++                                      PAGE_SIZE << page_order, &dma,
++                                      GFP_KERNEL);
+                       if (table == NULL) {
+                               i++;
+@@ -706,7 +709,9 @@
+               size = ((struct aper_size_info_fixed *) temp)->size;
+               page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+               num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+-              table = (char *) __get_free_pages(GFP_KERNEL, page_order);
++              table = dma_alloc_coherent(
++                              &agp_bridge->dev->dev,
++                              PAGE_SIZE << page_order, &dma, GFP_KERNEL);
+       }
+       if (table == NULL)
+@@ -721,7 +726,7 @@
+       agp_gatt_table = (void *)table;
+       agp_bridge->driver->cache_flush();
+-      agp_bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
++      agp_bridge->gatt_table = ioremap_nocache(virt_to_bus(table),
+                                       (PAGE_SIZE * (1 << page_order)));
+       agp_bridge->driver->cache_flush();
+@@ -729,11 +734,12 @@
+               for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+                       ClearPageReserved(page);
+-              free_pages((unsigned long) table, page_order);
++              dma_free_coherent(&agp_bridge->dev->dev, PAGE_SIZE<<page_order,
++                                      table, dma);
+               return -ENOMEM;
+       }
+-      agp_bridge->gatt_bus_addr = virt_to_phys(agp_bridge->gatt_table_real);
++      agp_bridge->gatt_bus_addr = virt_to_bus(table);
+       /* AK: bogus, should encode addresses > 4GB */
+       for (i = 0; i < num_entries; i++)
+@@ -785,7 +791,8 @@
+       for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+               ClearPageReserved(page);
+-      free_pages((unsigned long) agp_bridge->gatt_table_real, page_order);
++      dma_free_coherent(&agp_bridge->dev->dev, PAGE_SIZE<<page_order,
++              agp_bridge->gatt_table_real, agp_bridge->gatt_bus_addr);
+       agp_gatt_table = NULL;
+       agp_bridge->gatt_table = NULL;
+--- linux-2.6.8.1/drivers/char/agp/hp-agp.c    2004-08-14 11:55:59.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/hp-agp.c       2004-09-05 05:55:58.879494644 +0100
+@@ -110,7 +110,7 @@
+       hp->gart_size = HP_ZX1_GART_SIZE;
+       hp->gatt_entries = hp->gart_size / hp->io_page_size;
+-      hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
++      hp->io_pdir = bus_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
+       hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+       if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+@@ -248,7 +248,7 @@
+       agp_bridge->mode = INREG32(hp->lba_regs, hp->lba_cap_offset + PCI_AGP_STATUS);
+       if (hp->io_pdir_owner) {
+-              OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir));
++              OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_bus(hp->io_pdir));
+               OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
+               OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+               OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
+--- linux-2.6.8.1/drivers/char/agp/i460-agp.c  2004-08-14 11:55:34.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/i460-agp.c     2004-09-05 05:55:58.879494644 +0100
+@@ -371,7 +371,7 @@
+       }
+       memset(lp->alloced_map, 0, map_size);
+-      lp->paddr = virt_to_phys(lpage);
++      lp->paddr = virt_to_bus(lpage);
+       lp->refcount = 0;
+       atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+       return 0;
+@@ -382,7 +382,7 @@
+       kfree(lp->alloced_map);
+       lp->alloced_map = NULL;
+-      free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
++      free_pages((unsigned long) bus_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+       atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+ }
+--- linux-2.6.8.1/drivers/char/agp/intel-agp.c 2004-08-14 11:55:32.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/intel-agp.c    2004-09-05 05:55:58.880494412 +0100
+@@ -285,7 +285,7 @@
+       if (new == NULL)
+               return NULL;
+-      new->memory[0] = virt_to_phys(addr);
++      new->memory[0] = virt_to_bus(addr);
+       if (pg_count == 4) {
+               /* kludge to get 4 physical pages for ARGB cursor */
+               new->memory[1] = new->memory[0] + PAGE_SIZE;
+@@ -328,10 +328,10 @@
+       agp_free_key(curr->key);
+       if(curr->type == AGP_PHYS_MEMORY) {
+               if (curr->page_count == 4)
+-                      i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
++                      i8xx_destroy_pages(bus_to_virt(curr->memory[0]));
+               else
+                       agp_bridge->driver->agp_destroy_page(
+-                               phys_to_virt(curr->memory[0]));
++                               bus_to_virt(curr->memory[0]));
+               vfree(curr->memory);
+       }
+       kfree(curr);
+--- linux-2.6.8.1/drivers/char/agp/intel-mch-agp.c     2004-08-14 11:54:49.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/intel-mch-agp.c        2004-09-05 05:55:58.880494412 +0100
+@@ -51,7 +51,7 @@
+       if (new == NULL)
+               return NULL;
+-      new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
++      new->memory[0] = agp_bridge->driver->mask_memory(virt_to_bus(addr), type);
+       new->page_count = 1;
+       new->num_scratch_pages = 1;
+       new->type = AGP_PHYS_MEMORY;
+@@ -63,7 +63,7 @@
+ {
+       agp_free_key(curr->key);
+       if(curr->type == AGP_PHYS_MEMORY) {
+-              agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[0]));
++              agp_bridge->driver->agp_destroy_page(bus_to_virt(curr->memory[0]));
+               vfree(curr->memory);
+       }
+       kfree(curr);
+--- linux-2.6.8.1/drivers/char/agp/sworks-agp.c        2004-08-14 11:55:10.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/sworks-agp.c   2004-09-05 05:55:58.881494180 +0100
+@@ -51,7 +51,7 @@
+       }
+       SetPageReserved(virt_to_page(page_map->real));
+       global_cache_flush();
+-      page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
++      page_map->remapped = ioremap_nocache(virt_to_bus(page_map->real), 
+                                           PAGE_SIZE);
+       if (page_map->remapped == NULL) {
+               ClearPageReserved(virt_to_page(page_map->real));
+@@ -164,7 +164,7 @@
+       for(i = 0; i < 1024; i++) {
+               serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge->scratch_page;
+               page_dir.remapped[i] =
+-                      virt_to_phys(serverworks_private.scratch_dir.real);
++                      virt_to_bus(serverworks_private.scratch_dir.real);
+               page_dir.remapped[i] |= 0x00000001;
+       }
+@@ -177,7 +177,7 @@
+       agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+       agp_bridge->gatt_table = (u32 *)page_dir.remapped;
+-      agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
++      agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
+       /* Get the address for the gart region.
+        * This is a bus address even on the alpha, b/c its
+@@ -191,7 +191,7 @@
+       for(i = 0; i < value->num_entries / 1024; i++) {
+               page_dir.remapped[i] =
+-                      virt_to_phys(serverworks_private.gatt_pages[i]->real);
++                      virt_to_bus(serverworks_private.gatt_pages[i]->real);
+               page_dir.remapped[i] |= 0x00000001;
+       }
+--- linux-2.6.8.1/drivers/char/agp/uninorth-agp.c      2004-08-14 11:55:32.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/agp/uninorth-agp.c 2004-09-05 05:55:58.881494180 +0100
+@@ -200,7 +200,7 @@
+       agp_bridge->gatt_table_real = (u32 *) table;
+       agp_bridge->gatt_table = (u32 *)table;
+-      agp_bridge->gatt_bus_addr = virt_to_phys(table);
++      agp_bridge->gatt_bus_addr = virt_to_bus(table);
+       for (i = 0; i < num_entries; i++) {
+               agp_bridge->gatt_table[i] =
+--- linux-2.6.8.1/include/asm-i386/agp.h       2004-08-14 11:54:47.000000000 +0100
++++ linux-2.6.8.1-xen0/include/asm-i386/agp.h  2004-09-05 05:57:26.040268956 +0100
+@@ -3,6 +3,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
++#include <asm/system.h>
+ /* 
+  * Functions to keep the agpgart mappings coherent with the MMU.
+@@ -19,6 +20,6 @@
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+    need to be called for each cacheline of the whole page so it may not be 
+    worth it. Would need a page for it. */
+-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
++#define flush_agp_cache() wbinvd()
+ #endif
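
The change running through this patch is to stop treating virt_to_phys()
results as addresses a device can be programmed with: under Xen a guest's
pseudo-physical addresses are not machine addresses, so device-visible
pointers go through virt_to_bus()/bus_to_virt(), and the GATT is obtained
from dma_alloc_coherent() so that a usable bus handle comes back with the
mapping. A minimal sketch of that allocate/free pairing, with a hypothetical
pdev and nr_pages, assuming the 2.6-era DMA API:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Hedged example: keep the CPU pointer and the bus handle separate rather
     * than deriving one from the other with virt_to_phys(). */
    static void *alloc_device_table(struct pci_dev *pdev, int nr_pages,
                                    dma_addr_t *bus_addr)
    {
            return dma_alloc_coherent(&pdev->dev, nr_pages * PAGE_SIZE,
                                      bus_addr, GFP_KERNEL);
    }

    static void free_device_table(struct pci_dev *pdev, int nr_pages,
                                  void *cpu_addr, dma_addr_t bus_addr)
    {
            dma_free_coherent(&pdev->dev, nr_pages * PAGE_SIZE,
                              cpu_addr, bus_addr);
    }
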
diff --git a/linux-2.6.9-patches/drm.patch b/linux-2.6.9-patches/drm.patch
new file mode 100644 (file)
index 0000000..3412ce6
--- /dev/null
@@ -0,0 +1,11 @@
+--- linux-2.6.8.1/drivers/char/drm/ati_pcigart.h       2004-08-14 11:56:14.000000000 +0100
++++ linux-2.6.8.1-xen0/drivers/char/drm/ati_pcigart.h  2004-09-05 06:14:51.751782846 +0100
+@@ -158,7 +158,7 @@
+       ret = 1;
+ #if defined(__i386__) || defined(__x86_64__)
+-      asm volatile ( "wbinvd" ::: "memory" );
++      wbinvd();
+ #else
+       mb();
+ #endif
diff --git a/linux-2.6.9-xen-sparse/arch/xen/Kconfig b/linux-2.6.9-xen-sparse/arch/xen/Kconfig
new file mode 100644 (file)
index 0000000..57e3eba
--- /dev/null
@@ -0,0 +1,172 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+mainmenu "Linux Kernel Configuration"
+
+config XEN
+       bool
+       default y
+       help
+         This is the Linux Xen port.
+
+config ARCH_XEN
+       bool
+       default y
+
+
+config NO_IDLE_HZ
+       bool
+       default y
+
+
+menu "XEN"
+
+config XEN_PRIVILEGED_GUEST
+       bool "Privileged Guest (domain 0)"
+       default n
+        select XEN_PHYSDEV_ACCESS
+       help
+         Support for privileged operation (domain 0)
+
+config XEN_PHYSDEV_ACCESS
+       bool "Physical device access"
+       default y if XEN_PRIVILEGED_GUEST
+       default n if !XEN_PRIVILEGED_GUEST
+       help
+         Assume access is available to physical hardware devices
+          (e.g., hard drives, network cards). This allows you to configure
+          such devices and also includes some low-level support that is
+          otherwise not compiled into the kernel.
+
+config XEN_BLKDEV_BACKEND
+        bool "Block-device backend driver"
+        default y if XEN_PHYSDEV_ACCESS
+        default n if !XEN_PHYSDEV_ACCESS
+        help
+          The block-device backend driver allows the kernel to export its
+          block devices to other guests via a high-performance shared-memory
+          interface.
+
+config XEN_NETDEV_BACKEND
+        bool "Network-device backend driver"
+        default y if XEN_PHYSDEV_ACCESS
+        default n if !XEN_PHYSDEV_ACCESS
+        help
+          The network-device backend driver allows the kernel to export its
+          network devices to other guests via a high-performance shared-memory
+          interface.
+
+config XEN_BLKDEV_FRONTEND
+        bool "Block-device frontend driver"
+        default y
+        help
+          The block-device frontend driver allows the kernel to access block
+          devices mounted within another guest OS. Unless you are building a
+          dedicated device-driver domain, or your master control domain
+          (domain 0), you almost certainly want to say Y here.
+
+config XEN_NETDEV_FRONTEND
+        bool "Network-device frontend driver"
+        default y
+        help
+          The network-device frontend driver allows the kernel to access
+          network interfaces within another guest OS. Unless you are building a
+          dedicated device-driver domain, or your master control domain
+          (domain 0), you almost certainly want to say Y here.
+
+if XEN_NETDEV_FRONTEND
+config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+        bool "Pipelined transmitter (DANGEROUS)"
+        default n
+        help
+          The driver will assume that the backend is pipelining packets for
+          transmission: whenever packets are pending in the remote backend,
+          the driver will not send asynchronous notifications when it queues
+          additional packets for transmission.
+          If the backend is a dumb domain, such as a transparent Ethernet
+          bridge with no local IP interface, it is safe to say Y here to get
+          slightly lower network overhead.
+          If the backend has a local IP interface, or may be doing smart things
+          like reassembling packets to perform firewall filtering, or if you
+          are unsure, or if you experience network hangs when this option is
+          enabled, then you must say N here.
+endif
+
+config XEN_WRITABLE_PAGETABLES
+       bool "Use writable pagetables"
+       default n
+       help
+         Use writable L1 pagetables
+
+config XEN_SCRUB_PAGES
+        bool "Scrub memory before freeing it to Xen"
+        default y
+        help
+          Erase memory contents before freeing it back to Xen's global
+          pool. This ensures that any secrets contained within that
+          memory (e.g., private keys) cannot be found by other guests that
+          may be running on the machine. Most people will want to say Y here.
+          If security is not a concern then you may increase performance by
+          saying N.
+
+endmenu
+
+config FOREIGN_PAGES
+       bool
+       default y
+
+config HAVE_ARCH_DEV_ALLOC_SKB
+       bool
+       default y
+
+#config VT
+#      bool
+#      default y
+
+#config VT_CONSOLE
+#      bool
+#      default y
+
+#config HW_CONSOLE
+#      bool
+#      default y
+
+choice
+       prompt "Processor Type"
+       default X86
+
+config X86
+       bool "X86"
+       help
+         Choose this option if your computer has an X86 processor.
+
+config X86_64
+       bool "X86_64"
+       help
+         Choose this option if your computer has an X86_64 processor.
+
+endchoice
+
+source "init/Kconfig"
+
+if X86
+source "arch/xen/i386/Kconfig"
+endif
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "arch/xen/Kconfig.drivers"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers b/linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers
new file mode 100644 (file)
index 0000000..93db5f9
--- /dev/null
@@ -0,0 +1,55 @@
+# arch/xen/Kconfig.drivers
+
+menu "Device Drivers"
+
+source "drivers/base/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/mtd/Kconfig"
+source "drivers/parport/Kconfig"
+source "drivers/pnp/Kconfig"
+endif
+
+source "drivers/block/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/ide/Kconfig"
+endif
+
+source "drivers/scsi/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/cdrom/Kconfig"
+endif
+
+source "drivers/md/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/message/fusion/Kconfig"
+source "drivers/ieee1394/Kconfig"
+source "drivers/message/i2o/Kconfig"
+endif
+
+source "net/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/isdn/Kconfig"
+source "drivers/telephony/Kconfig"
+source "drivers/input/Kconfig"
+source "drivers/char/Kconfig"
+source "drivers/i2c/Kconfig"
+source "drivers/w1/Kconfig"
+source "drivers/misc/Kconfig"
+source "drivers/media/Kconfig"
+source "drivers/video/Kconfig"
+source "sound/Kconfig"
+source "drivers/usb/Kconfig"
+endif
+
+if !XEN_PHYSDEV_ACCESS
+config UNIX98_PTYS
+       bool
+       default y
+endif
+
+endmenu
diff --git a/linux-2.6.9-xen-sparse/arch/xen/Makefile b/linux-2.6.9-xen-sparse/arch/xen/Makefile
new file mode 100644 (file)
index 0000000..224e0aa
--- /dev/null
@@ -0,0 +1,75 @@
+#
+# xen/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean", cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2004 by Christian Limpach
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+# pick up headers from include/asm-xen/asm in preference over include/asm
+NOSTDINC_FLAGS  = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
+
+# make uname return the processor arch
+UTS_MACHINE := $(XENARCH)
+
+core-y += arch/xen/kernel/
+
+include/.asm-ignore:
+       @rm -f include/.asm-ignore
+       @mv include/asm include/.asm-ignore
+       @echo '  SYMLINK include/asm -> include/asm-$(XENARCH)'
+       $(Q)if [ ! -d include ]; then mkdir -p include; fi;
+       @ln -fsn asm-$(XENARCH) include/asm
+
+include/asm-xen/asm:
+       @echo '  SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
+       @ln -fsn asm-$(XENARCH) $@
+
+include/asm-xen/asm-$(XENARCH)/hypervisor-ifs:
+       @echo '  SYMLINK $@ -> include/asm-xen/hypervisor-ifs'
+       @ln -fsn ../hypervisor-ifs $@
+
+arch/xen/arch:
+       @rm -f $@
+       @ln -fsn $(XENARCH) $@
+
+prepare: include/.asm-ignore include/asm-xen/asm \
+       include/asm-xen/asm-$(XENARCH)/hypervisor-ifs \
+       arch/xen/arch ;
+
+all: vmlinuz
+
+vmlinuz: vmlinux
+       $(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
+
+XINSTALL_NAME ?= $(KERNELRELEASE)
+install: vmlinuz
+       mkdir -p $(INSTALL_PATH)/boot
+       install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+       install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+       install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+       install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+
+dist:
+       $(MAKE) INSTALL_PATH=../install install
+
+archclean:
+       @if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
+       @rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
+
+define archhelp
+  echo  '* vmlinuz     - Compressed kernel image'
+  echo  '  install     - Install kernel image and config file'
+endef
+
+ifneq ($(XENARCH),)
+include        $(srctree)/arch/xen/$(XENARCH)/Makefile
+endif
diff --git a/linux-2.6.9-xen-sparse/arch/xen/boot/Makefile b/linux-2.6.9-xen-sparse/arch/xen/boot/Makefile
new file mode 100644 (file)
index 0000000..ff37924
--- /dev/null
@@ -0,0 +1,8 @@
+
+OBJCOPYFLAGS := -g --strip-unneeded
+
+vmlinuz: vmlinux-stripped FORCE
+       $(call if_changed,gzip)
+
+vmlinux-stripped: vmlinux FORCE
+       $(call if_changed,objcopy)
diff --git a/linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig b/linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig
new file mode 100644 (file)
index 0000000..42aeb48
--- /dev/null
@@ -0,0 +1,1032 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_FOREIGN_PAGES=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+CONFIG_PCI_GODIRECT=y
+# CONFIG_PCI_GOANY is not set
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+
+#
+# PCMCIA/CardBus support
+#
+CONFIG_PCMCIA=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_YENTA=m
+CONFIG_CARDBUS=y
+# CONFIG_PD6729 is not set
+# CONFIG_I82092 is not set
+# CONFIG_I82365 is not set
+# CONFIG_TCIC is not set
+CONFIG_PCMCIA_PROBE=y
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_STD_RESOURCES=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+# CONFIG_PNP is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=y
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_LBD is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECS is not set
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+CONFIG_BLK_DEV_ADMA=y
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+CONFIG_BLK_DEV_SVWKS=y
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+CONFIG_SCSI_MEGARAID=y
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=y
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA6322 is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_LINEAR is not set
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID5=y
+# CONFIG_MD_RAID6 is not set
+# CONFIG_MD_MULTIPATH is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_ZERO is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_IRC is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+# CONFIG_IP_NF_MATCH_STATE is not set
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+# CONFIG_IP_NF_FILTER is not set
+# CONFIG_IP_NF_NAT is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_PCMCIA_XIRCOM is not set
+# CONFIG_PCMCIA_XIRTULIP is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_E100_NAPI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+CONFIG_VIA_RHINE=y
+# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=y
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+CONFIG_TIGON3=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
diff --git a/linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig b/linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig
new file mode 100644 (file)
index 0000000..d6940f6
--- /dev/null
@@ -0,0 +1,475 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+# CONFIG_XEN_BLKDEV_BACKEND is not set
+# CONFIG_XEN_NETDEV_BACKEND is not set
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_FOREIGN_PAGES=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_STD_RESOURCES=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_LBD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_UNIX98_PTYS=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
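The xenU defconfig above is the unprivileged guest configuration: the Xen block and network frontend drivers are built in, while privileged-guest mode, physical device access and the backend drivers are left unset, and the PCI, IDE, SCSI and network hardware drivers enabled in the larger defconfig earlier in this changeset are dropped. To see exactly which generated options the two configurations disagree on, a small comparison script along the lines of the sketch below can be used. This is an illustrative aid only, not part of the commit; the xen0_defconfig path is an assumption inferred from the sibling file name in the diff header.

def load_config(path):
    """Parse a kernel defconfig into {CONFIG_NAME: value}; lines of the
    form '# CONFIG_FOO is not set' are recorded with the value 'n'."""
    options = {}
    with open(path) as fh:
        for raw in fh:
            line = raw.strip()
            if line.startswith("CONFIG_") and "=" in line:
                name, value = line.split("=", 1)
                options[name] = value
            elif line.startswith("# CONFIG_") and line.endswith(" is not set"):
                options[line[len("# "):-len(" is not set")]] = "n"
    return options

# Paths assume a local checkout of the 2.6.9 xen-sparse tree (assumption).
dom0 = load_config("linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig")
domU = load_config("linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig")

for name in sorted(set(dom0) | set(domU)):
    left, right = dom0.get(name, "unset"), domU.get(name, "unset")
    if left != right:
        print("%-40s xen0=%-10s xenU=%s" % (name, left, right))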
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig b/linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig
new file mode 100644 (file)
index 0000000..6e22f7c
--- /dev/null
@@ -0,0 +1,937 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+menu "X86 Processor Configuration"
+
+config XENARCH
+       string
+       default i386
+
+config MMU
+       bool
+       default y
+
+config SBUS
+       bool
+
+config UID16
+       bool
+       default y
+
+config GENERIC_ISA_DMA
+       bool
+       default y
+
+
+choice
+       prompt "Processor family"
+       default M686
+
+#config M386
+#      bool "386"
+#      ---help---
+#        This is the processor type of your CPU. This information is used for
+#        optimizing purposes. In order to compile a kernel that can run on
+#        all x86 CPU types (albeit not optimally fast), you can specify
+#        "386" here.
+#
+#        The kernel will not necessarily run on earlier architectures than
+#        the one you have chosen, e.g. a Pentium optimized kernel will run on
+#        a PPro, but not necessarily on a i486.
+#
+#        Here are the settings recommended for greatest speed:
+#        - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
+#        486DLC/DLC2, UMC 486SX-S and NexGen Nx586.  Only "386" kernels
+#        will run on a 386 class machine.
+#        - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
+#        SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+#        - "586" for generic Pentium CPUs lacking the TSC
+#        (time stamp counter) register.
+#        - "Pentium-Classic" for the Intel Pentium.
+#        - "Pentium-MMX" for the Intel Pentium MMX.
+#        - "Pentium-Pro" for the Intel Pentium Pro.
+#        - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
+#        - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
+#        - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
+#        - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
+#        - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
+#        - "Crusoe" for the Transmeta Crusoe series.
+#        - "Winchip-C6" for original IDT Winchip.
+#        - "Winchip-2" for IDT Winchip 2.
+#        - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
+#        - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
+#        - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
+#
+#        If you don't know what to do, choose "386".
+
+#config M486
+#      bool "486"
+#      help
+#        Select this for a 486 series processor, either Intel or one of the
+#        compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
+#        DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
+#        U5S.
+
+#config M586
+#      bool "586/K5/5x86/6x86/6x86MX"
+#      help
+#        Select this for an 586 or 686 series processor such as the AMD K5,
+#        the Intel 5x86 or 6x86, or the Intel 6x86MX.  This choice does not
+#        assume the RDTSC (Read Time Stamp Counter) instruction.
+
+#config M586TSC
+#      bool "Pentium-Classic"
+#      help
+#        Select this for a Pentium Classic processor with the RDTSC (Read
+#        Time Stamp Counter) instruction for benchmarking.
+
+#config M586MMX
+#      bool "Pentium-MMX"
+#      help
+#        Select this for a Pentium with the MMX graphics/multimedia
+#        extended instructions.
+
+config M686
+       bool "Pentium-Pro"
+       help
+         Select this for Intel Pentium Pro chips.  This enables the use of
+         Pentium Pro extended instructions, and disables the init-time guard
+         against the f00f bug found in earlier Pentiums.
+
+config MPENTIUMII
+       bool "Pentium-II/Celeron(pre-Coppermine)"
+       help
+         Select this for Intel chips based on the Pentium-II and
+         pre-Coppermine Celeron core.  This option enables an unaligned
+         copy optimization, compiles the kernel with optimization flags
+         tailored for the chip, and applies any applicable Pentium Pro
+         optimizations.
+
+config MPENTIUMIII
+       bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
+       help
+         Select this for Intel chips based on the Pentium-III and
+         Celeron-Coppermine core.  This option enables use of some
+         extended prefetch instructions in addition to the Pentium II
+         extensions.
+
+config MPENTIUMM
+       bool "Pentium M"
+       help
+         Select this for Intel Pentium M (not Pentium-4 M)
+         notebook chips.
+
+config MPENTIUM4
+       bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+       help
+         Select this for Intel Pentium 4 chips.  This includes the
+         Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
+         (not Pentium M) chips.  This option enables compile flags
+         optimized for the chip, uses the correct cache shift, and
+         applies any applicable Pentium III optimizations.
+
+config MK6
+       bool "K6/K6-II/K6-III"
+       help
+         Select this for an AMD K6-family processor.  Enables use of
+         some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MK7
+       bool "Athlon/Duron/K7"
+       help
+         Select this for an AMD Athlon K7-family processor.  Enables use of
+         some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MK8
+       bool "Opteron/Athlon64/Hammer/K8"
+       help
+         Select this for an AMD Opteron or Athlon64 Hammer-family processor.  Enables
+         use of some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MCRUSOE
+       bool "Crusoe"
+       help
+         Select this for a Transmeta Crusoe processor.  Treats the processor
+         like a 586 with TSC, and sets some GCC optimization flags (like a
+         Pentium Pro with no alignment requirements).
+
+#config MWINCHIPC6
+#      bool "Winchip-C6"
+#      help
+#        Select this for an IDT Winchip C6 chip.  Linux and GCC
+#        treat this chip as a 586TSC with some extended instructions
+#        and alignment requirements.
+
+#config MWINCHIP2
+#      bool "Winchip-2"
+#      help
+#        Select this for an IDT Winchip-2.  Linux and GCC
+#        treat this chip as a 586TSC with some extended instructions
+#        and alignment requirements.
+
+#config MWINCHIP3D
+#      bool "Winchip-2A/Winchip-3"
+#      help
+#        Select this for an IDT Winchip-2A or 3.  Linux and GCC
+#        treat this chip as a 586TSC with some extended instructions
+#        and alignment requirements.  Also enable out of order memory
+#        stores for this CPU, which can increase performance of some
+#        operations.
+
+config MCYRIXIII
+       bool "CyrixIII/VIA-C3"
+       help
+         Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
+         treat this chip as a generic 586. Whilst the CPU is 686 class,
+         it lacks the cmov extension which gcc assumes is present when
+         generating 686 code.
+         Note that Nehemiah (Model 9) and above will not boot with this
+         kernel due to them lacking the 3DNow! instructions used in earlier
+         incarnations of the CPU.
+
+config MVIAC3_2
+       bool "VIA C3-2 (Nehemiah)"
+       help
+         Select this for a VIA C3 "Nehemiah". Selecting this enables usage
+         of SSE and tells gcc to treat the CPU as a 686.
+         Note, this kernel will not boot on older (pre model 9) C3s.
+
+endchoice
+
+config X86_GENERIC
+       bool "Generic x86 support" 
+       help
+         Instead of just including optimizations for the selected
+         x86 variant (e.g. PII, Crusoe or Athlon), include some more
+         generic optimizations as well. This will make the kernel
+         perform better on x86 CPUs other than that selected.
+
+         This is really intended for distributors who need more
+         generic optimizations.
+
+#
+# Define implied options from the CPU selection here
+#
+config X86_CMPXCHG
+       bool
+       depends on !M386
+       default y
+
+config X86_XADD
+       bool
+       depends on !M386
+       default y
+
+config X86_L1_CACHE_SHIFT
+       int
+       default "7" if MPENTIUM4 || X86_GENERIC
+       default "4" if X86_ELAN || M486 || M386
+       default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
+       default "6" if MK7 || MK8 || MPENTIUMM
+
+config RWSEM_GENERIC_SPINLOCK
+       bool
+       depends on M386
+       default y
+
+config RWSEM_XCHGADD_ALGORITHM
+       bool
+       depends on !M386
+       default y
+
+config X86_PPRO_FENCE
+       bool
+       depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
+       default y
+
+config X86_F00F_BUG
+       bool
+       depends on M586MMX || M586TSC || M586 || M486 || M386
+       default y
+
+config X86_WP_WORKS_OK
+       bool
+       depends on !M386
+       default y
+
+config X86_INVLPG
+       bool
+       depends on !M386
+       default y
+
+config X86_BSWAP
+       bool
+       depends on !M386
+       default y
+
+config X86_POPAD_OK
+       bool
+       depends on !M386
+       default y
+
+config X86_ALIGNMENT_16
+       bool
+       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+       default y
+
+config X86_GOOD_APIC
+       bool
+       depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8
+       default y
+
+config X86_INTEL_USERCOPY
+       bool
+       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7
+       default y
+
+config X86_USE_PPRO_CHECKSUM
+       bool
+       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2
+       default y
+
+config X86_USE_3DNOW
+       bool
+       depends on MCYRIXIII || MK7
+       default y
+
+config X86_OOSTORE
+       bool
+       depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
+       default y
+
+config HPET_TIMER
+       bool
+       default n
+#config HPET_TIMER
+#      bool "HPET Timer Support"
+#      help
+#        This enables the use of the HPET for the kernel's internal timer.
+#        HPET is the next generation timer replacing legacy 8254s.
+#        You can safely choose Y here.  However, HPET will only be
+#        activated if the platform and the BIOS support this feature.
+#        Otherwise the 8254 will be used for timing services.
+#
+#        Choose N to continue using the legacy 8254 timer.
+
+config HPET_EMULATE_RTC
+       def_bool HPET_TIMER && RTC=y
+
+config SMP
+       bool
+       default n
+#config SMP
+#      bool "Symmetric multi-processing support"
+#      ---help---
+#        This enables support for systems with more than one CPU. If you have
+#        a system with only one CPU, like most personal computers, say N. If
+#        you have a system with more than one CPU, say Y.
+#
+#        If you say N here, the kernel will run on single and multiprocessor
+#        machines, but will use only one CPU of a multiprocessor machine. If
+#        you say Y here, the kernel will run on many, but not all,
+#        singleprocessor machines. On a singleprocessor machine, the kernel
+#        will run faster if you say N here.
+#
+#        Note that if you say Y here and choose architecture "586" or
+#        "Pentium" under "Processor family", the kernel will not work on 486
+#        architectures. Similarly, multiprocessor kernels for the "PPro"
+#        architecture may not work on all Pentium based boards.
+#
+#        People using multiprocessor machines who say Y here should also say
+#        Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
+#        Management" code will be disabled if you say Y here.
+#
+#        See also the <file:Documentation/smp.txt>,
+#        <file:Documentation/i386/IO-APIC.txt>,
+#        <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+#        <http://www.tldp.org/docs.html#howto>.
+#
+#        If you don't know what to do here, say N.
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-255)"
+       range 2 255
+       depends on SMP
+       default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
+       default "8"
+       help
+         This allows you to specify the maximum number of CPUs which this
+         kernel will support.  The maximum supported value is 255 and the
+         minimum value which makes sense is 2.
+
+         This is purely to save memory - each supported CPU adds
+         approximately eight kilobytes to the kernel image.
+
+config SCHED_SMT
+       bool "SMT (Hyperthreading) scheduler support"
+       depends on SMP
+       default off
+       help
+         SMT scheduler support improves the CPU scheduler's decision making
+         when dealing with Intel Pentium 4 chips with HyperThreading at a
+         cost of slightly increased overhead in some places. If unsure say
+         N here.
+
+config PREEMPT
+       bool "Preemptible Kernel"
+       help
+         This option reduces the latency of the kernel when reacting to
+         real-time or interactive events by allowing a low priority process to
+         be preempted even if it is in kernel mode executing a system call.
+         This allows applications to run more reliably even when the system is
+         under load.
+
+         Say Y here if you are building a kernel for a desktop, embedded
+         or real-time system.  Say N if you are unsure.
+
+#config X86_TSC
+#       bool
+#      depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
+#       default y
+
+#config X86_MCE
+#       bool "Machine Check Exception"
+#      depends on !X86_VOYAGER
+#       ---help---
+#         Machine Check Exception support allows the processor to notify the
+#         kernel if it detects a problem (e.g. overheating, component failure).
+#         The action the kernel takes depends on the severity of the problem,
+#         ranging from a warning message on the console, to halting the machine.
+#         Your processor must be a Pentium or newer to support this - check the
+#         flags in /proc/cpuinfo for mce.  Note that some older Pentium systems
+#         have a design flaw which leads to false MCE events - hence MCE is
+#         disabled on all P5 processors, unless explicitly enabled with "mce"
+#         as a boot argument.  Similarly, if MCE is built in and creates a
+#         problem on some new non-standard machine, you can boot with "nomce"
+#         to disable it.  MCE support simply ignores non-MCE processors like
+#         the 386 and 486, so nearly everyone can say Y here.
+
+#config X86_MCE_NONFATAL
+#      tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
+#       depends on X86_MCE
+#       help
+#         Enabling this feature starts a timer that triggers every 5 seconds which
+#         will look at the machine check registers to see if anything happened.
+#         Non-fatal problems automatically get corrected (but still logged).
+#         Disable this if you don't want to see these messages.
+#         Seeing the messages this option prints out may be indicative of dying hardware,
+#         or out-of-spec (ie, overclocked) hardware.
+#         This option only does something on certain CPUs.
+#         (AMD Athlon/Duron and Intel Pentium 4)
+
+#config X86_MCE_P4THERMAL
+#       bool "check for P4 thermal throttling interrupt."
+#       depends on X86_MCE && (X86_UP_APIC || SMP)
+#       help
+#         Enabling this feature will cause a message to be printed when the P4
+#         enters thermal throttling.
+
+#config MICROCODE
+#       tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
+#       ---help---
+#         If you say Y here and also to "/dev file system support" in the
+#         'File systems' section, you will be able to update the microcode on
+#         Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
+#         Pentium III, Pentium 4, Xeon etc.  You will obviously need the
+#         actual microcode binary data itself which is not shipped with the
+#         Linux kernel.
+#
+#         For latest news and information on obtaining all the required
+#         ingredients for this driver, check:
+#         <http://www.urbanmyth.org/microcode/>.
+#
+#         To compile this driver as a module, choose M here: the
+#         module will be called microcode.
+
+#config X86_MSR
+#       tristate "/dev/cpu/*/msr - Model-specific register support"
+#       help
+#         This device gives privileged processes access to the x86
+#         Model-Specific Registers (MSRs).  It is a character device with
+#         major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+#         MSR accesses are directed to a specific CPU on multi-processor
+#         systems.
+
+config X86_CPUID
+       tristate "/dev/cpu/*/cpuid - CPU information support"
+       help
+         This device gives processes access to the x86 CPUID instruction to
+         be executed on a specific processor.  It is a character device
+         with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+         /dev/cpu/31/cpuid.
+
+source "drivers/firmware/Kconfig"
+
+choice
+       prompt "High Memory Support"
+       default NOHIGHMEM
+
+config NOHIGHMEM
+       bool "off"
+       ---help---
+         Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+         However, the address space of 32-bit x86 processors is only 4
+         Gigabytes large. That means that, if you have a large amount of
+         physical memory, not all of it can be "permanently mapped" by the
+         kernel. The physical memory that's not permanently mapped is called
+         "high memory".
+
+         If you are compiling a kernel which will never run on a machine with
+         more than 1 Gigabyte total physical RAM, answer "off" here (default
+         choice and suitable for most users). This will result in a "3GB/1GB"
+         split: 3GB are mapped so that each process sees a 3GB virtual memory
+         space and the remaining part of the 4GB virtual memory space is used
+         by the kernel to permanently map as much physical memory as
+         possible.
+
+         If the machine has between 1 and 4 Gigabytes physical RAM, then
+         answer "4GB" here.
+
+         If more than 4 Gigabytes is used then answer "64GB" here. This
+         selection turns Intel PAE (Physical Address Extension) mode on.
+         PAE implements 3-level paging on IA32 processors. PAE is fully
+         supported by Linux, PAE mode is implemented on all recent Intel
+         processors (Pentium Pro and better). NOTE: If you say "64GB" here,
+         then the kernel will not boot on CPUs that don't support PAE!
+
+         The actual amount of total physical memory will either be
+         auto detected or can be forced by using a kernel command line option
+         such as "mem=256M". (Try "man bootparam" or see the documentation of
+         your boot loader (lilo or loadlin) about how to pass options to the
+         kernel at boot time.)
+
+         If unsure, say "off".
+
+config HIGHMEM4G
+       bool "4GB"
+       help
+         Select this if you have a 32-bit processor and between 1 and 4
+         gigabytes of physical RAM.
+
+#config HIGHMEM64G
+#      bool "64GB"
+#      help
+#        Select this if you have a 32-bit processor and more than 4
+#        gigabytes of physical RAM.
+
+endchoice
+
+config HIGHMEM
+       bool
+       depends on HIGHMEM64G || HIGHMEM4G
+       default y
+
+config X86_PAE
+       bool
+       depends on HIGHMEM64G
+       default y
+
+# Common NUMA Features
+config NUMA
+       bool "Numa Memory Allocation and Scheduler Support"
+       depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
+       default n if X86_PC
+       default y if (X86_NUMAQ || X86_SUMMIT)
+
+# Need comments to help the hapless user trying to turn on NUMA support
+comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
+       depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
+
+comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
+       depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+
+config DISCONTIGMEM
+       bool
+       depends on NUMA
+       default y
+
+config HAVE_ARCH_BOOTMEM_NODE
+       bool
+       depends on NUMA
+       default y
+
+#config HIGHPTE
+#      bool "Allocate 3rd-level pagetables from highmem"
+#      depends on HIGHMEM4G || HIGHMEM64G
+#      help
+#        The VM uses one page table entry for each page of physical memory.
+#        For systems with a lot of RAM, this can be wasteful of precious
+#        low memory.  Setting this option will put user-space page table
+#        entries in high memory.
+
+#config MTRR
+#       bool "MTRR (Memory Type Range Register) support"
+#       ---help---
+#         On Intel P6 family processors (Pentium Pro, Pentium II and later)
+#         the Memory Type Range Registers (MTRRs) may be used to control
+#         processor access to memory ranges. This is most useful if you have
+#         a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+#         allows bus write transfers to be combined into a larger transfer
+#         before bursting over the PCI/AGP bus. This can increase performance
+#         of image write operations 2.5 times or more. Saying Y here creates a
+#         /proc/mtrr file which may be used to manipulate your processor's
+#         MTRRs. Typically the X server should use this.
+#
+#         This code has a reasonably generic interface so that similar
+#         control registers on other processors can be easily supported
+#         as well:
+#
+#         The Cyrix 6x86, 6x86MX and M II processors have Address Range
+#         Registers (ARRs) which provide a similar functionality to MTRRs. For
+#         these, the ARRs are used to emulate the MTRRs.
+#         The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+#         MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
+#         write-combining. All of these processors are supported by this code
+#         and it makes sense to say Y here if you have one of them.
+#
+#         Saying Y here also fixes a problem with buggy SMP BIOSes which only
+#         set the MTRRs for the boot CPU and not for the secondary CPUs. This
+#         can lead to all sorts of problems, so it's good to say Y here.
+#
+#         You can safely say Y even if your machine doesn't have MTRRs, you'll
+#         just add about 9 KB to your kernel.
+#
+#         See <file:Documentation/mtrr.txt> for more information.
+
+config IRQBALANCE
+       bool "Enable kernel irq balancing"
+       depends on SMP && X86_IO_APIC
+       default y
+       help
+         The default yes will allow the kernel to do irq load balancing.
+         Saying no will keep the kernel from doing irq load balancing.
+
+config HAVE_DEC_LOCK
+       bool
+       depends on (SMP || PREEMPT) && X86_CMPXCHG
+       default y
+
+# turning this on wastes a bunch of space.
+# Summit needs it only when NUMA is on
+config BOOT_IOREMAP
+       bool
+       depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
+       default y
+
+config REGPARM
+       bool "Use register arguments (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default n
+       help
+       Compile the kernel with -mregparm=3. This uses a different ABI
+       and passes the first three arguments of a function call in registers.
+       This will probably break binary-only modules.
+
+       This feature is only enabled for gcc-3.0 and later - earlier compilers
+       generate incorrect output with certain kernel constructs when
+       -mregparm=3 is used.
+
+
+if XEN_PHYSDEV_ACCESS
+
+menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+
+config X86_VISWS_APIC
+       bool
+       depends on X86_VISWS
+       default y
+
+config X86_LOCAL_APIC
+       bool
+       depends on (X86_VISWS || SMP) && !X86_VOYAGER
+       default y
+
+config X86_IO_APIC
+       bool
+       depends on SMP && !(X86_VISWS || X86_VOYAGER)
+       default y
+
+config PCI
+       bool "PCI support" if !X86_VISWS
+       depends on !X86_VOYAGER
+       default y if X86_VISWS
+       help
+         Find out whether you have a PCI motherboard. PCI is the name of a
+         bus system, i.e. the way the CPU talks to the other stuff inside
+         your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+         VESA. If you have PCI, say Y, otherwise N.
+
+         The PCI-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>, contains valuable
+         information about which PCI hardware does work under Linux and which
+         doesn't.
+
+#choice
+#      prompt "PCI access mode"
+#      depends on PCI && !X86_VISWS
+#      default PCI_GOANY
+#      ---help---
+#        On PCI systems, the BIOS can be used to detect the PCI devices and
+#        determine their configuration. However, some old PCI motherboards
+#        have BIOS bugs and may crash if this is done. Also, some embedded
+#        PCI-based systems don't have any BIOS at all. Linux can also try to
+#        detect the PCI hardware directly without using the BIOS.
+#
+#        With this option, you can specify how Linux should detect the
+#        PCI devices. If you choose "BIOS", the BIOS will be used,
+#        if you choose "Direct", the BIOS won't be used, and if you
+#        choose "MMConfig", then PCI Express MMCONFIG will be used.
+#        If you choose "Any", the kernel will try MMCONFIG, then the
+#        direct access method and falls back to the BIOS if that doesn't
+#        work. If unsure, go with the default, which is "Any".
+#
+#config PCI_GOBIOS
+#      bool "BIOS"
+#
+#config PCI_GOMMCONFIG
+#      bool "MMConfig"
+#
+#config PCI_GODIRECT
+#      bool "Direct"
+#
+#config PCI_GOANY
+#      bool "Any"
+#
+#endchoice
+#
+#config PCI_BIOS
+#      bool
+#      depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
+#      default y
+#
+#config PCI_DIRECT
+#      bool
+#      depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+#      default y
+
+config PCI_DIRECT
+       bool
+       depends on PCI
+       default y
+
+source "drivers/pci/Kconfig"
+
+config ISA
+       bool "ISA support"
+       depends on !(X86_VOYAGER || X86_VISWS)
+       help
+         Find out whether you have ISA slots on your motherboard.  ISA is the
+         name of a bus system, i.e. the way the CPU talks to the other stuff
+         inside your box.  Other bus systems are PCI, EISA, MicroChannel
+         (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
+         newer boards don't support it.  If you have ISA, say Y, otherwise N.
+
+config EISA
+       bool "EISA support"
+       depends on ISA
+       ---help---
+         The Extended Industry Standard Architecture (EISA) bus was
+         developed as an open alternative to the IBM MicroChannel bus.
+
+         The EISA bus provided some of the features of the IBM MicroChannel
+         bus while maintaining backward compatibility with cards made for
+         the older ISA bus.  The EISA bus saw limited use between 1988 and
+         1995 when it was made obsolete by the PCI bus.
+
+         Say Y here if you are building a kernel for an EISA-based machine.
+
+         Otherwise, say N.
+
+source "drivers/eisa/Kconfig"
+
+config MCA
+       bool "MCA support"
+       depends on !(X86_VISWS || X86_VOYAGER)
+       help
+         MicroChannel Architecture is found in some IBM PS/2 machines and
+         laptops.  It is a bus system similar to PCI or ISA. See
+         <file:Documentation/mca.txt> (and especially the web page given
+         there) before attempting to build an MCA bus kernel.
+
+config MCA
+       depends on X86_VOYAGER
+       default y if X86_VOYAGER
+
+source "drivers/mca/Kconfig"
+
+config SCx200
+       tristate "NatSemi SCx200 support"
+       depends on !X86_VOYAGER
+       help
+         This provides basic support for the National Semiconductor SCx200 
+         processor.  Right now this is just a driver for the GPIO pins.
+
+         If you don't know what to do here, say N.
+
+         This support is also available as a module.  If compiled as a
+         module, it will be called scx200.
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+endif
+
+menu "Kernel hacking"
+
+config DEBUG_KERNEL
+       bool "Kernel debugging"
+       help
+         Say Y here if you are developing drivers or trying to debug and
+         identify kernel problems.
+
+config EARLY_PRINTK
+       bool "Early printk" if EMBEDDED
+       default y
+       help
+         Write kernel log output directly into the VGA buffer or to a serial
+         port.
+
+         This is useful for kernel debugging when your machine crashes very
+         early before the console code is initialized. For normal operation
+         it is not recommended because it looks ugly and doesn't cooperate
+         with klogd/syslogd or the X server. You should normally say N here,
+         unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+       bool "Check for stack overflows"
+       depends on DEBUG_KERNEL
+
+config DEBUG_STACK_USAGE
+       bool "Stack utilization instrumentation"
+       depends on DEBUG_KERNEL
+       help
+         Enables the display of the minimum amount of free stack which each
+         task has ever had available in the sysrq-T and sysrq-P debug output.
+
+         This option will slow down process creation somewhat.
+
+config DEBUG_SLAB
+       bool "Debug memory allocations"
+       depends on DEBUG_KERNEL
+       help
+         Say Y here to have the kernel do limited verification on memory
+         allocation as well as poisoning memory on free to catch use of freed
+         memory.
+
+config MAGIC_SYSRQ
+       bool "Magic SysRq key"
+       depends on DEBUG_KERNEL
+       help
+         If you say Y here, you will have some control over the system even
+         if the system crashes for example during kernel debugging (e.g., you
+         will be able to flush the buffer cache to disk, reboot the system
+         immediately or dump some status information). This is accomplished
+         by pressing various keys while holding SysRq (Alt+PrintScreen). It
+         also works on a serial console (on PC hardware at least), if you
+         send a BREAK and then within 5 seconds a command keypress. The
+         keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+         unless you really know what this hack does.
+
+config DEBUG_SPINLOCK
+       bool "Spinlock debugging"
+       depends on DEBUG_KERNEL
+       help
+         Say Y here and build SMP to catch missing spinlock initialization
+         and certain other kinds of spinlock errors commonly made.  This is
+         best used in conjunction with the NMI watchdog so that spinlock
+         deadlocks are also debuggable.
+
+config DEBUG_PAGEALLOC
+       bool "Page alloc debugging"
+       depends on DEBUG_KERNEL
+       help
+         Unmap pages from the kernel linear mapping after free_pages().
+         This results in a large slowdown, but helps to find certain types
+         of memory corruptions.
+
+config DEBUG_HIGHMEM
+       bool "Highmem debugging"
+       depends on DEBUG_KERNEL && HIGHMEM
+       help
+         This option enables additional error checking for high memory systems.
+         Disable for production systems.
+
+config DEBUG_INFO
+       bool "Compile the kernel with debug info"
+       depends on DEBUG_KERNEL
+       help
+          If you say Y here the resulting kernel image will include
+         debugging info resulting in a larger kernel image.
+         Say Y here only if you plan to use gdb to debug the kernel.
+         If you don't debug the kernel, you can say N.
+         
+config DEBUG_SPINLOCK_SLEEP
+       bool "Sleep-inside-spinlock checking"
+       help
+         If you say Y here, various routines which may sleep will become very
+         noisy if they are called with a spinlock held.        
+
+config FRAME_POINTER
+       bool "Compile the kernel with frame pointers"
+       help
+         If you say Y here the resulting kernel image will be slightly larger
+         and slower, but it will give very useful debugging information.
+         If you don't debug the kernel, you can say N, but we may not be able
+         to solve problems without frame pointers.
+
+config 4KSTACKS
+       bool "Use 4Kb for kernel stacks instead of 8Kb"
+       help
+         If you say Y here the kernel will use a 4Kb stacksize for the
+         kernel stack attached to each process/thread. This facilitates
+         running more threads on a system and also reduces the pressure
+         on the VM subsystem for higher order allocations. This option
+         will also use IRQ stacks to compensate for the reduced stackspace.
+
+config X86_FIND_SMP_CONFIG
+       bool
+       depends on X86_LOCAL_APIC || X86_VOYAGER
+       default y
+
+config X86_MPPARSE
+       bool
+       depends on X86_LOCAL_APIC && !X86_VISWS
+       default y
+
+endmenu
+
+config X86_SMP
+       bool
+       depends on SMP && !X86_VOYAGER
+       default y
+
+config X86_HT
+       bool
+       depends on SMP && !(X86_VISWS || X86_VOYAGER)
+       default y
+
+config X86_BIOS_REBOOT
+       bool
+       depends on !(X86_VISWS || X86_VOYAGER)
+       default y
+
+config X86_TRAMPOLINE
+       bool
+       depends on X86_SMP || (X86_VOYAGER && SMP)
+       default y
+
+# std_resources is overridden for pc9800, but that's not
+# a currently selectable arch choice
+config X86_STD_RESOURCES
+       bool
+       default y
+
+config PC
+       bool
+       depends on X86 && !EMBEDDED
+       default y
+
+endmenu
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/Makefile
new file mode 100644 (file)
index 0000000..2fcb9fe
--- /dev/null
@@ -0,0 +1,98 @@
+#
+# i386/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean", cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713  Artur Skawina <skawina@geocities.com>
+#           Added '-march' and '-mpreferred-stack-boundary' support
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+LDFLAGS                := -m elf_i386
+LDFLAGS_vmlinux :=
+CHECK          := $(CHECK) -D__i386__=1
+
+CFLAGS += -pipe -msoft-float
+
+# prevent gcc from keeping the stack 16 byte aligned
+CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,)
+
+align := $(subst -functions=0,,$(call cc-option,-falign-functions=0,-malign-functions=0))
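+# (cc-option yields whichever of -falign-functions=0 / -malign-functions=0 this
+# gcc accepts; stripping "-functions=0" leaves the bare -falign/-malign prefix,
+# reused below as $(align)-functions=, $(align)-jumps= and $(align)-loops=.)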
+
+cflags-$(CONFIG_M386)          += -march=i386
+cflags-$(CONFIG_M486)          += -march=i486
+cflags-$(CONFIG_M586)          += -march=i586
+cflags-$(CONFIG_M586TSC)       += -march=i586
+cflags-$(CONFIG_M586MMX)       += $(call cc-option,-march=pentium-mmx,-march=i586)
+cflags-$(CONFIG_M686)          += -march=i686
+cflags-$(CONFIG_MPENTIUMII)    += $(call cc-option,-march=pentium2,-march=i686)
+cflags-$(CONFIG_MPENTIUMIII)   += $(call cc-option,-march=pentium3,-march=i686)
+cflags-$(CONFIG_MPENTIUMM)     += $(call cc-option,-march=pentium3,-march=i686)
+cflags-$(CONFIG_MPENTIUM4)     += $(call cc-option,-march=pentium4,-march=i686)
+cflags-$(CONFIG_MK6)           += -march=k6
+# Please note that patches that add -march=athlon-xp and friends are pointless.
+# They make zero difference whatsoever to performance at this time.
+cflags-$(CONFIG_MK7)           += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
+cflags-$(CONFIG_MK8)           += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MWINCHIPC6)    += $(call cc-option,-march=winchip-c6,-march=i586)
+cflags-$(CONFIG_MWINCHIP2)     += $(call cc-option,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
+
+# AMD Elan support
+cflags-$(CONFIG_X86_ELAN)      += -march=i486
+
+# -mregparm=3 works ok on gcc-3.0 and later
+#
+GCC_VERSION                    := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+cflags-$(CONFIG_REGPARM)       += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
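+# (gcc-version.sh prints the version as e.g. 0303 for gcc 3.3, so the test
+# above adds -mregparm=3 only for gcc 3.0 or newer.)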
+
+# Disable unit-at-a-time mode, it makes gcc use a lot more stack
+# due to the lack of sharing of stacklots.
+CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
+
+CFLAGS += $(cflags-y)
+
+head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
+
+libs-y                                         += arch/i386/lib/
+core-y                                 += arch/xen/i386/kernel/ \
+                                          arch/xen/i386/mm/ \
+                                          arch/i386/crypto/
+# \
+#                                         arch/xen/$(mcore-y)/
+drivers-$(CONFIG_MATH_EMULATION)       += arch/i386/math-emu/
+drivers-$(CONFIG_PCI)                  += arch/xen/i386/pci/
+# must be linked after kernel/
+drivers-$(CONFIG_OPROFILE)             += arch/i386/oprofile/
+drivers-$(CONFIG_PM)                   += arch/i386/power/
+
+# for clean
+obj-   += kernel/ mm/ pci/
+#obj-  += ../../i386/lib/ ../../i386/mm/ 
+#../../i386/$(mcore-y)/
+#obj-  += ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
+
+xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen
+CFLAGS += $(xenflags-y)
+AFLAGS += $(xenflags-y)
+
+prepare: include/asm-$(XENARCH)/asm_offsets.h
+CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
+       include/linux/version.h include/config/MARKER
+
+include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
+       $(call filechk,gen-asm-offsets)
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile
new file mode 100644 (file)
index 0000000..3124b25
--- /dev/null
@@ -0,0 +1,91 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/kernel
+
+extra-y := head.o init_task.o vmlinux.lds
+
+obj-y  := traps.o irq.o ldt.o setup.o entry.o time.o pci-dma.o process.o \
+               ioport.o signal.o i386_ksyms.o
+
+c-obj-y        := semaphore.o vm86.o \
+               ptrace.o sys_i386.o \
+               i387.o dmi_scan.o bootflag.o \
+               doublefault.o
+s-obj-y        :=
+
+obj-y                          += cpu/
+obj-y                          += timers/
+c-obj-$(CONFIG_ACPI_BOOT)      += acpi/
+#c-obj-$(CONFIG_X86_BIOS_REBOOT)       += reboot.o
+c-obj-$(CONFIG_MCA)            += mca.o
+c-obj-$(CONFIG_X86_MSR)                += msr.o
+c-obj-$(CONFIG_X86_CPUID)      += cpuid.o
+c-obj-$(CONFIG_MICROCODE)      += microcode.o
+c-obj-$(CONFIG_APM)            += apm.o
+c-obj-$(CONFIG_X86_SMP)                += smp.o smpboot.o
+c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+c-obj-$(CONFIG_X86_MPPARSE)    += mpparse.o
+c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+c-obj-$(CONFIG_X86_IO_APIC)    += io_apic.o
+c-obj-$(CONFIG_X86_NUMAQ)      += numaq.o
+c-obj-$(CONFIG_X86_SUMMIT_NUMA)        += summit.o
+c-obj-$(CONFIG_MODULES)                += module.o
+obj-y                          += sysenter.o
+obj-y                          += vsyscall.o
+c-obj-$(CONFIG_ACPI_SRAT)      += srat.o
+c-obj-$(CONFIG_HPET_TIMER)     += time_hpet.o
+c-obj-$(CONFIG_EFI)            += efi.o efi_stub.o
+c-obj-$(CONFIG_EARLY_PRINTK)   += early_printk.o
+
+EXTRA_AFLAGS   := -traditional
+
+c-obj-$(CONFIG_SCx200)         += scx200.o
+
+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
+
+# vsyscall.o contains the vsyscall DSO images as __initdata.
+# We must build both images before we can assemble it.
+# Note: kbuild does not track this dependency due to usage of .incbin
+$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
+targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
+
+# The DSO images are built using a special linker script.
+quiet_cmd_syscall = SYSCALL $@
+      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+                         -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
+SYSCFLAGS_vsyscall-int80.so    = $(vsyscall-flags)
+
+$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+       $(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO.  With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(obj)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
+       $(call if_changed,syscall)
+
+c-link := init_task.o
+s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
+
+$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
+
+obj-y  += $(c-obj-y) $(s-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
new file mode 100644 (file)
index 0000000..478e023
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Makefile for x86-compatible CPU details and quirks
+#
+
+CFLAGS += -Iarch/i386/kernel/cpu
+
+obj-y  :=      common.o
+c-obj-y        +=      proc.o
+
+c-obj-y        +=      amd.o
+c-obj-y        +=      cyrix.o
+c-obj-y        +=      centaur.o
+c-obj-y        +=      transmeta.o
+c-obj-y        +=      intel.o
+c-obj-y        +=      rise.o
+c-obj-y        +=      nexgen.o
+c-obj-y        +=      umc.o
+
+#obj-$(CONFIG_X86_MCE) +=      ../../../../i386/kernel/cpu/mcheck/
+
+#obj-$(CONFIG_MTRR)    +=      ../../../../i386/kernel/cpu/mtrr/
+#obj-$(CONFIG_CPU_FREQ)        +=      ../../../../i386/kernel/cpu/cpufreq/
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c
new file mode 100644 (file)
index 0000000..a59d760
--- /dev/null
@@ -0,0 +1,606 @@
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm-xen/hypervisor.h>
+
+#include "cpu.h"
+
+DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+
+static int cachesize_override __initdata = -1;
+static int disable_x86_fxsr __initdata = 0;
+static int disable_x86_serial_nr __initdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern void mcheck_init(struct cpuinfo_x86 *c);
+
+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
+
+extern int disable_pse;
+
+static void default_init(struct cpuinfo_x86 * c)
+{
+       /* Not much we can do here... */
+       /* Check if at least it has cpuid */
+       if (c->cpuid_level == -1) {
+               /* No cpuid. It must be an ancient CPU */
+               if (c->x86 == 4)
+                       strcpy(c->x86_model_id, "486");
+               else if (c->x86 == 3)
+                       strcpy(c->x86_model_id, "386");
+       }
+}
+
+static struct cpu_dev default_cpu = {
+       .c_init = default_init,
+};
+static struct cpu_dev * this_cpu = &default_cpu;
+
+static int __init cachesize_setup(char *str)
+{
+       get_option (&str, &cachesize_override);
+       return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+int __init get_model_name(struct cpuinfo_x86 *c)
+{
+       unsigned int *v;
+       char *p, *q;
+
+       if (cpuid_eax(0x80000000) < 0x80000004)
+               return 0;
+
+       v = (unsigned int *) c->x86_model_id;
+       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+       c->x86_model_id[48] = 0;
+
+       /* Intel chips right-justify this string for some dumb reason;
+          undo that brain damage */
+       p = q = &c->x86_model_id[0];
+       while ( *p == ' ' )
+            p++;
+       if ( p != q ) {
+            while ( *p )
+                 *q++ = *p++;
+            while ( q <= &c->x86_model_id[48] )
+                 *q++ = '\0';  /* Zero-pad the rest */
+       }
+
+       return 1;
+}
+
+
+void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int n, dummy, ecx, edx, l2size;
+
+       n = cpuid_eax(0x80000000);
+
+       if (n >= 0x80000005) {
+               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+               c->x86_cache_size=(ecx>>24)+(edx>>24);  
+       }
+
+       if (n < 0x80000006)     /* Some chips just have a large L1. */
+               return;
+
+       ecx = cpuid_ecx(0x80000006);
+       l2size = ecx >> 16;
+       
+       /* do processor-specific cache resizing */
+       if (this_cpu->c_size_cache)
+               l2size = this_cpu->c_size_cache(c,l2size);
+
+       /* Allow user to override all this if necessary. */
+       if (cachesize_override != -1)
+               l2size = cachesize_override;
+
+       if ( l2size == 0 )
+               return;         /* Again, no L2 cache is possible */
+
+       c->x86_cache_size = l2size;
+
+       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+              l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the model name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+       struct cpu_model_info *info;
+
+       if ( c->x86_model >= 16 )
+               return NULL;    /* Range check */
+
+       if (!this_cpu)
+               return NULL;
+
+       info = this_cpu->c_models;
+
+       while (info && info->family) {
+               if (info->family == c->x86)
+                       return info->model_names[c->x86_model];
+               info++;
+       }
+       return NULL;            /* Not found */
+}
+
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+       char *v = c->x86_vendor_id;
+       int i;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               if (cpu_devs[i]) {
+                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+                           (cpu_devs[i]->c_ident[1] && 
+                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+                               c->x86_vendor = i;
+                               if (!early)
+                                       this_cpu = cpu_devs[i];
+                               break;
+                       }
+               }
+       }
+}
+
+
+static int __init x86_fxsr_setup(char * s)
+{
+       disable_x86_fxsr = 1;
+       return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+       u32 f1, f2;
+
+       asm("pushfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "movl %0,%1\n\t"
+           "xorl %2,%0\n\t"
+           "pushl %0\n\t"
+           "popfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "popfl\n\t"
+           : "=&r" (f1), "=&r" (f2)
+           : "ir" (flag));
+
+       return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
+int __init have_cpuid_p(void)
+{
+       return flag_is_changeable_p(X86_EFLAGS_ID);
+}
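+
+/*
+ * (X86_EFLAGS_ID is bit 21 of EFLAGS; being able to toggle it is the
+ * architectural test for CPUID support, which is what the probe above does.)
+ */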
+
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects. */
+void __init early_cpu_detect(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       c->x86_cache_alignment = 32;
+
+       if (!have_cpuid_p())
+               return;
+
+       /* Get vendor name */
+       cpuid(0x00000000, &c->cpuid_level,
+             (int *)&c->x86_vendor_id[0],
+             (int *)&c->x86_vendor_id[8],
+             (int *)&c->x86_vendor_id[4]);
+
+       get_cpu_vendor(c, 1);
+
+       c->x86 = 4;
+       if (c->cpuid_level >= 0x00000001) {
+               u32 junk, tfms, cap0, misc;
+               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+               c->x86 = (tfms >> 8) & 15;
+               c->x86_model = (tfms >> 4) & 15;
+               if (c->x86 == 0xf) {
+                       c->x86 += (tfms >> 20) & 0xff;
+                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
+               }
+               c->x86_mask = tfms & 15;
+               if (cap0 & (1<<19))
+                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+       }
+
+       early_intel_workaround(c);
+}
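+
+/*
+ * Worked example of the family/model decoding above: a CPU reporting
+ * tfms = 0x00000f29 has base family 0xf, so the extended family bits
+ * (20-27, here 0) are added and the extended model bits (16-19, here 0)
+ * are folded in, giving family 15, model 2, stepping 9.
+ */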
+
+void __init generic_identify(struct cpuinfo_x86 * c)
+{
+       u32 tfms, xlvl;
+       int junk;
+
+       if (have_cpuid_p()) {
+               /* Get vendor name */
+               cpuid(0x00000000, &c->cpuid_level,
+                     (int *)&c->x86_vendor_id[0],
+                     (int *)&c->x86_vendor_id[8],
+                     (int *)&c->x86_vendor_id[4]);
+               
+               get_cpu_vendor(c, 0);
+               /* Initialize the standard set of capabilities */
+               /* Note that the vendor-specific code below might override */
+       
+               /* Intel-defined flags: level 0x00000001 */
+               if ( c->cpuid_level >= 0x00000001 ) {
+                       u32 capability, excap;
+                       cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+                       c->x86_capability[0] = capability;
+                       c->x86_capability[4] = excap;
+                       c->x86 = (tfms >> 8) & 15;
+                       c->x86_model = (tfms >> 4) & 15;
+                       if (c->x86 == 0xf) {
+                               c->x86 += (tfms >> 20) & 0xff;
+                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
+                       } 
+                       c->x86_mask = tfms & 15;
+               } else {
+                       /* Have CPUID level 0 only - unheard of */
+                       c->x86 = 4;
+               }
+
+               /* AMD-defined flags: level 0x80000001 */
+               xlvl = cpuid_eax(0x80000000);
+               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+                       if ( xlvl >= 0x80000001 )
+                               c->x86_capability[1] = cpuid_edx(0x80000001);
+                       if ( xlvl >= 0x80000004 )
+                               get_model_name(c); /* Default name */
+               }
+       }
+}
+
+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+               /* Disable processor serial number */
+               unsigned long lo,hi;
+               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               lo |= 0x200000;
+               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               printk(KERN_NOTICE "CPU serial number disabled.\n");
+               clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+               /* Disabling the serial number may affect the cpuid level */
+               c->cpuid_level = cpuid_eax(0);
+       }
+}
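+
+/*
+ * (The MSR write above sets bit 21 of MSR_IA32_BBL_CR_CTL, which disables
+ * reporting of the Pentium III processor serial number; cpuid_level is
+ * re-read because hiding the PSN may lower the maximum reported CPUID leaf.)
+ */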
+
+static int __init x86_serial_nr_setup(char *s)
+{
+       disable_x86_serial_nr = 0;
+       return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       c->loops_per_jiffy = loops_per_jiffy;
+       c->x86_cache_size = -1;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       c->cpuid_level = -1;    /* CPUID not detected */
+       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+       c->x86_model_id[0] = '\0';  /* Unset */
+       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+       if (!have_cpuid_p()) {
+               /* First of all, decide if this is a 486 or higher */
+               /* It's a 486 if we can modify the AC flag */
+               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+                       c->x86 = 4;
+               else
+                       c->x86 = 3;
+       }
+
+       generic_identify(c);
+
+       printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
+               c->x86_capability[0],
+               c->x86_capability[1],
+               c->x86_capability[2],
+               c->x86_capability[3]);
+
+       if (this_cpu->c_identify) {
+               this_cpu->c_identify(c);
+
+       printk(KERN_DEBUG "CPU: After vendor identify, caps:  %08lx %08lx %08lx %08lx\n",
+               c->x86_capability[0],
+               c->x86_capability[1],
+               c->x86_capability[2],
+               c->x86_capability[3]);
+}
+
+       /*
+        * Vendor-specific initialization.  In this section we
+        * canonicalize the feature flags, meaning if there are
+        * features a certain CPU supports which CPUID doesn't
+        * tell us, CPUID claiming incorrect flags, or other bugs,
+        * we handle them here.
+        *
+        * At the end of this section, c->x86_capability better
+        * indicate the features this CPU genuinely supports!
+        */
+       if (this_cpu->c_init)
+               this_cpu->c_init(c);
+
+       /* Disable the PN if appropriate */
+       squash_the_stupid_serial_number(c);
+
+       /*
+        * The vendor-specific functions might have changed features.  Now
+        * we do "generic changes."
+        */
+
+       /* TSC disabled? */
+       if ( tsc_disable )
+               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+       /* FXSR disabled? */
+       if (disable_x86_fxsr) {
+               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+               clear_bit(X86_FEATURE_XMM, c->x86_capability);
+       }
+
+       if (disable_pse)
+               clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+       /* If the model name is still unset, do table lookup. */
+       if ( !c->x86_model_id[0] ) {
+               char *p;
+               p = table_lookup_model(c);
+               if ( p )
+                       strcpy(c->x86_model_id, p);
+               else
+                       /* Last resort... */
+                       sprintf(c->x86_model_id, "%02x/%02x",
+                               c->x86_vendor, c->x86_model);
+       }
+
+       machine_specific_modify_cpu_capabilities(c);
+
+       /* Now the feature flags better reflect actual CPU features! */
+
+       printk(KERN_DEBUG "CPU: After all inits, caps:        %08lx %08lx %08lx %08lx\n",
+              c->x86_capability[0],
+              c->x86_capability[1],
+              c->x86_capability[2],
+              c->x86_capability[3]);
+
+       /*
+        * On SMP, boot_cpu_data holds the common feature set between
+        * all CPUs; so make sure that we indicate which features are
+        * common between the CPUs.  The first time this routine gets
+        * executed, c == &boot_cpu_data.
+        */
+       if ( c != &boot_cpu_data ) {
+               /* AND the already accumulated flags with these */
+               for ( i = 0 ; i < NCAPINTS ; i++ )
+                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+       }
+
+       /* Init Machine Check Exception if available. */
+#ifdef CONFIG_X86_MCE
+       mcheck_init(c);
+#endif
+}
+/*
+ *     Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+void __init dodgy_tsc(void)
+{
+       if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+           ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
+               cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+}
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+       char *vendor = NULL;
+
+       if (c->x86_vendor < X86_VENDOR_NUM)
+               vendor = this_cpu->c_vendor;
+       else if (c->cpuid_level >= 0)
+               vendor = c->x86_vendor_id;
+
+       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+               printk("%s ", vendor);
+
+       if (!c->x86_model_id[0])
+               printk("%d86", c->x86);
+       else
+               printk("%s", c->x86_model_id);
+
+       if (c->x86_mask || c->cpuid_level >= 0) 
+               printk(" stepping %02x\n", c->x86_mask);
+       else
+               printk("\n");
+}
+
+unsigned long cpu_initialized __initdata = 0;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int rise_init_cpu(void);
+extern int nexgen_init_cpu(void);
+extern int umc_init_cpu(void);
+void early_cpu_detect(void);
+
+void __init early_cpu_init(void)
+{
+       intel_cpu_init();
+       cyrix_init_cpu();
+       nsc_init_cpu();
+       amd_init_cpu();
+       centaur_init_cpu();
+       transmeta_init_cpu();
+       rise_init_cpu();
+       nexgen_init_cpu();
+       umc_init_cpu();
+       early_cpu_detect();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /* pse is not compatible with on-the-fly unmapping,
+        * disable it even if the cpus claim to support it.
+        */
+       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+       disable_pse = 1;
+#endif
+}
+
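+/*
+ * Xen-specific GDT setup (summary of the code below): each page of the
+ * descriptor table is translated to its machine frame number, protected
+ * via protect_page(), and handed to HYPERVISOR_set_gdt() for installation.
+ */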
+void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+{
+       unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+       unsigned long va;
+       int f;
+
+       for (va = gdt_descr->address, f = 0;
+            va < gdt_descr->address + gdt_descr->size;
+            va += PAGE_SIZE, f++) {
+               frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+               protect_page(swapper_pg_dir, (void *)va, PROT_ON);
+       }
+       flush_page_update_queue();
+       if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+               BUG();
+       lgdt_finish();
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __init cpu_init (void)
+{
+       int cpu = smp_processor_id();
+       struct tss_struct * t = &per_cpu(init_tss, cpu);
+       struct thread_struct *thread = &current->thread;
+
+       if (test_and_set_bit(cpu, &cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+               for (;;) local_irq_enable();
+       }
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+       if (cpu_has_vme || cpu_has_de)
+               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+       if (tsc_disable && cpu_has_tsc) {
+               printk(KERN_NOTICE "Disabling TSC...\n");
+               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+               set_in_cr4(X86_CR4_TSD);
+       }
+
+       /*
+        * Initialize the per-CPU GDT with the boot GDT,
+        * and set up the GDT descriptor:
+        */
+       if (cpu) {
+               cpu_gdt_descr[cpu].size = GDT_SIZE;
+               cpu_gdt_descr[cpu].address = 0; /* XXXcl alloc page */
+               BUG();          /* XXXcl SMP */
+               memcpy((void *)cpu_gdt_descr[cpu].address,
+                   (void *)cpu_gdt_descr[0].address, GDT_SIZE);
+       }
+       /*
+        * Set up the per-thread TLS descriptor cache:
+        */
+       memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+           GDT_ENTRY_TLS_ENTRIES * 8);
+
+       cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+       /*
+        * Delete NT
+        */
+       __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
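+       /* (the 0xffffbfff mask clears EFLAGS.NT, bit 14) */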
+
+       /*
+        * Set up and load the per-CPU TSS and LDT
+        */
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+       if (current->mm)
+               BUG();
+       enter_lazy_tlb(&init_mm, current);
+
+       load_esp0(t, thread);
+
+       load_LDT(&init_mm.context);
+       flush_page_update_queue();
+
+       /* Clear %fs and %gs. */
+       asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
+
+       /* Clear all 6 debug registers: */
+
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+       CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+
+       /*
+        * Force FPU initialization:
+        */
+       current_thread_info()->status = 0;
+       current->used_math = 0;
+       mxcsr_feature_mask_init();
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
new file mode 100644 (file)
index 0000000..f1e60cc
--- /dev/null
@@ -0,0 +1,1029 @@
+/*
+ *  linux/arch/i386/entry.S
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+ * Stack layout in 'ret_from_system_call':
+ *     ptrace needs to have all regs on the stack.
+ *     if the order here is changed, it needs to be
+ *     updated in fork.c:copy_process, signal.c:do_signal,
+ *     ptrace.c and ptrace.h
+ *
+ *      0(%esp) - %ebx
+ *      4(%esp) - %ecx
+ *      8(%esp) - %edx
+ *       C(%esp) - %esi
+ *     10(%esp) - %edi
+ *     14(%esp) - %ebp
+ *     18(%esp) - %eax
+ *     1C(%esp) - %ds
+ *     20(%esp) - %es
+ *     24(%esp) - orig_eax
+ *     28(%esp) - %eip
+ *     2C(%esp) - %cs
+ *     30(%esp) - %eflags
+ *     34(%esp) - %oldesp
+ *     38(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include "irq_vectors.h"
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#define nr_syscalls ((syscall_table_size)/4)
+
+EBX            = 0x00
+ECX            = 0x04
+EDX            = 0x08
+ESI            = 0x0C
+EDI            = 0x10
+EBP            = 0x14
+EAX            = 0x18
+DS             = 0x1C
+ES             = 0x20
+ORIG_EAX       = 0x24
+EIP            = 0x28
+CS             = 0x2C
+EFLAGS         = 0x30
+OLDESP         = 0x34
+OLDSS          = 0x38
+
+CF_MASK                = 0x00000001
+TF_MASK                = 0x00000100
+IF_MASK                = 0x00000200
+DF_MASK                = 0x00000400 
+NT_MASK                = 0x00004000
+VM_MASK                = 0x00020000
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending          /* 0 */
+#define evtchn_upcall_mask             1
+
+#define XEN_BLOCK_EVENTS(reg)  movb $1,evtchn_upcall_mask(reg)
+#define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop           movl HYPERVISOR_shared_info,%esi        ; \
+                               XEN_BLOCK_EVENTS(%esi)
+#else
+#define preempt_stop
+#define resume_kernel          restore_all
+#endif
+
+#define SAVE_ALL \
+       cld; \
+       pushl %es; \
+       pushl %ds; \
+       pushl %eax; \
+       pushl %ebp; \
+       pushl %edi; \
+       pushl %esi; \
+       pushl %edx; \
+       pushl %ecx; \
+       pushl %ebx; \
+       movl $(__KERNEL_DS), %edx; \
+       movl %edx, %ds; \
+       movl %edx, %es;
+       # XXXcl USER?
+
+#define RESTORE_INT_REGS \
+       popl %ebx;      \
+       popl %ecx;      \
+       popl %edx;      \
+       popl %esi;      \
+       popl %edi;      \
+       popl %ebp;      \
+       popl %eax
+
+#define RESTORE_REGS   \
+       RESTORE_INT_REGS; \
+1:     popl %ds;       \
+2:     popl %es;       \
+.section .fixup,"ax";  \
+3:     movl $0,(%esp); \
+       jmp 1b;         \
+4:     movl $0,(%esp); \
+       jmp 2b;         \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,3b;    \
+       .long 2b,4b;    \
+.previous
+
+
+#define RESTORE_ALL    \
+       RESTORE_REGS    \
+       addl $4, %esp;  \
+1:     iret;           \
+.section .fixup,"ax";   \
+2:     movl $(__USER_DS), %edx; \
+       movl %edx, %ds; \
+       movl %edx, %es; \
+       pushl $11;      \
+       call do_exit;   \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,2b;    \
+.previous
+
+
+
+ENTRY(lcall7)
+       pushfl                  # We get a different stack layout with call
+                               # gates, which has to be cleaned up later..
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %ebp
+       pushl %ebp
+       pushl $0x7
+do_lcall:
+       movl EIP(%ebp), %eax    # due to call gates, this is eflags, not eip..
+       movl CS(%ebp), %edx     # this is eip..
+       movl EFLAGS(%ebp), %ecx # and this is cs..
+       movl %eax,EFLAGS(%ebp)  #
+       movl %edx,EIP(%ebp)     # Now we move them to their "normal" places
+       movl %ecx,CS(%ebp)      #
+       GET_THREAD_INFO_WITH_ESP(%ebp)  # GET_THREAD_INFO
+       movl TI_exec_domain(%ebp), %edx # Get the execution domain
+       call *EXEC_DOMAIN_handler(%edx) # Call the handler for the domain
+       addl $4, %esp
+       popl %eax
+       jmp resume_userspace
+
+ENTRY(lcall27)
+       pushfl                  # We get a different stack layout with call
+                               # gates, which has to be cleaned up later..
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %ebp
+       pushl %ebp
+       pushl $0x27
+       jmp do_lcall
+
+
+ENTRY(ret_from_fork)
+       pushl %eax
+       call schedule_tail
+       GET_THREAD_INFO(%ebp)
+       popl %eax
+       jmp syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+       # userspace resumption stub bypassing syscall exit tracing
+       ALIGN
+ret_from_exception:
+       preempt_stop
+ret_from_intr:
+       GET_THREAD_INFO(%ebp)
+       movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
+       movb CS(%esp), %al
+       testl $(VM_MASK | 2), %eax
+       jz resume_kernel                # returning to kernel or vm86-space
+ENTRY(resume_userspace)
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+ret_syscall_tests:
+       movl TI_flags(%ebp), %ecx
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
+                                       # int/exception return?
+       jne work_pending
+       jmp restore_all_enable_events
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+       movl HYPERVISOR_shared_info,%esi
+       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
+       jnz restore_all_enable_events
+need_resched:
+       movl TI_flags(%ebp), %ecx       # need_resched set ?
+       testb $_TIF_NEED_RESCHED, %cl
+       jz restore_all_enable_events
+       testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
+       jz restore_all_enable_events
+       movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       call schedule
+       movl $0,TI_preempt_count(%ebp)
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+       jmp need_resched
+#endif
+
+/* SYSENTER_RETURN points to after the "sysenter" instruction in
+   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
+
+       # sysenter call handler stub
+ENTRY(sysenter_entry)
+       movl TSS_sysenter_esp0(%esp),%esp
+sysenter_past_esp:
+       sti
+       pushl $(__USER_DS)
+       pushl %ebp
+       pushfl
+       pushl $(__USER_CS)
+       pushl $SYSENTER_RETURN
+
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+       cmpl $__PAGE_OFFSET-3,%ebp
+       jae syscall_fault
+1:     movl (%ebp),%ebp
+.section __ex_table,"a"
+       .align 4
+       .long 1b,syscall_fault
+.previous
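+# Note: the __PAGE_OFFSET-3 bound above keeps the whole 4-byte load below the
+# kernel mapping; the __ex_table entry catches a fault on the access itself.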
+
+       pushl %eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
+
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       jnz syscall_trace_entry
+       call *sys_call_table(,%eax,4)
+       movl %eax,EAX(%esp)
+       cli
+       movl TI_flags(%ebp), %ecx
+       testw $_TIF_ALLWORK_MASK, %cx
+       jne syscall_exit_work
+/* if something modifies registers it must also disable sysexit */
+       movl EIP(%esp), %edx
+       movl OLDESP(%esp), %ecx
+       sti
+       sysexit
+
+
+       # system call handler stub
+ENTRY(system_call)
+       pushl %eax                      # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
+                                       # system call tracing in operation
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       jnz syscall_trace_entry
+syscall_call:
+       call *sys_call_table(,%eax,4)
+       movl %eax,EAX(%esp)             # store the return value
+syscall_exit:
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+       movl TI_flags(%ebp), %ecx
+       testw $_TIF_ALLWORK_MASK, %cx   # current->work
+       jne syscall_exit_work
+       jmp restore_all_enable_events
+
+       ALIGN
+restore_all:
+       RESTORE_ALL
+
+       # perform work that needs to be done immediately before resumption
+       ALIGN
+work_pending:
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       testb $_TIF_NEED_RESCHED, %cl
+       jz work_notifysig
+work_resched:
+       call schedule
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+       movl TI_flags(%ebp), %ecx
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
+                                       # than syscall tracing?
+       jz restore_all_enable_events
+       # XXXcl sti missing???
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       testb $_TIF_NEED_RESCHED, %cl
+       jnz work_resched
+
+work_notifysig:                                # deal with pending signals and
+                                       # notify-resume requests
+       testl $VM_MASK, EFLAGS(%esp)
+       movl %esp, %eax
+       jne work_notifysig_v86          # returning to kernel-space or
+                                       # vm86-space
+       xorl %edx, %edx
+       call do_notify_resume
+       movl HYPERVISOR_shared_info,%esi
+       jmp restore_all_enable_events
+
+       ALIGN
+work_notifysig_v86:
+       pushl %ecx
+       call save_v86_state
+       popl %ecx
+       movl %eax, %esp
+       xorl %edx, %edx
+       call do_notify_resume
+       movl HYPERVISOR_shared_info,%esi
+       jmp restore_all_enable_events
+
+       # perform syscall exit tracing
+       ALIGN
+syscall_trace_entry:
+       movl $-ENOSYS,EAX(%esp)
+       movl %esp, %eax
+       xorl %edx,%edx
+       call do_syscall_trace
+       movl ORIG_EAX(%esp), %eax
+       cmpl $(nr_syscalls), %eax
+       jnae syscall_call
+       jmp syscall_exit
+
+       # perform syscall exit tracing
+       ALIGN
+syscall_exit_work:
+       movl HYPERVISOR_shared_info,%esi
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
+       jz work_pending
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+                                       # could let do_syscall_trace() call
+                                       # schedule() instead
+       movl %esp, %eax
+       movl $1, %edx
+       call do_syscall_trace
+       jmp resume_userspace
+
+       ALIGN
+syscall_fault:
+       pushl %eax                      # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       movl $-EFAULT,EAX(%esp)
+       jmp resume_userspace
+
+       ALIGN
+syscall_badsys:
+       movl $-ENOSYS,EAX(%esp)
+       jmp resume_userspace
+
+ENTRY(divide_error)
+       pushl $0                        # no error code
+       pushl $do_divide_error
+       ALIGN
+error_code:
+       pushl %ds
+       pushl %eax
+       xorl %eax, %eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       decl %eax                       # eax = -1
+       pushl %ecx
+       pushl %ebx
+       cld
+       movl %es, %ecx
+       movl ORIG_EAX(%esp), %esi       # get the error code
+       movl ES(%esp), %edi             # get the function address
+       movl %eax, ORIG_EAX(%esp)
+       movl %ecx, ES(%esp)
+       movl %esp, %edx
+       pushl %esi                      # push the error code
+       pushl %edx                      # push the pt_regs pointer
+       movl $(__KERNEL_DS), %edx       # XXXcl USER?
+       movl %edx, %ds
+       movl %edx, %es
+       call *%edi
+       addl $8, %esp
+       jmp ret_from_exception
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+       pushl %eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       movl EIP(%esp),%eax
+       cmpl $scrit,%eax
+       jb   11f
+       cmpl $ecrit,%eax
+       jb   critical_region_fixup
+11:    push %esp
+       call evtchn_do_upcall
+       add  $4,%esp
+       movl HYPERVISOR_shared_info,%esi
+       movb CS(%esp),%cl
+       test $2,%cl                     # slow return to ring 2 or 3
+       jne  ret_syscall_tests
+restore_all_enable_events:  
+safesti:XEN_UNBLOCK_EVENTS(%esi)       # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+       testb $1,evtchn_upcall_pending(%esi)
+       jnz  14f                        # process more events if necessary...
+       RESTORE_ALL
+14:    XEN_BLOCK_EVENTS(%esi)
+       jmp  11b
+ecrit:  /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame. 
+critical_region_fixup:
+       addl $critical_fixup_table-scrit,%eax
+       movzbl (%eax),%eax              # %eax contains num bytes popped
+       mov  %esp,%esi
+       add  %eax,%esi                  # %esi points at end of src region
+       mov  %esp,%edi
+       add  $0x34,%edi                 # %edi points at end of dst region
+       mov  %eax,%ecx
+       shr  $2,%ecx                    # convert words to bytes
+       je   16f                        # skip loop if nothing to copy
+15:    subl $4,%esi                    # pre-decrementing copy loop
+       subl $4,%edi
+       movl (%esi),%eax
+       movl %eax,(%edi)
+       loop 15b
+16:    movl %edi,%esp                  # final %edi is top of merged stack
+       jmp  11b
+
+critical_fixup_table:
+       .byte 0x00,0x00,0x00            # testb $0xff,(%esi)
+       .byte 0x00,0x00                 # jnz  14f
+       .byte 0x00                      # pop  %ebx
+       .byte 0x04                      # pop  %ecx
+       .byte 0x08                      # pop  %edx
+       .byte 0x0c                      # pop  %esi
+       .byte 0x10                      # pop  %edi
+       .byte 0x14                      # pop  %ebp
+       .byte 0x18                      # pop  %eax
+       .byte 0x1c                      # pop  %ds
+       .byte 0x20                      # pop  %es
+       .byte 0x24,0x24,0x24            # add  $4,%esp
+       .byte 0x28                      # iret
+       .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
+       .byte 0x00,0x00                 # jmp  11b
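+# Illustrative walk-through: if the upcall lands on the 'pop %edx' instruction,
+# the table yields 0x08 -- %ebx and %ecx had already been restored. The fixup
+# then copies the two freshly re-saved words at the bottom of the new frame to
+# just below the remainder of the interrupted frame and points %esp there,
+# recreating one complete frame before retrying the upcall.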
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+       pushal
+       call install_safe_pf_handler
+       movl 32(%esp),%ebx
+1:     movl %ebx,%ds
+       movl 36(%esp),%ebx
+2:     movl %ebx,%es
+       movl 40(%esp),%ebx
+3:     movl %ebx,%fs
+       movl 44(%esp),%ebx
+4:     movl %ebx,%gs
+       call install_normal_pf_handler
+       popal
+       addl $16,%esp
+5:     iret
+.section .fixup,"ax";  \
+6:     xorl %ebx,%ebx; \
+       jmp 1b;         \
+7:     xorl %ebx,%ebx; \
+       jmp 2b;         \
+8:     xorl %ebx,%ebx; \
+       jmp 3b;         \
+9:     xorl %ebx,%ebx; \
+       jmp 4b;         \
+10:    pushl %ss;      \
+       popl %ds;       \
+       pushl %ss;      \
+       popl %es;       \
+       pushl $11;      \
+       call do_exit;   \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,6b;    \
+       .long 2b,7b;    \
+       .long 3b,8b;    \
+       .long 4b,9b;    \
+       .long 5b,10b;   \
+.previous
+
+ENTRY(coprocessor_error)
+       pushl $0
+       pushl $do_coprocessor_error
+       jmp error_code
+
+ENTRY(simd_coprocessor_error)
+       pushl $0
+       pushl $do_simd_coprocessor_error
+       jmp error_code
+
+ENTRY(device_not_available)
+       pushl $-1                       # mark this as an int
+       SAVE_ALL
+       preempt_stop
+       call math_state_restore
+       jmp ret_from_exception
+
+/*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+#define FIX_STACK(offset, ok, label)           \
+       cmpw $__KERNEL_CS,4(%esp);              \
+       jne ok;                                 \
+label:                                         \
+       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
+       pushfl;                                 \
+       pushl $__KERNEL_CS;                     \
+       pushl $sysenter_past_esp
+
+ENTRY(debug)
+       cmpl $sysenter_entry,(%esp)
+       jne debug_stack_correct
+       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+debug_stack_correct:
+       pushl $0
+       pushl $do_debug
+       jmp error_code
+
+#if 0
+/*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+ * clear up the stack. So we first check whether we got  an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ENTRY(nmi)
+       cmpl $sysenter_entry,(%esp)
+       je nmi_stack_fixup
+       pushl %eax
+       movl %esp,%eax
+       /* Do not access memory above the end of our stack page,
+        * it might not exist.
+        */
+       andl $(THREAD_SIZE-1),%eax
+       cmpl $(THREAD_SIZE-20),%eax
+       popl %eax
+       jae nmi_stack_correct
+       cmpl $sysenter_entry,12(%esp)
+       je nmi_debug_stack_check
+nmi_stack_correct:
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %edx
+       pushl $0
+       pushl %edx
+       call do_nmi
+       addl $8, %esp
+       RESTORE_ALL
+
+nmi_stack_fixup:
+       FIX_STACK(12,nmi_stack_correct, 1)
+       jmp nmi_stack_correct
+nmi_debug_stack_check:
+       cmpw $__KERNEL_CS,16(%esp)
+       jne nmi_stack_correct
+       cmpl $debug - 1,(%esp)
+       jle nmi_stack_correct
+       cmpl $debug_esp_fix_insn,(%esp)
+       jle nmi_debug_stack_fixup
+nmi_debug_stack_fixup:
+       FIX_STACK(24,nmi_stack_correct, 1)
+       jmp nmi_stack_correct
+#endif
+
+ENTRY(int3)
+       pushl $0
+       pushl $do_int3
+       jmp error_code
+
+ENTRY(overflow)
+       pushl $0
+       pushl $do_overflow
+       jmp error_code
+
+ENTRY(bounds)
+       pushl $0
+       pushl $do_bounds
+       jmp error_code
+
+ENTRY(invalid_op)
+       pushl $0
+       pushl $do_invalid_op
+       jmp error_code
+
+ENTRY(coprocessor_segment_overrun)
+       pushl $0
+       pushl $do_coprocessor_segment_overrun
+       jmp error_code
+
+ENTRY(double_fault)
+       pushl $do_double_fault
+       jmp error_code
+
+ENTRY(invalid_TSS)
+       pushl $do_invalid_TSS
+       jmp error_code
+
+ENTRY(segment_not_present)
+       pushl $do_segment_not_present
+       jmp error_code
+
+ENTRY(stack_segment)
+       pushl $do_stack_segment
+       jmp error_code
+
+ENTRY(general_protection)
+       pushl $do_general_protection
+       jmp error_code
+
+ENTRY(alignment_check)
+       pushl $do_alignment_check
+       jmp error_code
+
+# This handler is special, because it gets an extra value on its stack,
+# which is the linear faulting address.
+#define PAGE_FAULT_STUB(_name1, _name2)                                          \
+ENTRY(_name1)                                                            \
+       pushl %ds                                                       ; \
+       pushl %eax                                                      ; \
+       xorl %eax,%eax                                                  ; \
+       pushl %ebp                                                      ; \
+       pushl %edi                                                      ; \
+       pushl %esi                                                      ; \
+       pushl %edx                                                      ; \
+       decl %eax                       /* eax = -1 */                  ; \
+       pushl %ecx                                                      ; \
+       pushl %ebx                                                      ; \
+       GET_THREAD_INFO(%ebp)                                           ; \
+       cld                                                             ; \
+       movl %es,%ecx                                                   ; \
+       movl ORIG_EAX(%esp), %esi       /* get the error code */        ; \
+       movl ES(%esp), %edi             /* get the faulting address */  ; \
+       movl %eax, ORIG_EAX(%esp)                                       ; \
+       movl %ecx, ES(%esp)                                             ; \
+       movl %esp,%edx                                                  ; \
+       pushl %edi                      /* push the faulting address */ ; \
+       pushl %esi                      /* push the error code */       ; \
+       pushl %edx                      /* push the pt_regs pointer */  ; \
+       movl $(__KERNEL_DS),%edx                                        ; \
+       movl %edx,%ds                                                   ; \
+       movl %edx,%es                                                   ; \
+       call _name2                                                     ; \
+       addl $12,%esp                                                   ; \
+       jmp ret_from_exception                                          ;
+PAGE_FAULT_STUB(page_fault, do_page_fault)
+PAGE_FAULT_STUB(safe_page_fault, do_safe_page_fault)
+
+#ifdef CONFIG_X86_MCE
+ENTRY(machine_check)
+       pushl $0
+       pushl machine_check_vector
+       jmp error_code
+#endif
+
+ENTRY(fixup_4gb_segment)
+       pushl $do_fixup_4gb_segment
+       jmp error_code
+
+.data
+ENTRY(sys_call_table)
+       .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
+       .long sys_exit
+       .long sys_fork
+       .long sys_read
+       .long sys_write
+       .long sys_open          /* 5 */
+       .long sys_close
+       .long sys_waitpid
+       .long sys_creat
+       .long sys_link
+       .long sys_unlink        /* 10 */
+       .long sys_execve
+       .long sys_chdir
+       .long sys_time
+       .long sys_mknod
+       .long sys_chmod         /* 15 */
+       .long sys_lchown16
+       .long sys_ni_syscall    /* old break syscall holder */
+       .long sys_stat
+       .long sys_lseek
+       .long sys_getpid        /* 20 */
+       .long sys_mount
+       .long sys_oldumount
+       .long sys_setuid16
+       .long sys_getuid16
+       .long sys_stime         /* 25 */
+       .long sys_ptrace
+       .long sys_alarm
+       .long sys_fstat
+       .long sys_pause
+       .long sys_utime         /* 30 */
+       .long sys_ni_syscall    /* old stty syscall holder */
+       .long sys_ni_syscall    /* old gtty syscall holder */
+       .long sys_access
+       .long sys_nice
+       .long sys_ni_syscall    /* 35 - old ftime syscall holder */
+       .long sys_sync
+       .long sys_kill
+       .long sys_rename
+       .long sys_mkdir
+       .long sys_rmdir         /* 40 */
+       .long sys_dup
+       .long sys_pipe
+       .long sys_times
+       .long sys_ni_syscall    /* old prof syscall holder */
+       .long sys_brk           /* 45 */
+       .long sys_setgid16
+       .long sys_getgid16
+       .long sys_signal
+       .long sys_geteuid16
+       .long sys_getegid16     /* 50 */
+       .long sys_acct
+       .long sys_umount        /* recycled never used phys() */
+       .long sys_ni_syscall    /* old lock syscall holder */
+       .long sys_ioctl
+       .long sys_fcntl         /* 55 */
+       .long sys_ni_syscall    /* old mpx syscall holder */
+       .long sys_setpgid
+       .long sys_ni_syscall    /* old ulimit syscall holder */
+       .long sys_olduname
+       .long sys_umask         /* 60 */
+       .long sys_chroot
+       .long sys_ustat
+       .long sys_dup2
+       .long sys_getppid
+       .long sys_getpgrp       /* 65 */
+       .long sys_setsid
+       .long sys_sigaction
+       .long sys_sgetmask
+       .long sys_ssetmask
+       .long sys_setreuid16    /* 70 */
+       .long sys_setregid16
+       .long sys_sigsuspend
+       .long sys_sigpending
+       .long sys_sethostname
+       .long sys_setrlimit     /* 75 */
+       .long sys_old_getrlimit
+       .long sys_getrusage
+       .long sys_gettimeofday
+       .long sys_settimeofday
+       .long sys_getgroups16   /* 80 */
+       .long sys_setgroups16
+       .long old_select
+       .long sys_symlink
+       .long sys_lstat
+       .long sys_readlink      /* 85 */
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+       .long old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+       .long sys_ftruncate
+       .long sys_fchmod
+       .long sys_fchown16      /* 95 */
+       .long sys_getpriority
+       .long sys_setpriority
+       .long sys_ni_syscall    /* old profil syscall holder */
+       .long sys_statfs
+       .long sys_fstatfs       /* 100 */
+       .long sys_ioperm
+       .long sys_socketcall
+       .long sys_syslog
+       .long sys_setitimer
+       .long sys_getitimer     /* 105 */
+       .long sys_newstat
+       .long sys_newlstat
+       .long sys_newfstat
+       .long sys_uname
+       .long sys_iopl          /* 110 */
+       .long sys_vhangup
+       .long sys_ni_syscall    /* old "idle" system call */
+       .long sys_ni_syscall    /* disable sys_vm86old */
+       .long sys_wait4
+       .long sys_swapoff       /* 115 */
+       .long sys_sysinfo
+       .long sys_ipc
+       .long sys_fsync
+       .long sys_sigreturn
+       .long sys_clone         /* 120 */
+       .long sys_setdomainname
+       .long sys_newuname
+       .long sys_modify_ldt
+       .long sys_adjtimex
+       .long sys_mprotect      /* 125 */
+       .long sys_sigprocmask
+       .long sys_ni_syscall    /* old "create_module" */ 
+       .long sys_init_module
+       .long sys_delete_module
+       .long sys_ni_syscall    /* 130: old "get_kernel_syms" */
+       .long sys_quotactl
+       .long sys_getpgid
+       .long sys_fchdir
+       .long sys_bdflush
+       .long sys_sysfs         /* 135 */
+       .long sys_personality
+       .long sys_ni_syscall    /* reserved for afs_syscall */
+       .long sys_setfsuid16
+       .long sys_setfsgid16
+       .long sys_llseek        /* 140 */
+       .long sys_getdents
+       .long sys_select
+       .long sys_flock
+       .long sys_msync
+       .long sys_readv         /* 145 */
+       .long sys_writev
+       .long sys_getsid
+       .long sys_fdatasync
+       .long sys_sysctl
+       .long sys_mlock         /* 150 */
+       .long sys_munlock
+       .long sys_mlockall
+       .long sys_munlockall
+       .long sys_sched_setparam
+       .long sys_sched_getparam   /* 155 */
+       .long sys_sched_setscheduler
+       .long sys_sched_getscheduler
+       .long sys_sched_yield
+       .long sys_sched_get_priority_max
+       .long sys_sched_get_priority_min  /* 160 */
+       .long sys_sched_rr_get_interval
+       .long sys_nanosleep
+       .long sys_mremap
+       .long sys_setresuid16
+       .long sys_getresuid16   /* 165 */
+       .long sys_vm86
+       .long sys_ni_syscall    /* Old sys_query_module */
+       .long sys_poll
+       .long sys_nfsservctl
+       .long sys_setresgid16   /* 170 */
+       .long sys_getresgid16
+       .long sys_prctl
+       .long sys_rt_sigreturn
+       .long sys_rt_sigaction
+       .long sys_rt_sigprocmask        /* 175 */
+       .long sys_rt_sigpending
+       .long sys_rt_sigtimedwait
+       .long sys_rt_sigqueueinfo
+       .long sys_rt_sigsuspend
+       .long sys_pread64       /* 180 */
+       .long sys_pwrite64
+       .long sys_chown16
+       .long sys_getcwd
+       .long sys_capget
+       .long sys_capset        /* 185 */
+       .long sys_sigaltstack
+       .long sys_sendfile
+       .long sys_ni_syscall    /* reserved for streams1 */
+       .long sys_ni_syscall    /* reserved for streams2 */
+       .long sys_vfork         /* 190 */
+       .long sys_getrlimit
+       .long sys_mmap2
+       .long sys_truncate64
+       .long sys_ftruncate64
+       .long sys_stat64        /* 195 */
+       .long sys_lstat64
+       .long sys_fstat64
+       .long sys_lchown
+       .long sys_getuid
+       .long sys_getgid        /* 200 */
+       .long sys_geteuid
+       .long sys_getegid
+       .long sys_setreuid
+       .long sys_setregid
+       .long sys_getgroups     /* 205 */
+       .long sys_setgroups
+       .long sys_fchown
+       .long sys_setresuid
+       .long sys_getresuid
+       .long sys_setresgid     /* 210 */
+       .long sys_getresgid
+       .long sys_chown
+       .long sys_setuid
+       .long sys_setgid
+       .long sys_setfsuid      /* 215 */
+       .long sys_setfsgid
+       .long sys_pivot_root
+       .long sys_mincore
+       .long sys_madvise
+       .long sys_getdents64    /* 220 */
+       .long sys_fcntl64
+       .long sys_ni_syscall    /* reserved for TUX */
+       .long sys_ni_syscall
+       .long sys_gettid
+       .long sys_readahead     /* 225 */
+       .long sys_setxattr
+       .long sys_lsetxattr
+       .long sys_fsetxattr
+       .long sys_getxattr
+       .long sys_lgetxattr     /* 230 */
+       .long sys_fgetxattr
+       .long sys_listxattr
+       .long sys_llistxattr
+       .long sys_flistxattr
+       .long sys_removexattr   /* 235 */
+       .long sys_lremovexattr
+       .long sys_fremovexattr
+       .long sys_tkill
+       .long sys_sendfile64
+       .long sys_futex         /* 240 */
+       .long sys_sched_setaffinity
+       .long sys_sched_getaffinity
+       .long sys_set_thread_area
+       .long sys_get_thread_area
+       .long sys_io_setup      /* 245 */
+       .long sys_io_destroy
+       .long sys_io_getevents
+       .long sys_io_submit
+       .long sys_io_cancel
+       .long sys_fadvise64     /* 250 */
+       .long sys_ni_syscall
+       .long sys_exit_group
+       .long sys_lookup_dcookie
+       .long sys_epoll_create
+       .long sys_epoll_ctl     /* 255 */
+       .long sys_epoll_wait
+       .long sys_remap_file_pages
+       .long sys_set_tid_address
+       .long sys_timer_create
+       .long sys_timer_settime         /* 260 */
+       .long sys_timer_gettime
+       .long sys_timer_getoverrun
+       .long sys_timer_delete
+       .long sys_clock_settime
+       .long sys_clock_gettime         /* 265 */
+       .long sys_clock_getres
+       .long sys_clock_nanosleep
+       .long sys_statfs64
+       .long sys_fstatfs64     
+       .long sys_tgkill        /* 270 */
+       .long sys_utimes
+       .long sys_fadvise64_64
+       .long sys_ni_syscall    /* sys_vserver */
+       .long sys_mbind
+       .long sys_get_mempolicy
+       .long sys_set_mempolicy
+       .long sys_mq_open
+       .long sys_mq_unlink
+       .long sys_mq_timedsend
+       .long sys_mq_timedreceive       /* 280 */
+       .long sys_mq_notify
+       .long sys_mq_getsetattr
+       .long sys_ni_syscall            /* reserved for kexec */
+
+syscall_table_size=(.-sys_call_table)
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S
new file mode 100644 (file)
index 0000000..b41a96d
--- /dev/null
@@ -0,0 +1,171 @@
+
+#include <linux/config.h>
+
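+# The __xen_guest section holds a NUL-terminated key=value string that the
+# Xen domain builder reads when loading the kernel image.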
+.section __xen_guest
+       .ascii  "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=2.0,VIRT_BASE=0xC0000000"
+       .ascii  ",LOADER=generic"
+#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
+       .ascii  ",PT_MODE_WRITABLE"
+#endif
+       .byte   0
+
+.text
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/asm_offsets.h>
+#include <asm/hypervisor-ifs/arch-x86_32.h>
+
+/*
+ * References to members of the new_cpu_data structure.
+ */
+
+#define X86            new_cpu_data+CPUINFO_x86
+#define X86_VENDOR     new_cpu_data+CPUINFO_x86_vendor
+#define X86_MODEL      new_cpu_data+CPUINFO_x86_model
+#define X86_MASK       new_cpu_data+CPUINFO_x86_mask
+#define X86_HARD_MATH  new_cpu_data+CPUINFO_hard_math
+#define X86_CPUID      new_cpu_data+CPUINFO_cpuid_level
+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+#define X86_VENDOR_ID  new_cpu_data+CPUINFO_x86_vendor_id
+
+ENTRY(startup_32)
+       cld
+
+       /* Set up the stack pointer */
+       lss stack_start,%esp
+
+       /* Copy the necessary stuff from xen_start_info structure. */
+       mov  $xen_start_info_union,%edi
+       mov  $128,%ecx
+       rep movsl
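+       /* 128 longwords (512 bytes) are copied into xen_start_info_union;
+          %esi is expected to hold the start_info pointer handed to the
+          guest by the domain builder at entry. */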
+
+checkCPUtype:
+
+       /* get vendor info */
+       xorl %eax,%eax                  # call CPUID with 0 -> return vendor ID
+       cpuid
+       movl %eax,X86_CPUID             # save CPUID level
+       movl %ebx,X86_VENDOR_ID         # lo 4 chars
+       movl %edx,X86_VENDOR_ID+4       # next 4 chars
+       movl %ecx,X86_VENDOR_ID+8       # last 4 chars
+
+       movl $1,%eax            # Use the CPUID instruction to get CPU type
+       cpuid
+       movb %al,%cl            # save reg for future use
+       andb $0x0f,%ah          # mask processor family
+       movb %ah,X86
+       andb $0xf0,%al          # mask model
+       shrb $4,%al
+       movb %al,X86_MODEL
+       andb $0x0f,%cl          # mask mask revision
+       movb %cl,X86_MASK
+       movl %edx,X86_CAPABILITY
+
+       xorl %eax,%eax          # Clear FS/GS and LDT
+       movl %eax,%fs
+       movl %eax,%gs
+       cld             # gcc2 wants the direction flag cleared at all times
+
+       call start_kernel
+L6:
+       jmp L6                  # main should never return here, but
+                               # just in case, we know what happens.
+
+ENTRY(lgdt_finish)
+       movl $(__KERNEL_DS),%eax        # reload all the segment registers
+       movw %ax,%ss                    # after changing gdt.
+
+       movl $(__USER_DS),%eax          # DS/ES contains default USER segment
+       movw %ax,%ds
+       movw %ax,%es
+
+       popl %eax                       # reload CS by intersegment return
+       pushl $(__KERNEL_CS)
+       pushl %eax
+       lret
+
+ENTRY(stack_start)
+       .long init_thread_union+THREAD_SIZE
+       .long __BOOT_DS
+
+# XXXcl
+.globl idt_descr
+.globl cpu_gdt_descr
+
+       ALIGN
+       .word 0                         # 32-bit align idt_desc.address
+idt_descr:
+       .word IDT_ENTRIES*8-1           # idt contains 256 entries
+       .long idt_table
+# XXXcl
+
+# boot GDT descriptor (later on used by CPU#0):
+       .word 0                         # 32 bit align gdt_desc.address
+cpu_gdt_descr:
+       .word GDT_SIZE
+       .long cpu_gdt_table
+
+       .fill NR_CPUS-1,8,0             # space for the other GDT descriptors
+
+.org 0x1000
+ENTRY(empty_zero_page)
+
+.org 0x2000
+ENTRY(swapper_pg_dir)
+
+.org 0x3000
+ENTRY(cpu_gdt_table)
+       .quad 0x0000000000000000        /* NULL descriptor */
+       .quad 0x0000000000000000        /* 0x0b reserved */
+       .quad 0x0000000000000000        /* 0x13 reserved */
+       .quad 0x0000000000000000        /* 0x1b reserved */
+       .quad 0x0000000000000000        /* 0x20 unused */
+       .quad 0x0000000000000000        /* 0x28 unused */
+       .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
+       .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
+       .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
+       .quad 0x0000000000000000        /* 0x4b reserved */
+       .quad 0x0000000000000000        /* 0x53 reserved */
+       .quad 0x0000000000000000        /* 0x5b reserved */
+
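+       /* Under Xen the guest kernel runs in ring 1, so the "kernel" descriptors
+          below carry DPL 1 and the user descriptors DPL 3; their limits are
+          truncated (0x...c3ff rather than 0x...ffff) so the segments stop short
+          of the virtual-address range reserved for the hypervisor. */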
+       .quad 0x00cfbb000000c3ff        /* 0x60 kernel 4GB code at 0x00000000 */
+       .quad 0x00cfb3000000c3ff        /* 0x68 kernel 4GB data at 0x00000000 */
+       .quad 0x00cffb000000c3ff        /* 0x73 user 4GB code at 0x00000000 */
+       .quad 0x00cff3000000c3ff        /* 0x7b user 4GB data at 0x00000000 */
+
+       .quad 0x0000000000000000        /* 0x80 TSS descriptor */
+       .quad 0x0000000000000000        /* 0x88 LDT descriptor */
+
+       /* Segments used for calling PnP BIOS */
+       .quad 0x0000000000000000        /* 0x90 32-bit code */
+       .quad 0x0000000000000000        /* 0x98 16-bit code */
+       .quad 0x0000000000000000        /* 0xa0 16-bit data */
+       .quad 0x0000000000000000        /* 0xa8 16-bit data */
+       .quad 0x0000000000000000        /* 0xb0 16-bit data */
+       /*
+        * The APM segments have byte granularity and their bases
+        * and limits are set at run time.
+        */
+       .quad 0x0000000000000000        /* 0xb8 APM CS    code */
+       .quad 0x0000000000000000        /* 0xc0 APM CS 16 code (16 bit) */
+       .quad 0x0000000000000000        /* 0xc8 APM DS    data */
+
+       .quad 0x0000000000000000        /* 0xd0 - unused */
+       .quad 0x0000000000000000        /* 0xd8 - unused */
+       .quad 0x0000000000000000        /* 0xe0 - unused */
+       .quad 0x0000000000000000        /* 0xe8 - unused */
+       .quad 0x0000000000000000        /* 0xf0 - unused */
+       .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
+       .fill GDT_ENTRIES-32,8,0
+
+.org 0x4000
+ENTRY(default_ldt)
+
+.org 0x5000
+/*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
new file mode 100644 (file)
index 0000000..83ef382
--- /dev/null
@@ -0,0 +1,201 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/mca.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/apm_bios.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/mmx.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/nmi.h>
+#include <asm/ist.h>
+#include <asm/kdebug.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern spinlock_t rtc_lock;
+
+/* This is definitely a GPL-only symbol */
+EXPORT_SYMBOL_GPL(cpu_gdt_table);
+
+#if defined(CONFIG_APM_MODULE)
+extern void machine_real_restart(unsigned char *, int);
+EXPORT_SYMBOL(machine_real_restart);
+extern void default_idle(void);
+EXPORT_SYMBOL(default_idle);
+#endif
+
+#ifdef CONFIG_SMP
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+extern unsigned long cpu_khz;
+extern unsigned long get_cmos_time(void);
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+EXPORT_SYMBOL(MCA_bus);
+#ifdef CONFIG_DISCONTIGMEM
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(physnode_map);
+#endif
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(dump_extended_fpu);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(pm_idle);
+#ifdef CONFIG_APM
+EXPORT_SYMBOL(pm_power_off);
+#endif
+EXPORT_SYMBOL(get_cmos_time);
+EXPORT_SYMBOL(cpu_khz);
+EXPORT_SYMBOL(apm_info);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+/* Delay loops */
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_from_user_ll);
+EXPORT_SYMBOL(__copy_to_user_ll);
+EXPORT_SYMBOL(strnlen_user);
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+EXPORT_SYMBOL(dma_free_coherent);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+#ifdef CONFIG_PCI_BIOS
+EXPORT_SYMBOL(pcibios_set_irq_routing);
+EXPORT_SYMBOL(pcibios_get_irq_routing_table);
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+EXPORT_SYMBOL(_mmx_memcpy);
+EXPORT_SYMBOL(mmx_clear_page);
+EXPORT_SYMBOL(mmx_copy_page);
+#endif
+
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(smp_num_siblings);
+EXPORT_SYMBOL(cpu_sibling_map);
+#endif
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_callout_map);
+EXPORT_SYMBOL(__write_lock_failed);
+EXPORT_SYMBOL(__read_lock_failed);
+
+/* Global SMP stuff */
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(smp_call_function);
+
+/* TLB flushing */
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL_GPL(flush_tlb_all);
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+#endif
+
+#ifdef CONFIG_MCA
+EXPORT_SYMBOL(machine_id);
+#endif
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(rtc_lock);
+
+EXPORT_SYMBOL_GPL(set_nmi_callback);
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
+
+#undef memcmp
+extern int memcmp(const void *,const void *,__kernel_size_t);
+EXPORT_SYMBOL(memcmp);
+
+EXPORT_SYMBOL(register_die_notifier);
+#ifdef CONFIG_HAVE_DEC_LOCK
+EXPORT_SYMBOL(atomic_dec_and_lock);
+#endif
+
+EXPORT_SYMBOL(__PAGE_KERNEL);
+
+#ifdef CONFIG_HIGHMEM
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
+#endif
+
+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+EXPORT_SYMBOL(ist_info);
+#endif
+
+EXPORT_SYMBOL(csum_partial);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ioport.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ioport.c
new file mode 100644 (file)
index 0000000..bb3b0f3
--- /dev/null
@@ -0,0 +1,49 @@
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+
+asmlinkage long sys_iopl(unsigned int new_io_pl)
+{
+       unsigned int old_io_pl = current->thread.io_pl;
+       dom0_op_t op;
+
+       if (new_io_pl > 3)
+               return -EINVAL;
+
+       /* Need "raw I/O" privileges for direct port access. */
+       if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
+               return -EPERM;
+
+       if (!(xen_start_info.flags & SIF_PRIVILEGED))
+               return -EPERM;
+
+       /* Maintain OS privileges even if user attempts to relinquish them. */
+       if (new_io_pl == 0)
+               new_io_pl = 1;
+
+       /* Change our version of the privilege levels. */
+       current->thread.io_pl = new_io_pl;
+
+       /* Force the change at ring 0. */
+       op.cmd           = DOM0_IOPL;
+       op.u.iopl.domain = DOMID_SELF;
+       op.u.iopl.iopl   = new_io_pl;
+       HYPERVISOR_dom0_op(&op);
+
+       return 0;
+}
+
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+       printk(KERN_INFO "ioperm not fully supported - %s\n",
+               turn_on ? "set iopl to 3" : "ignore resource release");
+       return turn_on ? sys_iopl(3) : 0;
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/irq.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/irq.c
new file mode 100644 (file)
index 0000000..c23ea80
--- /dev/null
@@ -0,0 +1,1161 @@
+/*
+ *     linux/arch/i386/kernel/irq.c
+ *
+ *     Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
+ *
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/desc.h>
+#include <asm/irq.h>
+
+/*
+ * Linux has a controller-independent x86 interrupt architecture.
+ * every controller has a 'controller-template', that is used
+ * by the main code to do the right thing. Each driver-visible
+ * interrupt source is transparently wired to the appropriate
+ * controller. Thus drivers need not be aware of the
+ * interrupt-controller.
+ *
+ * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
+ * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
+ * (IO-APICs assumed to be messaging to Pentium local-APICs)
+ *
+ * the code is designed to be easily extended with new/different
+ * interrupt controllers, without having to do assembly magic.
+ */
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+       [0 ... NR_IRQS-1] = {
+               .handler = &no_irq_type,
+               .lock = SPIN_LOCK_UNLOCKED
+       }
+};
+
+static void register_irq_proc (unsigned int irq);
+
+/*
+ * per-CPU IRQ handling stacks
+ */
+#ifdef CONFIG_4KSTACKS
+union irq_ctx *hardirq_ctx[NR_CPUS];
+union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
+
+/*
+ * Special irq handlers.
+ */
+
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{ return IRQ_NONE; }
+
+/*
+ * Generic no controller code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this itself; it doesn't deserve
+ * a generic callback, I think.
+ */
+#ifdef CONFIG_X86
+       printk("unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+       /*
+        * Currently unexpected vectors happen only on SMP and APIC.
+        * We _must_ ack these because every local APIC has only N
+        * irq slots per priority level, and a 'hanging, unacked' IRQ
+        * holds up an irq slot - in excessive cases (when multiple
+        * unexpected vectors occur) that might lock up the APIC
+        * completely.
+        */
+       ack_APIC_irq();
+#endif
+#endif
+}
+
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none  disable_none
+#define end_none       enable_none
+
+struct hw_interrupt_type no_irq_type = {
+       "none",
+       startup_none,
+       shutdown_none,
+       enable_none,
+       disable_none,
+       ack_none,
+       end_none
+};
+
+atomic_t irq_err_count;
+#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+atomic_t irq_mis_count;
+#endif
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+       int i = *(loff_t *) v, j;
+       struct irqaction * action;
+       unsigned long flags;
+
+       if (i == 0) {
+               seq_printf(p, "           ");
+               for (j=0; j<NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "CPU%d       ",j);
+               seq_putc(p, '\n');
+       }
+
+       if (i < NR_IRQS) {
+               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               action = irq_desc[i].action;
+               if (!action) 
+                       goto skip;
+               seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+               seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+               seq_printf(p, " %14s", irq_desc[i].handler->typename);
+               seq_printf(p, "  %s", action->name);
+
+               for (action=action->next; action; action = action->next)
+                       seq_printf(p, ", %s", action->name);
+
+               seq_putc(p, '\n');
+skip:
+               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+       } else if (i == NR_IRQS) {
+               seq_printf(p, "NMI: ");
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", nmi_count(j));
+               seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+               seq_printf(p, "LOC: ");
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
+               seq_putc(p, '\n');
+#endif
+               seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+               seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+       }
+       return 0;
+}
+
+
+
+
+#ifdef CONFIG_SMP
+inline void synchronize_irq(unsigned int irq)
+{
+       while (irq_desc[irq].status & IRQ_INPROGRESS)
+               cpu_relax();
+}
+#endif
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+asmlinkage int handle_IRQ_event(unsigned int irq,
+               struct pt_regs *regs, struct irqaction *action)
+{
+       int status = 1; /* Force the "do bottom halves" bit */
+       int ret, retval = 0;
+
+       if (!(action->flags & SA_INTERRUPT))
+               local_irq_enable();
+
+       do {
+               ret = action->handler(irq, action->dev_id, regs);
+               if (ret == IRQ_HANDLED)
+                       status |= action->flags;
+               retval |= ret;
+               action = action->next;
+       } while (action);
+       if (status & SA_SAMPLE_RANDOM)
+               add_interrupt_randomness(irq);
+       local_irq_disable();
+       return retval;
+}
+
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       struct irqaction *action;
+
+       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+               printk(KERN_ERR "irq event %d: bogus return value %x\n",
+                               irq, action_ret);
+       } else {
+               printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+       }
+       dump_stack();
+       printk(KERN_ERR "handlers:\n");
+       action = desc->action;
+       do {
+               printk(KERN_ERR "[<%p>]", action->handler);
+               print_symbol(" (%s)",
+                       (unsigned long)action->handler);
+               printk("\n");
+               action = action->next;
+       } while (action);
+}
+
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       static int count = 100;
+
+       if (count) {
+               count--;
+               __report_bad_irq(irq, desc, action_ret);
+       }
+}
+
+static int noirqdebug;
+
+static int __init noirqdebug_setup(char *str)
+{
+       noirqdebug = 1;
+       printk("IRQ lockup detection disabled\n");
+       return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ *  device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       if (action_ret != IRQ_HANDLED) {
+               desc->irqs_unhandled++;
+               if (action_ret != IRQ_NONE)
+                       report_bad_irq(irq, desc, action_ret);
+       }
+
+       desc->irq_count++;
+       if (desc->irq_count < 100000)
+               return;
+
+       desc->irq_count = 0;
+       if (desc->irqs_unhandled > 99900) {
+               /*
+                * The interrupt is stuck
+                */
+               __report_bad_irq(irq, desc, action_ret);
+               /*
+                * Now kill the IRQ
+                */
+               printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       desc->irqs_unhandled = 0;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock. 
+ */
+/**
+ *     disable_irq_nosync - disable an irq without waiting
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Disables and Enables are
+ *     nested.
+ *     Unlike disable_irq(), this function does not ensure existing
+ *     instances of the IRQ handler have completed before returning.
+ *
+ *     This function may be called from IRQ context.
+ */
+inline void disable_irq_nosync(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ *     disable_irq - disable an irq and wait for completion
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Enables and Disables are
+ *     nested.
+ *     This function waits for any pending IRQ handlers for this interrupt
+ *     to complete before returning. If you use this function while
+ *     holding a resource the IRQ handler may need you will deadlock.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+void disable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       disable_irq_nosync(irq);
+       if (desc->action)
+               synchronize_irq(irq);
+}
+
+/**
+ *     enable_irq - enable handling of an irq
+ *     @irq: Interrupt to enable
+ *
+ *     Undoes the effect of one call to disable_irq().  If this
+ *     matches the last disable, processing of interrupts on this
+ *     IRQ line is re-enabled.
+ *
+ *     This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       switch (desc->depth) {
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+               desc->status = status;
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       desc->status = status | IRQ_REPLAY;
+                       hw_resend_irq(desc->handler,irq);
+               }
+               desc->handler->enable(irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+               break;
+       case 0:
+               printk("enable_irq(%u) unbalanced from %p\n", irq,
+                      __builtin_return_address(0));
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
+{      
+       /* 
+        * We ack quickly, we don't want the irq controller
+        * thinking we're snobs just because some other CPU has
+        * disabled global interrupts (we have already done the
+        * INT_ACK cycles, it's too late to try to pretend to the
+        * controller that we aren't taking the interrupt).
+        *
+        * 0 return value means that this irq is already being
+        * handled by some other CPU. (or is disabled)
+        */
+       irq_desc_t *desc = irq_desc + irq;
+       struct irqaction * action;
+       unsigned int status;
+
+       irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+       /* Debugging check for stack overflow: is there less than 1KB free? */
+       {
+               long esp;
+
+               __asm__ __volatile__("andl %%esp,%0" :
+                                       "=r" (esp) : "0" (THREAD_SIZE - 1));
+               if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
+                       printk("do_IRQ: stack overflow: %ld\n",
+                               esp - sizeof(struct thread_info));
+                       dump_stack();
+               }
+       }
+#endif
+       kstat_this_cpu.irqs[irq]++;
+       spin_lock(&desc->lock);
+       desc->handler->ack(irq);
+       /*
+        * REPLAY is when Linux resends an IRQ that was dropped earlier
+        * WAITING is used by probe to mark irqs that are being tested
+        */
+       status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+       status |= IRQ_PENDING; /* we _want_ to handle it */
+
+       /*
+        * If the IRQ is disabled for whatever reason, we cannot
+        * use the action we have.
+        */
+       action = NULL;
+       if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
+               action = desc->action;
+               status &= ~IRQ_PENDING; /* we commit to handling */
+               status |= IRQ_INPROGRESS; /* we are handling it */
+       }
+       desc->status = status;
+
+       /*
+        * If there is no IRQ handler or it was disabled, exit early.
+        * Since we set PENDING, if another processor is handling
+        * a different instance of this same irq, the other processor
+        * will take care of it.
+        */
+       if (unlikely(!action))
+               goto out;
+
+       /*
+        * Edge triggered interrupts need to remember
+        * pending events.
+        * This applies to any hw interrupts that allow a second
+        * instance of the same irq to arrive while we are in do_IRQ
+        * or in the handler. But the code here only handles the _second_
+        * instance of the irq, not the third or fourth. So it is mostly
+        * useful for irq hardware that does not mask cleanly in an
+        * SMP environment.
+        */
+#ifdef CONFIG_4KSTACKS
+
+       for (;;) {
+               irqreturn_t action_ret;
+               u32 *isp;
+               union irq_ctx * curctx;
+               union irq_ctx * irqctx;
+
+               curctx = (union irq_ctx *) current_thread_info();
+               irqctx = hardirq_ctx[smp_processor_id()];
+
+               spin_unlock(&desc->lock);
+
+               /*
+                * this is where we switch to the IRQ stack. However, if we are already using
+                * the IRQ stack (because we interrupted a hardirq handler) we can't do that
+                * and just have to keep using the current stack (which is the irq stack already
+                * after all)
+                */
+
+               if (curctx == irqctx)
+                       action_ret = handle_IRQ_event(irq, regs, action);
+               else {
+                       /* build the stack frame on the IRQ stack */
+                       isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+                       irqctx->tinfo.task = curctx->tinfo.task;
+                       irqctx->tinfo.previous_esp = current_stack_pointer();
+
+                       *--isp = (u32) action;
+                       *--isp = (u32) regs;
+                       *--isp = (u32) irq;
+
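+                       /*
+                        * Call handle_IRQ_event on the per-CPU hardirq stack:
+                        * %ebx holds the prepared stack pointer (isp), and the
+                        * xchgl instructions swap it with %esp around the call.
+                        * The irq/regs/action arguments were stored on that
+                        * stack just above.
+                        */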
+                       asm volatile(
+                               "       xchgl   %%ebx,%%esp     \n"
+                               "       call    handle_IRQ_event \n"
+                               "       xchgl   %%ebx,%%esp     \n"
+                               : "=a"(action_ret)
+                               : "b"(isp)
+                               : "memory", "cc", "edx", "ecx"
+                       );
+
+
+               }
+               spin_lock(&desc->lock);
+               if (!noirqdebug)
+                       note_interrupt(irq, desc, action_ret);
+               if (curctx != irqctx)
+                       irqctx->tinfo.task = NULL;
+               if (likely(!(desc->status & IRQ_PENDING)))
+                       break;
+               desc->status &= ~IRQ_PENDING;
+       }
+
+#else
+
+       for (;;) {
+               irqreturn_t action_ret;
+
+               spin_unlock(&desc->lock);
+
+               action_ret = handle_IRQ_event(irq, regs, action);
+
+               spin_lock(&desc->lock);
+               if (!noirqdebug)
+                       note_interrupt(irq, desc, action_ret);
+               if (likely(!(desc->status & IRQ_PENDING)))
+                       break;
+               desc->status &= ~IRQ_PENDING;
+       }
+#endif
+       desc->status &= ~IRQ_INPROGRESS;
+
+out:
+       /*
+        * The ->end() handler has to deal with interrupts which got
+        * disabled while the handler was running.
+        */
+       desc->handler->end(irq);
+       spin_unlock(&desc->lock);
+
+       irq_exit();
+
+       return 1;
+}
+
+int can_request_irq(unsigned int irq, unsigned long irqflags)
+{
+       struct irqaction *action;
+
+       if (irq >= NR_IRQS)
+               return 0;
+       action = irq_desc[irq].action;
+       if (action) {
+               if (irqflags & action->flags & SA_SHIRQ)
+                       action = NULL;
+       }
+       return !action;
+}
+
+/**
+ *     request_irq - allocate an interrupt line
+ *     @irq: Interrupt line to allocate
+ *     @handler: Function to be called when the IRQ occurs
+ *     @irqflags: Interrupt type flags
+ *     @devname: An ascii name for the claiming device
+ *     @dev_id: A cookie passed back to the handler function
+ *
+ *     This call allocates interrupt resources and enables the
+ *     interrupt line and IRQ handling. From the point this
+ *     call is made your handler function may be invoked. Since
+ *     your handler function must clear any interrupt the board 
+ *     raises, you must take care both to initialise your hardware
+ *     and to set up the interrupt handler in the right order.
+ *
+ *     Dev_id must be globally unique. Normally the address of the
+ *     device data structure is used as the cookie. Since the handler
+ *     receives this value it makes sense to use it.
+ *
+ *     If your interrupt is shared you must pass a non NULL dev_id
+ *     as this is required when freeing the interrupt.
+ *
+ *     Flags:
+ *
+ *     SA_SHIRQ                Interrupt is shared
+ *
+ *     SA_INTERRUPT            Disable local interrupts while processing
+ *
+ *     SA_SAMPLE_RANDOM        The interrupt can be used for entropy
+ *
+ */
+int request_irq(unsigned int irq, 
+               irqreturn_t (*handler)(int, void *, struct pt_regs *),
+               unsigned long irqflags, 
+               const char * devname,
+               void *dev_id)
+{
+       int retval;
+       struct irqaction * action;
+
+#if 1
+       /*
+        * Sanity-check: shared interrupts should REALLY pass in
+        * a real dev-ID, otherwise we'll have trouble later trying
+        * to figure out which interrupt is which (messes up the
+        * interrupt freeing logic etc).
+        */
+       if (irqflags & SA_SHIRQ) {
+               if (!dev_id)
+                       printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
+       }
+#endif
+
+       if (irq >= NR_IRQS)
+               return -EINVAL;
+       if (!handler)
+               return -EINVAL;
+
+       action = (struct irqaction *)
+                       kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+       if (!action)
+               return -ENOMEM;
+
+       action->handler = handler;
+       action->flags = irqflags;
+       cpus_clear(action->mask);
+       action->name = devname;
+       action->next = NULL;
+       action->dev_id = dev_id;
+
+       retval = setup_irq(irq, action);
+       if (retval)
+               kfree(action);
+       return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+/**
+ *     free_irq - free an interrupt
+ *     @irq: Interrupt line to free
+ *     @dev_id: Device identity to free
+ *
+ *     Remove an interrupt handler. The handler is removed and if the
+ *     interrupt line is no longer in use by any driver it is disabled.
+ *     On a shared IRQ the caller must ensure the interrupt is disabled
+ *     on the card it drives before calling this function. The function
+ *     does not return until any executing interrupts for this IRQ
+ *     have completed.
+ *
+ *     This function must not be called from interrupt context. 
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+       irq_desc_t *desc;
+       struct irqaction **p;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       desc = irq_desc + irq;
+       spin_lock_irqsave(&desc->lock,flags);
+       p = &desc->action;
+       for (;;) {
+               struct irqaction * action = *p;
+
+               if (action) {
+                       struct irqaction **pp = p;
+                       p = &action->next;
+                       if (action->dev_id != dev_id)
+                               continue;
+
+                       /* Found it - now remove it from the list of entries */
+                       *pp = action->next;
+                       if (!desc->action) {
+                               desc->status |= IRQ_DISABLED;
+                               desc->handler->shutdown(irq);
+                       }
+                       spin_unlock_irqrestore(&desc->lock,flags);
+
+                       /* Wait to make sure it's not being used on another CPU */
+                       synchronize_irq(irq);
+
+#define SA_STATIC_ACTION 0x01000000 /* Is it our duty to free the action? */
+                       if (!(action->flags & SA_STATIC_ACTION))
+                               kfree(action);
+                       return;
+               }
+               printk("Trying to free free IRQ%d\n",irq);
+               spin_unlock_irqrestore(&desc->lock,flags);
+               return;
+       }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+
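+/* probe_sem serialises IRQ autodetection: it is taken in probe_irq_on() and
+   released by probe_irq_off() or probe_irq_mask(). */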
+static DECLARE_MUTEX(probe_sem);
+
+/**
+ *     probe_irq_on    - begin an interrupt autodetect
+ *
+ *     Commence probing for an interrupt. The interrupts are scanned
+ *     and a mask of potential interrupt lines is returned.
+ *
+ */
+unsigned long probe_irq_on(void)
+{
+       unsigned int i;
+       irq_desc_t *desc;
+       unsigned long val;
+       unsigned long delay;
+
+       down(&probe_sem);
+       /* 
+        * something may have generated an irq long ago and we want to
+        * flush such a longstanding irq before considering it as spurious. 
+        */
+       for (i = NR_PIRQS-1; i > 0; i--)  {
+               desc = irq_desc + i;
+
+               spin_lock_irq(&desc->lock);
+               if (!irq_desc[i].action) 
+                       irq_desc[i].handler->startup(i);
+               spin_unlock_irq(&desc->lock);
+       }
+
+       /* Wait for longstanding interrupts to trigger. */
+       for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+               /* about 20ms delay */ barrier();
+
+       /*
+        * enable any unassigned irqs
+        * (we must startup again here because if a longstanding irq
+        * happened in the previous stage, it may have masked itself)
+        */
+       for (i = NR_PIRQS-1; i > 0; i--) {
+               desc = irq_desc + i;
+
+               spin_lock_irq(&desc->lock);
+               if (!desc->action) {
+                       desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+                       if (desc->handler->startup(i))
+                               desc->status |= IRQ_PENDING;
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+
+       /*
+        * Wait for spurious interrupts to trigger
+        */
+       for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+               /* about 100ms delay */ barrier();
+
+       /*
+        * Now filter out any obviously spurious interrupts
+        */
+       val = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       /* It triggered already - consider it spurious. */
+                       if (!(status & IRQ_WAITING)) {
+                               desc->status = status & ~IRQ_AUTODETECT;
+                               desc->handler->shutdown(i);
+                       } else
+                               if (i < 32)
+                                       val |= 1 << i;
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+
+       return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+/**
+ *     probe_irq_mask - scan a bitmap of interrupt lines
+ *     @val:   mask of interrupts to consider
+ *
+ *     Scan the ISA bus interrupt lines and return a bitmap of
+ *     active interrupts. The interrupt probe logic state is then
+ *     returned to its previous value.
+ *
+ *     Note: we need to scan all the irq's even though we will
+ *     only return ISA irq numbers - just so that we reset them
+ *     all to a known state.
+ */
+unsigned int probe_irq_mask(unsigned long val)
+{
+       int i;
+       unsigned int mask;
+
+       mask = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       if (i < 16 && !(status & IRQ_WAITING))
+                               mask |= 1 << i;
+
+                       desc->status = status & ~IRQ_AUTODETECT;
+                       desc->handler->shutdown(i);
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+       up(&probe_sem);
+
+       return mask & val;
+}
+
+/*
+ * Return the one interrupt that triggered (this can
+ * handle any interrupt source).
+ */
+
+/**
+ *     probe_irq_off   - end an interrupt autodetect
+ *     @val: mask of potential interrupts (unused)
+ *
+ *     Scans the unused interrupt lines and returns the line which
+ *     appears to have triggered the interrupt. If no interrupt was
+ *     found then zero is returned. If more than one interrupt is
+ *     found then minus the first candidate is returned to indicate
+ *     there is doubt.
+ *
+ *     The interrupt probe logic state is returned to its previous
+ *     value.
+ *
+ *     BUGS: When used in a module (which arguably shouldn't happen)
+ *     nothing prevents two IRQ probe callers from overlapping. The
+ *     results of this are non-optimal.
+ */
+int probe_irq_off(unsigned long val)
+{
+       int i, irq_found, nr_irqs;
+
+       nr_irqs = 0;
+       irq_found = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       if (!(status & IRQ_WAITING)) {
+                               if (!nr_irqs)
+                                       irq_found = i;
+                               nr_irqs++;
+                       }
+                       desc->status = status & ~IRQ_AUTODETECT;
+                       desc->handler->shutdown(i);
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+       up(&probe_sem);
+
+       if (nr_irqs > 1)
+               irq_found = -irq_found;
+       return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+/* this was setup_x86_irq but it seems pretty generic */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+       int shared = 0;
+       unsigned long flags;
+       struct irqaction *old, **p;
+       irq_desc_t *desc = irq_desc + irq;
+
+       if (desc->handler == &no_irq_type)
+               return -ENOSYS;
+       /*
+        * Some drivers like serial.c use request_irq() heavily,
+        * so we have to be careful not to interfere with a
+        * running system.
+        */
+       if (new->flags & SA_SAMPLE_RANDOM) {
+               /*
+                * This function might sleep, we want to call it first,
+                * outside of the atomic block.
+                * Yes, this might clear the entropy pool if the wrong
+                * driver is attempted to be loaded, without actually
+                * installing a new handler, but this is not really a
+                * problem, since only the sysadmin is able to do this.
+                */
+               rand_initialize_irq(irq);
+       }
+
+       /*
+        * The following block of code has to be executed atomically
+        */
+       spin_lock_irqsave(&desc->lock,flags);
+       p = &desc->action;
+       if ((old = *p) != NULL) {
+               /* Can't share interrupts unless both agree to */
+               if (!(old->flags & new->flags & SA_SHIRQ)) {
+                       spin_unlock_irqrestore(&desc->lock,flags);
+                       return -EBUSY;
+               }
+
+               /* add new interrupt at end of irq queue */
+               do {
+                       p = &old->next;
+                       old = *p;
+               } while (old);
+               shared = 1;
+       }
+
+       *p = new;
+
+       if (!shared) {
+               desc->depth = 0;
+               desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+               desc->handler->startup(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock,flags);
+
+       register_irq_proc(irq);
+       return 0;
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+
+#ifdef CONFIG_SMP
+
+static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
+
+cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
+static int irq_affinity_read_proc(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+       if (count - len < 2)
+               return -EINVAL;
+       len += sprintf(page + len, "\n");
+       return len;
+}
+
+static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
+                                       unsigned long count, void *data)
+{
+       int irq = (long)data, full_count = count, err;
+       cpumask_t new_value, tmp;
+
+       if (!irq_desc[irq].handler->set_affinity)
+               return -EIO;
+
+       err = cpumask_parse(buffer, count, new_value);
+       if (err)
+               return err;
+
+       /*
+        * Do not allow disabling IRQs completely - it's a too easy
+        * way to make the system unusable accidentally :-) At least
+        * one online CPU still has to be targeted.
+        */
+       cpus_and(tmp, new_value, cpu_online_map);
+       if (cpus_empty(tmp))
+               return -EINVAL;
+
+       irq_affinity[irq] = new_value;
+       irq_desc[irq].handler->set_affinity(irq,
+                                       cpumask_of_cpu(first_cpu(new_value)));
+
+       return full_count;
+}
+
+#endif
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+       char name [MAX_NAMELEN];
+
+       if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+                       irq_dir[irq])
+               return;
+
+       memset(name, 0, MAX_NAMELEN);
+       sprintf(name, "%d", irq);
+
+       /* create /proc/irq/1234 */
+       irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+       {
+               struct proc_dir_entry *entry;
+
+               /* create /proc/irq/1234/smp_affinity */
+               entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+               if (entry) {
+                       entry->nlink = 1;
+                       entry->data = (void *)(long)irq;
+                       entry->read_proc = irq_affinity_read_proc;
+                       entry->write_proc = irq_affinity_write_proc;
+               }
+
+               smp_affinity_entry[irq] = entry;
+       }
+#endif
+}
+
+void init_irq_proc (void)
+{
+       int i;
+
+       /* create /proc/irq */
+       root_irq_dir = proc_mkdir("irq", NULL);
+       create_prof_cpu_mask(root_irq_dir);
+       /*
+        * Create entries for all existing IRQs.
+        */
+       for (i = 0; i < NR_IRQS; i++)
+               register_irq_proc(i);
+}
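The smp_affinity handlers above implement the usual /proc/irq/<N>/smp_affinity interface: it accepts a hexadecimal cpumask, and the mask must include at least one online CPU. A minimal userspace sketch (illustrative only; the IRQ number 16 and mask "3" are arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* pin IRQ 16 to CPUs 0 and 1 by writing the hex cpumask "3" */
	static int set_irq16_affinity(void)
	{
		int fd = open("/proc/irq/16/smp_affinity", O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "3\n", 2) < 0)
			perror("smp_affinity");
		close(fd);
		return 0;
	}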
+
+
+#ifdef CONFIG_4KSTACKS
+/*
+ * These should really be __section__(".bss.page_aligned") as well, but
+ * gcc 3.0 and earlier don't handle that correctly.
+ */
+static char softirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+       union irq_ctx *irqctx;
+
+       if (hardirq_ctx[cpu])
+               return;
+
+       irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       hardirq_ctx[cpu] = irqctx;
+
+       irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       softirq_ctx[cpu] = irqctx;
+
+       printk("CPU %u irqstacks, hard=%p soft=%p\n",
+               cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+       unsigned long flags;
+       struct thread_info *curctx;
+       union irq_ctx *irqctx;
+       u32 *isp;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               curctx = current_thread_info();
+               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx->tinfo.task = curctx->task;
+               irqctx->tinfo.previous_esp = current_stack_pointer();
+
+               /* build the stack frame on the softirq stack */
+               isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+
+
+               asm volatile(
+                       "       xchgl   %%ebx,%%esp     \n"
+                       "       call    __do_softirq    \n"
+                       "       movl    %%ebx,%%esp     \n"
+                       : "=b"(isp)
+                       : "0"(isp)
+                       : "memory", "cc", "edx", "ecx", "eax"
+               );
+       }
+
+       local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(do_softirq);
+#endif
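probe_irq_on()/probe_irq_mask()/probe_irq_off() above form the classic ISA interrupt autoprobe sequence. A rough driver-side sketch (illustrative only; my_dev_trigger_irq() is a hypothetical helper that makes the hardware assert its interrupt line):

	unsigned long mask;
	int irq;

	mask = probe_irq_on();          /* unmask unused IRQs, start autodetection */
	my_dev_trigger_irq();           /* hypothetical: tell the card to raise its IRQ */
	udelay(100);                    /* give the interrupt time to arrive */
	irq = probe_irq_off(mask);      /* 0: nothing seen, <0: several candidates */
	if (irq > 0)
		printk(KERN_INFO "device appears to use IRQ %d\n", irq);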
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c
new file mode 100644 (file)
index 0000000..1a762ce
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * linux/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *null)
+{
+       if (current->active_mm) {
+               load_LDT(&current->active_mm->context);
+               flush_page_update_queue();
+       }
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+{
+       void *oldldt;
+       void *newldt;
+       int oldsize;
+
+       if (mincount <= pc->size)
+               return 0;
+       oldsize = pc->size;
+       mincount = (mincount+511)&(~511);
+       if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+               newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+       else
+               newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+       if (!newldt)
+               return -ENOMEM;
+
+       if (oldsize)
+               memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+       oldldt = pc->ldt;
+       memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+       pc->ldt = newldt;
+       wmb();
+       pc->size = mincount;
+       wmb();
+
+       if (reload) {
+#ifdef CONFIG_SMP
+               cpumask_t mask;
+               preempt_disable();
+#endif
+               make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
+                                   PAGE_SIZE);
+               load_LDT(pc);
+               flush_page_update_queue();
+#ifdef CONFIG_SMP
+               mask = cpumask_of_cpu(smp_processor_id());
+               if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+                       smp_call_function(flush_ldt, NULL, 1, 1);
+               preempt_enable();
+#endif
+       }
+       if (oldsize) {
+               make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
+                       PAGE_SIZE);
+               flush_page_update_queue();
+               if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+                       vfree(oldldt);
+               else
+                       kfree(oldldt);
+       }
+       return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+       int err = alloc_ldt(new, old->size, 0);
+       if (err < 0)
+               return err;
+       memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+       make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+                           PAGE_SIZE);
+       flush_page_update_queue();
+       return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       struct mm_struct * old_mm;
+       int retval = 0;
+
+       init_MUTEX(&mm->context.sem);
+       mm->context.size = 0;
+       old_mm = current->mm;
+       if (old_mm && old_mm->context.size > 0) {
+               down(&old_mm->context.sem);
+               retval = copy_ldt(&mm->context, &old_mm->context);
+               up(&old_mm->context.sem);
+       }
+       return retval;
+}
+
+/*
+ * No need to lock the MM as we are the last user
+ */
+void destroy_context(struct mm_struct *mm)
+{
+       if (mm->context.size) {
+               if (mm == current->active_mm)
+                       clear_LDT();
+               make_pages_writable(mm->context.ldt, 
+                                    (mm->context.size * LDT_ENTRY_SIZE) /
+                                    PAGE_SIZE);
+               flush_page_update_queue();
+               if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+                       vfree(mm->context.ldt);
+               else
+                       kfree(mm->context.ldt);
+               mm->context.size = 0;
+       }
+}
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int err;
+       unsigned long size;
+       struct mm_struct * mm = current->mm;
+
+       if (!mm->context.size)
+               return 0;
+       if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+               bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+       down(&mm->context.sem);
+       size = mm->context.size*LDT_ENTRY_SIZE;
+       if (size > bytecount)
+               size = bytecount;
+
+       err = 0;
+       if (copy_to_user(ptr, mm->context.ldt, size))
+               err = -EFAULT;
+       up(&mm->context.sem);
+       if (err < 0)
+               goto error_return;
+       if (size != bytecount) {
+               /* zero-fill the rest */
+               if (clear_user(ptr+size, bytecount-size) != 0) {
+                       err = -EFAULT;
+                       goto error_return;
+               }
+       }
+       return bytecount;
+error_return:
+       return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int err;
+       unsigned long size;
+       void *address;
+
+       err = 0;
+       address = &default_ldt[0];
+       size = 5*sizeof(struct desc_struct);
+       if (size > bytecount)
+               size = bytecount;
+
+       err = size;
+       if (copy_to_user(ptr, address, size))
+               err = -EFAULT;
+
+       return err;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+{
+       struct mm_struct * mm = current->mm;
+       __u32 entry_1, entry_2, *lp;
+       unsigned long phys_lp;
+       int error;
+       struct user_desc ldt_info;
+
+       error = -EINVAL;
+       if (bytecount != sizeof(ldt_info))
+               goto out;
+       error = -EFAULT;        
+       if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+               goto out;
+
+       error = -EINVAL;
+       if (ldt_info.entry_number >= LDT_ENTRIES)
+               goto out;
+       if (ldt_info.contents == 3) {
+               if (oldmode)
+                       goto out;
+               if (ldt_info.seg_not_present == 0)
+                       goto out;
+       }
+
+       down(&mm->context.sem);
+       if (ldt_info.entry_number >= mm->context.size) {
+               error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+               if (error < 0)
+                       goto out_unlock;
+       }
+
+       lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+       phys_lp = arbitrary_virt_to_phys(lp);
+
+       /* Allow LDTs to be cleared by the user. */
+       if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+               if (oldmode || LDT_empty(&ldt_info)) {
+                       entry_1 = 0;
+                       entry_2 = 0;
+                       goto install;
+               }
+       }
+
+       entry_1 = LDT_entry_a(&ldt_info);
+       entry_2 = LDT_entry_b(&ldt_info);
+       if (oldmode)
+               entry_2 &= ~(1 << 20);
+
+       /* Install the new entry ...  */
+install:
+       error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
+
+out_unlock:
+       up(&mm->context.sem);
+out:
+       return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+       int ret = -ENOSYS;
+
+       switch (func) {
+       case 0:
+               ret = read_ldt(ptr, bytecount);
+               break;
+       case 1:
+               ret = write_ldt(ptr, bytecount, 1);
+               break;
+       case 2:
+               ret = read_default_ldt(ptr, bytecount);
+               break;
+       case 0x11:
+               ret = write_ldt(ptr, bytecount, 0);
+               break;
+       }
+       return ret;
+}
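sys_modify_ldt() above dispatches on func: 0 reads the LDT, 1 writes an entry in old (legacy) mode, 2 reads the default LDT, and 0x11 writes an entry in new mode. A userspace sketch of installing an entry with the new-mode call (illustrative only; assumes glibc's syscall() and <asm/ldt.h> for struct user_desc):

	#include <asm/ldt.h>        /* struct user_desc */
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int install_ldt_entry(int slot, unsigned int base, unsigned int limit)
	{
		struct user_desc d;

		memset(&d, 0, sizeof(d));
		d.entry_number   = slot;
		d.base_addr      = base;
		d.limit          = limit;
		d.seg_32bit      = 1;
		d.limit_in_pages = 1;
		/* func 0x11 = write_ldt() in new mode, as handled above */
		return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
	}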
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/pci-dma.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/pci-dma.c
new file mode 100644 (file)
index 0000000..49a5800
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * On i386 there is no hardware dynamic DMA address translation,
+ * so consistent alloc/free are merely page allocation/freeing.
+ * The rest of the dynamic DMA mapping interface is implemented
+ * in asm/pci.h.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <asm/io.h>
+
+struct dma_coherent_mem {
+       void            *virt_base;
+       u32             device_base;
+       int             size;
+       int             flags;
+       unsigned long   *bitmap;
+};
+
+void
+dma_contig_memory(unsigned long vstart, unsigned int order)
+{
+       /*
+        * Ensure multi-page extents are contiguous in machine memory.
+        * This code could be cleaned up some, and the number of
+        * hypercalls reduced.
+        */
+       pgd_t         *pgd; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long  pfn, i;
+
+       scrub_pages(vstart, 1 << order);
+       /* 1. Zap current PTEs, giving away the underlying pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               pfn = pte->pte_low >> PAGE_SHIFT;
+               queue_l1_entry_update(pte, 0);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+               flush_page_update_queue();
+               if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
+                                         &pfn, 1, 0) != 1) BUG();
+       }
+       /* 2. Get a new contiguous memory extent. */
+       if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+                                 &pfn, 1, order) != 1) BUG();
+       /* 3. Map the new extent in place of old pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               queue_l1_entry_update(
+                       pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+               queue_machphys_update(
+                       pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       pfn+i;
+       }
+       /* Flush updates through and flush the TLB. */
+       xen_tlb_flush();
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pte_offset_kernel pte_offset
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                          dma_addr_t *dma_handle)
+#else
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, int gfp)
+#endif
+{
+       void *ret;
+       unsigned int order = get_order(size);
+       unsigned long vstart;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+       int gfp = GFP_ATOMIC;
+
+       if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
+               gfp |= GFP_DMA;
+#else
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+       if (mem) {
+               int page = bitmap_find_free_region(mem->bitmap, mem->size,
+                                                    order);
+               if (page >= 0) {
+                       *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+                       ret = mem->virt_base + (page << PAGE_SHIFT);
+                       memset(ret, 0, size);
+                       return ret;
+               }
+               if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+                       return NULL;
+       }
+
+       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+               gfp |= GFP_DMA;
+#endif
+
+       vstart = __get_free_pages(gfp, order);
+       ret = (void *)vstart;
+       if (ret == NULL)
+               return ret;
+
+       dma_contig_memory(vstart, order);
+
+       memset(ret, 0, size);
+       *dma_handle = virt_to_bus(ret);
+
+       return ret;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle)
+#else
+void dma_free_coherent(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle)
+#endif
+{
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+       int order = get_order(size);
+       
+       if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+               bitmap_release_region(mem->bitmap, page, order);
+       } else
+               free_pages((unsigned long)vaddr, order);
+}
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                               dma_addr_t device_addr, size_t size, int flags)
+{
+       void __iomem *mem_base;
+       int pages = size >> PAGE_SHIFT;
+       int bitmap_size = (pages + 31)/32;
+
+       if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+               goto out;
+       if (!size)
+               goto out;
+       if (dev->dma_mem)
+               goto out;
+
+       /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+       mem_base = ioremap(bus_addr, size);
+       if (!mem_base)
+               goto out;
+
+       dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+       if (!dev->dma_mem)
+               goto out;
+       memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+       dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+       if (!dev->dma_mem->bitmap)
+               goto free1_out;
+       memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+       dev->dma_mem->virt_base = mem_base;
+       dev->dma_mem->device_base = device_addr;
+       dev->dma_mem->size = pages;
+       dev->dma_mem->flags = flags;
+
+       if (flags & DMA_MEMORY_MAP)
+               return DMA_MEMORY_MAP;
+
+       return DMA_MEMORY_IO;
+
+ free1_out:
+       kfree(dev->dma_mem->bitmap);
+ out:
+       return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+       
+       if(!mem)
+               return;
+       dev->dma_mem = NULL;
+       kfree(mem->bitmap);
+       kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                       dma_addr_t device_addr, size_t size)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+       int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       int pos, err;
+
+       if (!mem)
+               return ERR_PTR(-EINVAL);
+
+       pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+       err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+       if (err != 0)
+               return ERR_PTR(err);
+       return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
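dma_alloc_coherent()/dma_free_coherent() above plug into the generic 2.6 DMA API, with dma_contig_memory() exchanging the allocation for a machine-contiguous extent behind the scenes. A typical driver-side use, sketched with a made-up pci_dev pointer and buffer size:

	void *buf;
	dma_addr_t bus;

	buf = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... hand 'bus' to the device, access the buffer through 'buf' ... */
	dma_free_coherent(&pdev->dev, 4096, buf, bus);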
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/process.c
new file mode 100644 (file)
index 0000000..930e915
--- /dev/null
@@ -0,0 +1,854 @@
+/*
+ *  linux/arch/i386/kernel/process.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#include <asm-xen/multicall.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+#ifdef CONFIG_MATH_EMULATION
+#include <asm/math_emu.h>
+#endif
+
+#include <linux/irq.h>
+#include <linux/err.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int hlt_counter;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+       return ((unsigned long *)tsk->thread.esp)[3];
+}
+
+/*
+ * Powermanagement idle function, if any..
+ */
+void (*pm_idle)(void);
+
+void disable_hlt(void)
+{
+       hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+       hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/*
+ * We use this if we don't have any better
+ * idle routine..
+ */
+void default_idle(void)
+{
+       if (!hlt_counter && current_cpu_data.hlt_works_ok) {
+               local_irq_disable();
+               if (!need_resched())
+                       safe_halt();
+               else
+                       local_irq_enable();
+       }
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle (void)
+{
+       int oldval;
+
+       local_irq_enable();
+
+       /*
+        * Deal with another CPU just having chosen a thread to
+        * run here:
+        */
+       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+       if (!oldval) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               asm volatile(
+                       "2:"
+                       "testl %0, %1;"
+                       "rep; nop;"
+                       "je 2b;"
+                       : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               set_need_resched();
+       }
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+       /* endless idle loop with no priority at all */
+       while (1) {
+               while (!need_resched()) {
+                       void (*idle)(void);
+                       /*
+                        * Mark this as an RCU critical section so that
+                        * synchronize_kernel() in the unload path waits
+                        * for our completion.
+                        */
+                       rcu_read_lock();
+                       idle = pm_idle;
+
+                       if (!idle)
+                               idle = default_idle;
+
+                       irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+                       idle();
+                       rcu_read_unlock();
+               }
+               schedule();
+       }
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ */
+static void mwait_idle(void)
+{
+       local_irq_enable();
+
+       if (!need_resched()) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               do {
+                       __monitor((void *)&current_thread_info()->flags, 0, 0);
+                       if (need_resched())
+                               break;
+                       __mwait(0, 0);
+               } while (!need_resched());
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       }
+}
+
+void __init select_idle_routine(const struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_MWAIT)) {
+               printk("monitor/mwait feature present.\n");
+               /*
+                * Skip, if setup has overridden idle.
+                * One CPU supports mwait => All CPUs support mwait
+                */
+               if (!pm_idle) {
+                       printk("using mwait in idle threads.\n");
+                       pm_idle = mwait_idle;
+               }
+       }
+}
+
+static int __init idle_setup (char *str)
+{
+       if (!strncmp(str, "poll", 4)) {
+               printk("using polling idle threads.\n");
+               pm_idle = poll_idle;
+#ifdef CONFIG_X86_SMP
+               if (smp_num_siblings > 1)
+                       printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
+#endif
+       } else if (!strncmp(str, "halt", 4)) {
+               printk("using halt in idle threads.\n");
+               pm_idle = default_idle;
+       }
+
+       return 1;
+}
+
+__setup("idle=", idle_setup);
+
+void show_regs(struct pt_regs * regs)
+{
+       printk("\n");
+       printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+       printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+       print_symbol("EIP is at %s\n", regs->eip);
+
+       if (regs->xcs & 2)
+               printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+       printk(" EFLAGS: %08lx    %s  (%s)\n",regs->eflags, print_tainted(),UTS_RELEASE);
+       printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+               regs->eax,regs->ebx,regs->ecx,regs->edx);
+       printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+               regs->esi, regs->edi, regs->ebp);
+       printk(" DS: %04x ES: %04x\n",
+               0xffff & regs->xds,0xffff & regs->xes);
+
+       show_trace(NULL, &regs->esp);
+}
+
+/*
+ * This gets run with %ebx containing the
+ * function to call, and %edx containing
+ * the "args".
+ */
+extern void kernel_thread_helper(void);
+__asm__(".section .text\n"
+       ".align 4\n"
+       "kernel_thread_helper:\n\t"
+       "movl %edx,%eax\n\t"
+       "pushl %edx\n\t"
+       "call *%ebx\n\t"
+       "pushl %eax\n\t"
+       "call do_exit\n"
+       ".previous");
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+       struct pt_regs regs;
+
+       memset(&regs, 0, sizeof(regs));
+
+       regs.ebx = (unsigned long) fn;
+       regs.edx = (unsigned long) arg;
+
+       regs.xds = __USER_DS;
+       regs.xes = __USER_DS;
+       regs.orig_eax = -1;
+       regs.eip = (unsigned long) kernel_thread_helper;
+       regs.xcs = __KERNEL_CS;
+       regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+
+       /* Ok, create the new process.. */
+       return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+       struct task_struct *tsk = current;
+       struct thread_struct *t = &tsk->thread;
+
+       /* The process may have allocated an io port bitmap... nuke it. */
+       if (unlikely(NULL != t->io_bitmap_ptr)) {
+               int cpu = get_cpu();
+               struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+               kfree(t->io_bitmap_ptr);
+               t->io_bitmap_ptr = NULL;
+               /*
+                * Careful, clear this in the TSS too:
+                */
+               memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
+               t->io_bitmap_max = 0;
+               tss->io_bitmap_owner = NULL;
+               tss->io_bitmap_max = 0;
+               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               put_cpu();
+       }
+}
+
+void flush_thread(void)
+{
+       struct task_struct *tsk = current;
+
+       memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));        
+       /*
+        * Forget coprocessor state..
+        */
+       clear_fpu(tsk);
+       tsk->used_math = 0;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+       if (dead_task->mm) {
+               // temporary debugging check
+               if (dead_task->mm->context.size) {
+                       printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+                                       dead_task->comm,
+                                       dead_task->mm->context.ldt,
+                                       dead_task->mm->context.size);
+                       BUG();
+               }
+       }
+
+       release_x86_irqs(dead_task);
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+       unlazy_fpu(tsk);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+       unsigned long unused,
+       struct task_struct * p, struct pt_regs * regs)
+{
+       struct pt_regs * childregs;
+       struct task_struct *tsk;
+       int err;
+       unsigned long eflags;
+
+       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+       *childregs = *regs;
+       childregs->eax = 0;
+       childregs->esp = esp;
+       p->set_child_tid = p->clear_child_tid = NULL;
+
+       p->thread.esp = (unsigned long) childregs;
+       p->thread.esp0 = (unsigned long) (childregs+1);
+
+       p->thread.eip = (unsigned long) ret_from_fork;
+
+       savesegment(fs,p->thread.fs);
+       savesegment(gs,p->thread.gs);
+
+       tsk = current;
+       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+               p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+               if (!p->thread.io_bitmap_ptr) {
+                       p->thread.io_bitmap_max = 0;
+                       return -ENOMEM;
+               }
+               memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
+                       IO_BITMAP_BYTES);
+       }
+
+       /*
+        * Set a new TLS for the child thread?
+        */
+       if (clone_flags & CLONE_SETTLS) {
+               struct desc_struct *desc;
+               struct user_desc info;
+               int idx;
+
+               err = -EFAULT;
+               if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
+                       goto out;
+               err = -EINVAL;
+               if (LDT_empty(&info))
+                       goto out;
+
+               idx = info.entry_number;
+               if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+                       goto out;
+
+               desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+               desc->a = LDT_entry_a(&info);
+               desc->b = LDT_entry_b(&info);
+       }
+
+
+       __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
+       p->thread.io_pl = (eflags >> 12) & 3;
+
+       err = 0;
+ out:
+       if (err && p->thread.io_bitmap_ptr) {
+               kfree(p->thread.io_bitmap_ptr);
+               p->thread.io_bitmap_max = 0;
+       }
+       return err;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+       int i;
+
+/* changed the size calculations - should hopefully work better. lbt */
+       dump->magic = CMAGIC;
+       dump->start_code = 0;
+       dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+       dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+       dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+       dump->u_dsize -= dump->u_tsize;
+       dump->u_ssize = 0;
+       for (i = 0; i < 8; i++)
+               dump->u_debugreg[i] = current->thread.debugreg[i];  
+
+       if (dump->start_stack < TASK_SIZE)
+               dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+       dump->regs.ebx = regs->ebx;
+       dump->regs.ecx = regs->ecx;
+       dump->regs.edx = regs->edx;
+       dump->regs.esi = regs->esi;
+       dump->regs.edi = regs->edi;
+       dump->regs.ebp = regs->ebp;
+       dump->regs.eax = regs->eax;
+       dump->regs.ds = regs->xds;
+       dump->regs.es = regs->xes;
+       savesegment(fs,dump->regs.fs);
+       savesegment(gs,dump->regs.gs);
+       dump->regs.orig_eax = regs->orig_eax;
+       dump->regs.eip = regs->eip;
+       dump->regs.cs = regs->xcs;
+       dump->regs.eflags = regs->eflags;
+       dump->regs.esp = regs->esp;
+       dump->regs.ss = regs->xss;
+
+       dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+}
+
+/* 
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+       struct pt_regs ptregs;
+       
+       ptregs = *(struct pt_regs *)
+               ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+       ptregs.xcs &= 0xffff;
+       ptregs.xds &= 0xffff;
+       ptregs.xes &= 0xffff;
+       ptregs.xss &= 0xffff;
+
+       elf_core_copy_regs(regs, &ptregs);
+
+       return 1;
+}
+
+static inline void
+handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+{
+       if (!next->io_bitmap_ptr) {
+               /*
+                * Disable the bitmap via an invalid offset. We still cache
+                * the previous bitmap owner and the IO bitmap contents:
+                */
+               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               return;
+       }
+       if (likely(next == tss->io_bitmap_owner)) {
+               /*
+                * Previous owner of the bitmap (hence the bitmap content)
+                * matches the next task, we don't have to do anything but
+                * to set a valid offset in the TSS:
+                */
+               tss->io_bitmap_base = IO_BITMAP_OFFSET;
+               return;
+       }
+       /*
+        * Lazy TSS's I/O bitmap copy. We set an invalid offset here
+        * and we let the task get a GPF in case an I/O instruction
+        * is performed.  The handler of the GPF will verify that the
+        * faulting task has a valid I/O bitmap and, if true, does the
+        * real copy and restarts the instruction.  This will save us
+        * redundant copies when the currently switched task does not
+        * perform any I/O during its timeslice.
+        */
+       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+}
+/*
+ * This special macro can be used to load a debugging register
+ */
+#define loaddebug(thread,register) \
+               HYPERVISOR_set_debugreg((register),     \
+                       (thread->debugreg[register]))
+
+/*
+ *     switch_to(x,y) should switch tasks from x to y.
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process. Lazy FP saving no longer makes any sense
+ * with modern CPUs, and this simplifies a lot of things (SMP
+ * and UP become the same).
+ *
+ * NOTE! We used to use the x86 hardware context switching. The
+ * reason for not using it any more becomes apparent when you
+ * try to recover gracefully from saved state that is no longer
+ * valid (stale segment register values in particular). With the
+ * hardware task-switch, there is no way to fix up bad state in
+ * a reasonable manner.
+ *
+ * The fact that Intel documents the hardware task-switching to
+ * be slow is a fairly red herring - this code is not noticeably
+ * faster. However, there _is_ some room for improvement here,
+ * so the performance issues may eventually be a valid point.
+ * More important, however, is the fact that this allows us much
+ * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
+ */
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+       struct thread_struct *prev = &prev_p->thread,
+                                *next = &next_p->thread;
+       int cpu = smp_processor_id();
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       dom0_op_t op;
+
+        /* NB. No need to disable interrupts as already done in sched.c */
+        /* __cli(); */
+
+       /*
+        * Save away %fs and %gs. No need to save %es and %ds, as
+        * those are always kernel segments while inside the kernel.
+        */
+       asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
+       asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+
+       /*
+        * We clobber FS and GS here so that we avoid a GPF when
+        * restoring previous task's FS/GS values in Xen when the LDT
+        * is switched. If we don't do this then we can end up
+        * erroneously re-flushing the page-update queue when we
+        * 'execute_multicall_list'.
+        */
+       __asm__ __volatile__ ( 
+               "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
+               "eax" );
+
+       MULTICALL_flush_page_update_queue();
+
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+       /*
+        * This is basically '__unlazy_fpu', except that we queue a
+        * multicall to indicate FPU task switch, rather than
+        * synchronously trapping to Xen.
+        */
+       if (prev_p->thread_info->status & TS_USEDFPU) {
+               save_init_fpu(prev_p);
+               queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+       }
+
+       /*
+        * Reload esp0, LDT and the page table pointer:
+        * This is load_esp0(tss, next) with a multicall.
+        */
+       tss->esp0 = next->esp0;
+       /* This can only happen when SEP is enabled, no need to test
+        * "SEP"arately */
+       if (unlikely(tss->ss1 != next->sysenter_cs)) {
+               tss->ss1 = next->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, next->sysenter_cs, 0);
+       }
+       queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+
+       /*
+        * Load the per-thread Thread-Local Storage descriptor.
+        * This is load_TLS(next, cpu) with multicalls.
+        */
+#define C(i) do {                                                          \
+       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||        \
+                    next->tls_array[i].b != prev->tls_array[i].b))         \
+               queue_multicall3(__HYPERVISOR_update_descriptor,            \
+                                virt_to_machine(&get_cpu_gdt_table(cpu)    \
+                                                [GDT_ENTRY_TLS_MIN + i]),  \
+                                ((u32 *)&next->tls_array[i])[0],           \
+                                ((u32 *)&next->tls_array[i])[1]);          \
+} while (0)
+       C(0); C(1); C(2);
+#undef C
+
+       if (xen_start_info.flags & SIF_PRIVILEGED) {
+               op.cmd           = DOM0_IOPL;
+               op.u.iopl.domain = DOMID_SELF;
+               op.u.iopl.iopl   = next->io_pl;
+               queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+       }
+
+       /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
+       execute_multicall_list();
+        /* __sti(); */
+
+       /*
+        * Restore %fs and %gs if needed.
+        */
+       if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
+               loadsegment(fs, next->fs);
+               loadsegment(gs, next->gs);
+       }
+
+       /*
+        * Now maybe reload the debug registers
+        */
+       if (unlikely(next->debugreg[7])) {
+               loaddebug(next, 0);
+               loaddebug(next, 1);
+               loaddebug(next, 2);
+               loaddebug(next, 3);
+               /* no 4 and 5 */
+               loaddebug(next, 6);
+               loaddebug(next, 7);
+       }
+
+       if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
+               handle_io_bitmap(next, tss);
+
+       return prev_p;
+}
+
+asmlinkage int sys_fork(struct pt_regs regs)
+{
+       return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+asmlinkage int sys_clone(struct pt_regs regs)
+{
+       unsigned long clone_flags;
+       unsigned long newsp;
+       int __user *parent_tidptr, *child_tidptr;
+
+       clone_flags = regs.ebx;
+       newsp = regs.ecx;
+       parent_tidptr = (int __user *)regs.edx;
+       child_tidptr = (int __user *)regs.edi;
+       if (!newsp)
+               newsp = regs.esp;
+       return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(struct pt_regs regs)
+{
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(struct pt_regs regs)
+{
+       int error;
+       char * filename;
+
+       filename = getname((char __user *) regs.ebx);
+       error = PTR_ERR(filename);
+       if (IS_ERR(filename))
+               goto out;
+       error = do_execve(filename,
+                       (char __user * __user *) regs.ecx,
+                       (char __user * __user *) regs.edx,
+                       &regs);
+       if (error == 0) {
+               current->ptrace &= ~PT_DTRACE;
+               /* Make sure we don't return using sysenter.. */
+               set_thread_flag(TIF_IRET);
+       }
+       putname(filename);
+out:
+       return error;
+}
+
+#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
+#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
+
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long ebp, esp, eip;
+       unsigned long stack_page;
+       int count = 0;
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+       stack_page = (unsigned long)p->thread_info;
+       esp = p->thread.esp;
+       if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
+               return 0;
+       /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+       ebp = *(unsigned long *) esp;
+       do {
+               if (ebp < stack_page || ebp > top_ebp+stack_page)
+                       return 0;
+               eip = *(unsigned long *) (ebp+4);
+               if (!in_sched_functions(eip))
+                       return eip;
+               ebp = *(unsigned long *) ebp;
+       } while (count++ < 16);
+       return 0;
+}
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+       struct thread_struct *t = &current->thread;
+       int idx;
+
+       for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+               if (desc_empty(t->tls_array + idx))
+                       return idx + GDT_ENTRY_TLS_MIN;
+       return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
+{
+       struct thread_struct *t = &current->thread;
+       struct user_desc info;
+       struct desc_struct *desc;
+       int cpu, idx;
+
+       if (copy_from_user(&info, u_info, sizeof(info)))
+               return -EFAULT;
+       idx = info.entry_number;
+
+       /*
+        * index -1 means the kernel should try to find and
+        * allocate an empty descriptor:
+        */
+       if (idx == -1) {
+               idx = get_free_idx();
+               if (idx < 0)
+                       return idx;
+               if (put_user(idx, &u_info->entry_number))
+                       return -EFAULT;
+       }
+
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+       /*
+        * We must not get preempted while modifying the TLS.
+        */
+       cpu = get_cpu();
+
+       if (LDT_empty(&info)) {
+               desc->a = 0;
+               desc->b = 0;
+       } else {
+               desc->a = LDT_entry_a(&info);
+               desc->b = LDT_entry_b(&info);
+       }
+       load_TLS(t, cpu);
+
+       put_cpu();
+
+       return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+       (((desc)->a >> 16) & 0x0000ffff) | \
+       (((desc)->b << 16) & 0x00ff0000) | \
+       ( (desc)->b        & 0xff000000)   )
+
+#define GET_LIMIT(desc) ( \
+       ((desc)->a & 0x0ffff) | \
+        ((desc)->b & 0xf0000) )
+       
+#define GET_32BIT(desc)                (((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)     (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)     (((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)  (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)      (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)      (((desc)->b >> 20) & 1)
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+{
+       struct user_desc info;
+       struct desc_struct *desc;
+       int idx;
+
+       if (get_user(idx, &u_info->entry_number))
+               return -EFAULT;
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+       info.entry_number = idx;
+       info.base_addr = GET_BASE(desc);
+       info.limit = GET_LIMIT(desc);
+       info.seg_32bit = GET_32BIT(desc);
+       info.contents = GET_CONTENTS(desc);
+       info.read_exec_only = !GET_WRITABLE(desc);
+       info.limit_in_pages = GET_LIMIT_PAGES(desc);
+       info.seg_not_present = !GET_PRESENT(desc);
+       info.useable = GET_USEABLE(desc);
+
+       if (copy_to_user(u_info, &info, sizeof(info)))
+               return -EFAULT;
+       return 0;
+}
+
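sys_set_thread_area()/sys_get_thread_area() above back %gs-based thread-local storage on i386. A rough userspace sketch of claiming a free GDT TLS slot (illustrative only; tls_block is a hypothetical per-thread area, and the selector math assumes the usual (index << 3) | RPL 3 encoding):

	#include <asm/ldt.h>        /* struct user_desc */
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int claim_tls_slot(void *tls_block)
	{
		struct user_desc d;

		memset(&d, 0, sizeof(d));
		d.entry_number   = -1;      /* let the kernel pick a free TLS slot */
		d.base_addr      = (unsigned int)(unsigned long)tls_block;
		d.limit          = 0xfffff;
		d.seg_32bit      = 1;
		d.limit_in_pages = 1;
		d.useable        = 1;
		if (syscall(SYS_set_thread_area, &d) != 0)
			return -1;
		/* load %gs with the selector for the slot the kernel chose */
		asm volatile("movw %w0, %%gs" : : "q"((d.entry_number << 3) | 3));
		return d.entry_number;
	}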
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c
new file mode 100644 (file)
index 0000000..68a8f46
--- /dev/null
@@ -0,0 +1,1508 @@
+/*
+ *  linux/arch/i386/kernel/setup.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *
+ *  Memory region support
+ *     David Parsons <orc@pell.chi.il.us>, July-August 1999
+ *
+ *  Added E820 sanitization routine (removes overlapping memory regions);
+ *  Brian Moyle <bmoyle@mvista.com>, February 2001
+ *
+ * Moved CPU detection code to cpu/${cpu}.c
+ *    Patrick Mochel <mochel@osdl.org>, March 2002
+ *
+ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
+ *  Alex Achenbach <xela@slit.de>, December 2002.
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/apm_bios.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/edd.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/sections.h>
+#include <asm/io_apic.h>
+#include <asm/ist.h>
+#include <asm/io.h>
+#include <asm-xen/hypervisor.h>
+#include "setup_arch_pre.h"
+#include <bios_ebda.h>
+
+int disable_pse __initdata = 0;
+
+/*
+ * Machine setup..
+ */
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+/* cpu data as detected by the assembly code in head.S */
+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+/* common cpu data for all cpus */
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+#ifdef CONFIG_ACPI_INTERPRETER
+       int acpi_disabled = 0;
+#else
+       int acpi_disabled = 1;
+#endif
+EXPORT_SYMBOL(acpi_disabled);
+
+#ifdef CONFIG_ACPI_BOOT
+int __initdata acpi_force = 0;
+extern acpi_interrupt_flags    acpi_sci_flags;
+#endif
+
+int MCA_bus;
+/* for MCA, but anyone else can use it if they want */
+unsigned int machine_id;
+unsigned int machine_submodel_id;
+unsigned int BIOS_revision;
+unsigned int mca_pentium_flag;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* user-defined highmem size */
+static unsigned int highmem_pages = -1;
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct apm_info apm_info;
+struct sys_desc_table_struct {
+       unsigned short length;
+       unsigned char table[0];
+};
+struct edid_info edid_info;
+struct ist_info ist_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern void early_cpu_init(void);
+extern void dmi_scan_machine(void);
+extern void generic_apic_probe(char *);
+extern int root_mountflags;
+
+unsigned long saved_videomode;
+
+#define RAMDISK_IMAGE_START_MASK       0x07FF
+#define RAMDISK_PROMPT_FLAG            0x8000
+#define RAMDISK_LOAD_FLAG              0x4000  
+
+static char command_line[COMMAND_LINE_SIZE];
+
+unsigned char __initdata boot_params[PARAM_SIZE];
+
+static struct resource data_resource = {
+       .name   = "Kernel data",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+       .name   = "Kernel code",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static struct resource system_rom_resource = {
+       .name   = "System ROM",
+       .start  = 0xf0000,
+       .end    = 0xfffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+       .name   = "Extension ROM",
+       .start  = 0xe0000,
+       .end    = 0xeffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+       .name   = "Adapter ROM",
+       .start  = 0xc8000,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+#define ADAPTER_ROM_RESOURCES \
+       (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+       .name   = "Video ROM",
+       .start  = 0xc0000,
+       .end    = 0xc7fff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+#endif
+
+static struct resource video_ram_resource = {
+       .name   = "Video RAM area",
+       .start  = 0xa0000,
+       .end    = 0xbffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+       .name   = "dma1",
+       .start  = 0x0000,
+       .end    = 0x001f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "pic1",
+       .start  = 0x0020,
+       .end    = 0x0021,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "timer0",
+       .start  = 0x0040,
+       .end    = 0x0043,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "timer1",
+       .start  = 0x0050,
+       .end    = 0x0053,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "keyboard",
+       .start  = 0x0060,
+       .end    = 0x006f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "dma page reg",
+       .start  = 0x0080,
+       .end    = 0x008f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "pic2",
+       .start  = 0x00a0,
+       .end    = 0x00a1,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "dma2",
+       .start  = 0x00c0,
+       .end    = 0x00df,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "fpu",
+       .start  = 0x00f0,
+       .end    = 0x00ff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
+#define STANDARD_IO_RESOURCES \
+       (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
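+/* Option ROMs begin with the 0x55,0xAA signature bytes. */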
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+       unsigned char *p, sum = 0;
+
+       for (p = rom; p < rom + length; p++)
+               sum += *p;
+       return sum == 0;
+}
+
+static void __init probe_roms(void)
+{
+       unsigned long start, length, upper;
+       unsigned char *rom;
+       int           i;
+
+       /* video rom */
+       upper = adapter_rom_resources[0].start;
+       for (start = video_rom_resource.start; start < upper; start += 2048) {
+               rom = isa_bus_to_virt(start);
+               if (!romsignature(rom))
+                       continue;
+
+               video_rom_resource.start = start;
+
+               /* 0 < length <= 0x7f * 512, historically */
+               length = rom[2] * 512;
+
+               /* if checksum okay, trust length byte */
+               if (length && romchecksum(rom, length))
+                       video_rom_resource.end = start + length - 1;
+
+               request_resource(&iomem_resource, &video_rom_resource);
+               break;
+       }
+
+       start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+       if (start < upper)
+               start = upper;
+
+       /* system rom */
+       request_resource(&iomem_resource, &system_rom_resource);
+       upper = system_rom_resource.start;
+
+       /* check for extension rom (ignore length byte!) */
+       rom = isa_bus_to_virt(extension_rom_resource.start);
+       if (romsignature(rom)) {
+               length = extension_rom_resource.end - extension_rom_resource.start + 1;
+               if (romchecksum(rom, length)) {
+                       request_resource(&iomem_resource, &extension_rom_resource);
+                       upper = extension_rom_resource.start;
+               }
+       }
+
+       /* check for adapter roms on 2k boundaries */
+       for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+               rom = isa_bus_to_virt(start);
+               if (!romsignature(rom))
+                       continue;
+
+               /* 0 < length <= 0x7f * 512, historically */
+               length = rom[2] * 512;
+
+               /* but accept any length that fits if checksum okay */
+               if (!length || start + length > upper || !romchecksum(rom, length))
+                       continue;
+
+               adapter_rom_resources[i].start = start;
+               adapter_rom_resources[i].end = start + length - 1;
+               request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+               start = adapter_rom_resources[i++].end & ~2047UL;
+       }
+}
+#endif
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
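+/*
+ * phys_to_machine_mapping[] translates pseudo-physical frame numbers into
+ * machine frame numbers; pfn_to_mfn_frame_list records the machine frames
+ * that hold this table and is published to Xen in setup_arch() below.
+ */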
+unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+multicall_entry_t multicall_list[8];
+int nr_multicall_ents = 0;
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+extern void (*pm_idle)(void);
+
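+/*
+ * Trim the memory map (EFI map when present, otherwise e820) so that no
+ * usable RAM region extends beyond 'size' bytes; used by "mem=" below.
+ */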
+static void __init limit_regions(unsigned long long size)
+{
+       unsigned long long current_addr = 0;
+       int i;
+
+       if (efi_enabled) {
+               for (i = 0; i < memmap.nr_map; i++) {
+                       current_addr = memmap.map[i].phys_addr +
+                                      (memmap.map[i].num_pages << 12);
+                       if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+                               if (current_addr >= size) {
+                                       memmap.map[i].num_pages -=
+                                               (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+                                       memmap.nr_map = i + 1;
+                                       return;
+                               }
+                       }
+               }
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               if (e820.map[i].type == E820_RAM) {
+                       current_addr = e820.map[i].addr + e820.map[i].size;
+                       if (current_addr >= size) {
+                               e820.map[i].size -= current_addr-size;
+                               e820.nr_map = i + 1;
+                               return;
+                       }
+               }
+       }
+}
+
+static void __init add_memory_region(unsigned long long start,
+                                  unsigned long long size, int type)
+{
+       int x;
+
+       if (!efi_enabled) {
+               x = e820.nr_map;
+
+               if (x == E820MAX) {
+                   printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+                   return;
+               }
+
+               e820.map[x].addr = start;
+               e820.map[x].size = size;
+               e820.map[x].type = type;
+               e820.nr_map++;
+       }
+} /* add_memory_region */
+
+#define E820_DEBUG     1
+
+static void __init print_memory_map(char *who)
+{
+       int i;
+
+       for (i = 0; i < e820.nr_map; i++) {
+               printk(" %s: %016Lx - %016Lx ", who,
+                       e820.map[i].addr,
+                       e820.map[i].addr + e820.map[i].size);
+               switch (e820.map[i].type) {
+               case E820_RAM:  printk("(usable)\n");
+                               break;
+               case E820_RESERVED:
+                               printk("(reserved)\n");
+                               break;
+               case E820_ACPI:
+                               printk("(ACPI data)\n");
+                               break;
+               case E820_NVS:
+                               printk("(ACPI NVS)\n");
+                               break;
+               default:        printk("type %lu\n", e820.map[i].type);
+                               break;
+               }
+       }
+}
+
+#if 0
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries.  The following 
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+struct change_member {
+       struct e820entry *pbios; /* pointer to original bios entry */
+       unsigned long long addr; /* address for this change point */
+};
+struct change_member change_point_list[2*E820MAX] __initdata;
+struct change_member *change_point[2*E820MAX] __initdata;
+struct e820entry *overlap_list[E820MAX] __initdata;
+struct e820entry new_bios[E820MAX] __initdata;
+
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+       struct change_member *change_tmp;
+       unsigned long current_type, last_type;
+       unsigned long long last_addr;
+       int chgidx, still_changing;
+       int overlap_entries;
+       int new_bios_entry;
+       int old_nr, new_nr, chg_nr;
+       int i;
+
+       /*
+               Visually we're performing the following (1,2,3,4 = memory types)...
+
+               Sample memory map (w/overlaps):
+                  ____22__________________
+                  ______________________4_
+                  ____1111________________
+                  _44_____________________
+                  11111111________________
+                  ____________________33__
+                  ___________44___________
+                  __________33333_________
+                  ______________22________
+                  ___________________2222_
+                  _________111111111______
+                  _____________________11_
+                  _________________4______
+
+               Sanitized equivalent (no overlap):
+                  1_______________________
+                  _44_____________________
+                  ___1____________________
+                  ____22__________________
+                  ______11________________
+                  _________1______________
+                  __________3_____________
+                  ___________44___________
+                  _____________33_________
+                  _______________2________
+                  ________________1_______
+                  _________________4______
+                  ___________________2____
+                  ____________________33__
+                  ______________________4_
+       */
+
+       /* if there's only one memory region, don't bother */
+       if (*pnr_map < 2)
+               return -1;
+
+       old_nr = *pnr_map;
+
+       /* bail out if we find any unreasonable addresses in bios map */
+       for (i=0; i<old_nr; i++)
+               if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+                       return -1;
+
+       /* create pointers for initial change-point information (for sorting) */
+       for (i=0; i < 2*old_nr; i++)
+               change_point[i] = &change_point_list[i];
+
+       /* record all known change-points (starting and ending addresses),
+          omitting those that are for empty memory regions */
+       chgidx = 0;
+       for (i=0; i < old_nr; i++)      {
+               if (biosmap[i].size != 0) {
+                       change_point[chgidx]->addr = biosmap[i].addr;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+                       change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+               }
+       }
+       chg_nr = chgidx;        /* true number of change-points */
+
+       /* sort change-point list by memory addresses (low -> high) */
+       still_changing = 1;
+       while (still_changing)  {
+               still_changing = 0;
+               for (i=1; i < chg_nr; i++)  {
+                       /* if <current_addr> > <last_addr>, swap */
+                       /* or, if current=<start_addr> & last=<end_addr>, swap */
+                       if ((change_point[i]->addr < change_point[i-1]->addr) ||
+                               ((change_point[i]->addr == change_point[i-1]->addr) &&
+                                (change_point[i]->addr == change_point[i]->pbios->addr) &&
+                                (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+                          )
+                       {
+                               change_tmp = change_point[i];
+                               change_point[i] = change_point[i-1];
+                               change_point[i-1] = change_tmp;
+                               still_changing=1;
+                       }
+               }
+       }
+
+       /* create a new bios memory map, removing overlaps */
+       overlap_entries=0;       /* number of entries in the overlap table */
+       new_bios_entry=0;        /* index for creating new bios map entries */
+       last_type = 0;           /* start with undefined memory type */
+       last_addr = 0;           /* start with 0 as last starting address */
+       /* loop through change-points, determining effect on the new bios map */
+       for (chgidx=0; chgidx < chg_nr; chgidx++)
+       {
+               /* keep track of all overlapping bios entries */
+               if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+               {
+                       /* add map entry to overlap list (> 1 entry implies an overlap) */
+                       overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+               }
+               else
+               {
+                       /* remove entry from list (order independent, so swap with last) */
+                       for (i=0; i<overlap_entries; i++)
+                       {
+                               if (overlap_list[i] == change_point[chgidx]->pbios)
+                                       overlap_list[i] = overlap_list[overlap_entries-1];
+                       }
+                       overlap_entries--;
+               }
+               /* if there are overlapping entries, decide which "type" to use */
+               /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+               current_type = 0;
+               for (i=0; i<overlap_entries; i++)
+                       if (overlap_list[i]->type > current_type)
+                               current_type = overlap_list[i]->type;
+               /* continue building up new bios map based on this information */
+               if (current_type != last_type)  {
+                       if (last_type != 0)      {
+                               new_bios[new_bios_entry].size =
+                                       change_point[chgidx]->addr - last_addr;
+                               /* move forward only if the new size was non-zero */
+                               if (new_bios[new_bios_entry].size != 0)
+                                       if (++new_bios_entry >= E820MAX)
+                                               break;  /* no more space left for new bios entries */
+                       }
+                       if (current_type != 0)  {
+                               new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+                               new_bios[new_bios_entry].type = current_type;
+                               last_addr=change_point[chgidx]->addr;
+                       }
+                       last_type = current_type;
+               }
+       }
+       new_nr = new_bios_entry;   /* retain count for new bios entries */
+
+       /* copy new bios mapping into original location */
+       memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+       *pnr_map = new_nr;
+
+       return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory.  If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+       /* Only one memory region (or negative)? Ignore it */
+       if (nr_map < 2)
+               return -1;
+
+       do {
+               unsigned long long start = biosmap->addr;
+               unsigned long long size = biosmap->size;
+               unsigned long long end = start + size;
+               unsigned long type = biosmap->type;
+
+               /* Overflow in 64 bits? Ignore the memory map. */
+               if (start > end)
+                       return -1;
+
+               /*
+                * Some BIOSes claim RAM in the 640k - 1M region.
+                * Not right. Fix it up.
+                */
+               if (type == E820_RAM) {
+                       if (start < 0x100000ULL && end > 0xA0000ULL) {
+                               if (start < 0xA0000ULL)
+                                       add_memory_region(start, 0xA0000ULL-start, type);
+                               if (end <= 0x100000ULL)
+                                       continue;
+                               start = 0x100000ULL;
+                               size = end - start;
+                       }
+               }
+               add_memory_region(start, size, type);
+       } while (biosmap++,--nr_map);
+       return 0;
+}
+#endif
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+struct edd edd;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(edd);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ *              from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+     edd.edd_info_nr = EDD_NR;
+}
+#else
+static inline void copy_edd(void)
+{
+}
+#endif
+
+/*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+#define LOWMEMSIZE()   (0x9f000)
+
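+/*
+ * Copy the command line handed over by Xen at start of day and parse the
+ * early boot options (mem=, memmap=, acpi=, highmem=, vmalloc=, ...).
+ */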
+static void __init parse_cmdline_early (char ** cmdline_p)
+{
+       char c = ' ', *to = command_line, *from = saved_command_line;
+       int len = 0;
+       int userdef = 0;
+
+       memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+       /* Save unparsed command line copy for /proc/cmdline */
+       saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+       for (;;) {
+               /*
+                * "mem=nopentium" disables the 4MB page tables.
+                * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+                * to <mem>, overriding the bios size.
+                * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+                * <start> to <start>+<mem>, overriding the bios size.
+                *
+                * HPA tells me bootloaders need to parse mem=, so no new
+                * option should be mem=  [also see Documentation/i386/boot.txt]
+                */
+               if (c == ' ' && !memcmp(from, "mem=", 4)) {
+                       if (to != command_line)
+                               to--;
+                       if (!memcmp(from+4, "nopentium", 9)) {
+                               from += 9+4;
+                               clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+                               disable_pse = 1;
+                       } else {
+                               /* If the user specifies memory size, we
+                                * limit the BIOS-provided memory map to
+                                * that size. exactmap can be used to specify
+                                * the exact map. mem=number can be used to
+                                * trim the existing memory map.
+                                */
+                               unsigned long long mem_size;
+                               mem_size = memparse(from+4, &from);
+                               limit_regions(mem_size);
+                               userdef=1;
+                       }
+               }
+
+               if (c == ' ' && !memcmp(from, "memmap=", 7)) {
+                       if (to != command_line)
+                               to--;
+                       if (!memcmp(from+7, "exactmap", 8)) {
+                               from += 8+7;
+                               e820.nr_map = 0;
+                               userdef = 1;
+                       } else {
+                               /* If the user specifies memory size, we
+                                * limit the BIOS-provided memory map to
+                                * that size. exactmap can be used to specify
+                                * the exact map. mem=number can be used to
+                                * trim the existing memory map.
+                                */
+                               unsigned long long start_at, mem_size;
+                               mem_size = memparse(from+7, &from);
+                               if (*from == '@') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_RAM);
+                               } else if (*from == '#') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_ACPI);
+                               } else if (*from == '$') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_RESERVED);
+                               } else {
+                                       limit_regions(mem_size);
+                                       userdef=1;
+                               }
+                       }
+               }
+
+#ifdef  CONFIG_X86_SMP
+               /*
+                * If the BIOS enumerates physical processors before logical,
+                * maxcpus=N at enumeration-time can be used to disable HT.
+                */
+               else if (!memcmp(from, "maxcpus=", 8)) {
+                       extern unsigned int maxcpus;
+
+                       maxcpus = simple_strtoul(from + 8, NULL, 0);
+               }
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+               /* "acpi=off" disables both ACPI table parsing and interpreter */
+               else if (!memcmp(from, "acpi=off", 8)) {
+                       disable_acpi();
+               }
+
+               /* acpi=force to over-ride black-list */
+               else if (!memcmp(from, "acpi=force", 10)) {
+                       acpi_force = 1;
+                       acpi_ht = 1;
+                       acpi_disabled = 0;
+               }
+
+               /* acpi=strict disables out-of-spec workarounds */
+               else if (!memcmp(from, "acpi=strict", 11)) {
+                       acpi_strict = 1;
+               }
+
+               /* Limit ACPI just to boot-time to enable HT */
+               else if (!memcmp(from, "acpi=ht", 7)) {
+                       if (!acpi_force)
+                               disable_acpi();
+                       acpi_ht = 1;
+               }
+               
+               /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+               else if (!memcmp(from, "pci=noacpi", 10)) {
+                       acpi_disable_pci();
+               }
+               /* "acpi=noirq" disables ACPI interrupt routing */
+               else if (!memcmp(from, "acpi=noirq", 10)) {
+                       acpi_noirq_set();
+               }
+
+               else if (!memcmp(from, "acpi_sci=edge", 13))
+                       acpi_sci_flags.trigger =  1;
+
+               else if (!memcmp(from, "acpi_sci=level", 14))
+                       acpi_sci_flags.trigger = 3;
+
+               else if (!memcmp(from, "acpi_sci=high", 13))
+                       acpi_sci_flags.polarity = 1;
+
+               else if (!memcmp(from, "acpi_sci=low", 12))
+                       acpi_sci_flags.polarity = 3;
+
+#ifdef CONFIG_X86_IO_APIC
+               else if (!memcmp(from, "acpi_skip_timer_override", 24))
+                       acpi_skip_timer_override = 1;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+               /* disable IO-APIC */
+               else if (!memcmp(from, "noapic", 6))
+                       disable_ioapic_setup();
+#endif /* CONFIG_X86_LOCAL_APIC */
+#endif /* CONFIG_ACPI_BOOT */
+
+               /*
+                * highmem=size forces highmem to be exactly 'size' bytes.
+                * This works even on boxes that have no highmem otherwise.
+                * This also works to reduce highmem size on bigger boxes.
+                */
+               if (c == ' ' && !memcmp(from, "highmem=", 8))
+                       highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+       
+               /*
+                * vmalloc=size forces the vmalloc area to be exactly 'size'
+                * bytes. This can be used to increase (or decrease) the
+                * vmalloc area - the default is 128m.
+                */
+               if (c == ' ' && !memcmp(from, "vmalloc=", 8))
+                       __VMALLOC_RESERVE = memparse(from+8, &from);
+
+               c = *(from++);
+               if (!c)
+                       break;
+               if (COMMAND_LINE_SIZE <= ++len)
+                       break;
+               *(to++) = c;
+       }
+       *to = '\0';
+       *cmdline_p = command_line;
+       if (userdef) {
+               printk(KERN_INFO "user-defined physical RAM map:\n");
+               print_memory_map("user");
+       }
+}
+
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long *max_pfn = arg, pfn;
+
+       if (start < end) {
+               pfn = PFN_UP(end -1);
+               if (pfn > *max_pfn)
+                       *max_pfn = pfn;
+       }
+       return 0;
+}
+
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+       int i;
+
+       max_pfn = 0;
+       if (efi_enabled) {
+               efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+               return;
+       }
+
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long start, end;
+               /* RAM? */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               start = PFN_UP(e820.map[i].addr);
+               end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+               if (start >= end)
+                       continue;
+               if (end > max_pfn)
+                       max_pfn = end;
+       }
+}
+
+/*
+ * Determine low and high memory ranges:
+ */
+unsigned long __init find_max_low_pfn(void)
+{
+       unsigned long max_low_pfn;
+
+       max_low_pfn = max_pfn;
+       if (max_low_pfn > MAXMEM_PFN) {
+               if (highmem_pages == -1)
+                       highmem_pages = max_pfn - MAXMEM_PFN;
+               if (highmem_pages + MAXMEM_PFN < max_pfn)
+                       max_pfn = MAXMEM_PFN + highmem_pages;
+               if (highmem_pages + MAXMEM_PFN > max_pfn) {
+                       printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
+                       highmem_pages = 0;
+               }
+               max_low_pfn = MAXMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+               /* Maximum memory usable is what is directly addressable */
+               printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+                                       MAXMEM>>20);
+               if (max_pfn > MAX_NONPAE_PFN)
+                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+               else
+                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+               max_pfn = MAXMEM_PFN;
+#else /* !CONFIG_HIGHMEM */
+#ifndef CONFIG_X86_PAE
+               if (max_pfn > MAX_NONPAE_PFN) {
+                       max_pfn = MAX_NONPAE_PFN;
+                       printk(KERN_WARNING "Warning only 4GB will be used.\n");
+                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+               }
+#endif /* !CONFIG_X86_PAE */
+#endif /* !CONFIG_HIGHMEM */
+       } else {
+               if (highmem_pages == -1)
+                       highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+               if (highmem_pages >= max_pfn) {
+                       printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+                       highmem_pages = 0;
+               }
+               if (highmem_pages) {
+                       if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
+                               printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
+                               highmem_pages = 0;
+                       }
+                       max_low_pfn -= highmem_pages;
+               }
+#else
+               if (highmem_pages)
+                       printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+       }
+       return max_low_pfn;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+
+/*
+ * Free all available memory for boot time allocation.  Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+       /* check max_low_pfn */
+       if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+               return 0;
+       if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
+               end = (max_low_pfn + 1) << PAGE_SHIFT;
+       if (start < end)
+               free_bootmem(start, end - start);
+
+       return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+       int i;
+
+       if (efi_enabled) {
+               efi_memmap_walk(free_available_memory, NULL);
+               return;
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long curr_pfn, last_pfn, size;
+               /*
+                * Only register usable (E820_RAM) low memory:
+                */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               /*
+                * We are rounding up the start address of usable memory:
+                */
+               curr_pfn = PFN_UP(e820.map[i].addr);
+               if (curr_pfn >= max_low_pfn)
+                       continue;
+               /*
+                * ... and at the end of the usable range downwards:
+                */
+               last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+               if (last_pfn > max_low_pfn)
+                       last_pfn = max_low_pfn;
+
+               /*
+                * .. finally, did all the rounding and playing
+                * around just make the area go away?
+                */
+               if (last_pfn <= curr_pfn)
+                       continue;
+
+               size = last_pfn - curr_pfn;
+               free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+       }
+}
+
+/*
+ * workaround for Dell systems that neglect to reserve EBDA
+ */
+static void __init reserve_ebda_region(void)
+{
+       unsigned int addr;
+       addr = get_bios_ebda();
+       if (addr)
+               reserve_bootmem(addr, PAGE_SIZE);       
+}
+
+static unsigned long __init setup_memory(void)
+{
+       unsigned long bootmap_size, start_pfn, max_low_pfn;
+
+       /*
+        * partially used pages are not usable - thus
+        * we are rounding upwards:
+        */
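+       /* Under Xen the initial page tables sit just above the kernel image;
+        * free memory begins after the last page-table frame. */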
+       start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
+
+       find_max_pfn();
+
+       max_low_pfn = find_max_low_pfn();
+
+#ifdef CONFIG_HIGHMEM
+       highstart_pfn = highend_pfn = max_pfn;
+       if (max_pfn > max_low_pfn) {
+               highstart_pfn = max_low_pfn;
+       }
+       printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+               pages_to_mb(highend_pfn - highstart_pfn));
+#endif
+       printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+                       pages_to_mb(max_low_pfn));
+       /*
+        * Initialize the boot-time allocator (with low memory only):
+        */
+       bootmap_size = init_bootmem(start_pfn, max_low_pfn);
+
+       register_bootmem_low_pages(max_low_pfn);
+
+       /*
+        * Reserve the bootmem bitmap itself as well. We do this in two
+        * steps (first step was init_bootmem()) because this catches
+        * the (very unlikely) case of us accidentally initializing the
+        * bootmem allocator with an invalid RAM area.
+        */
+       reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+                        bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+
+       /* reserve EBDA region, it's a 4K region */
+       reserve_ebda_region();
+
+       /* Could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
+          PCI prefetch into it (errata #56). Usually the page is reserved anyway,
+          unless you have no PS/2 mouse plugged in. */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+           boot_cpu_data.x86 == 6)
+               reserve_bootmem(0xa0000 - 4096, 4096);
+
+#ifdef CONFIG_SMP
+       /*
+        * But first pinch a few for the stack/trampoline stuff
+        * FIXME: Don't need the extra page at 4K, but need to fix
+        * trampoline before removing it. (see the GDT stuff)
+        */
+       reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+#endif
+#ifdef CONFIG_ACPI_SLEEP
+       /*
+        * Reserve low memory region for sleep support.
+        */
+       acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_FIND_SMP_CONFIG
+       /*
+        * Find and reserve possible boot-time SMP configuration:
+        */
+       find_smp_config();
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (xen_start_info.mod_start) {
+               if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+                       /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
+                       initrd_start = INITRD_START + PAGE_OFFSET;
+                       initrd_end = initrd_start+INITRD_SIZE;
+                       initrd_below_start_ok = 1;
+               }
+               else {
+                       printk(KERN_ERR "initrd extends beyond end of memory "
+                           "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+                           INITRD_START + INITRD_SIZE,
+                           max_low_pfn << PAGE_SHIFT);
+                       initrd_start = 0;
+               }
+       }
+#endif
+
+       phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
+
+       return max_low_pfn;
+}
+#else
+extern unsigned long setup_memory(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+       int i;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+       probe_roms();
+#endif
+       for (i = 0; i < e820.nr_map; i++) {
+               struct resource *res;
+               if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+                       continue;
+               res = alloc_bootmem_low(sizeof(struct resource));
+               switch (e820.map[i].type) {
+               case E820_RAM:  res->name = "System RAM"; break;
+               case E820_ACPI: res->name = "ACPI Tables"; break;
+               case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
+               default:        res->name = "reserved";
+               }
+               res->start = e820.map[i].addr;
+               res->end = res->start + e820.map[i].size - 1;
+               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               request_resource(&iomem_resource, res);
+               if (e820.map[i].type == E820_RAM) {
+                       /*
+                        *  We don't know which RAM region contains kernel data,
+                        *  so we try it repeatedly and let the resource manager
+                        *  test it.
+                        */
+                       request_resource(res, code_resource);
+                       request_resource(res, data_resource);
+               }
+       }
+}
+
+/*
+ * Request address space for all standard resources
+ */
+static void __init register_memory(unsigned long max_low_pfn)
+{
+       unsigned long low_mem_size;
+       int           i;
+
+       if (efi_enabled)
+               efi_initialize_iomem_resources(&code_resource, &data_resource);
+       else
+               legacy_init_iomem_resources(&code_resource, &data_resource);
+
+       /* EFI systems may still have VGA */
+       request_resource(&iomem_resource, &video_ram_resource);
+
+       /* request I/O space for devices used on all i[345]86 PCs */
+       for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+               request_resource(&ioport_resource, &standard_io_resources[i]);
+
+       /* Tell the PCI layer not to allocate too close to the RAM area.. */
+       low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
+       if (low_mem_size > pci_mem_start)
+               pci_mem_start = low_mem_size;
+}
+
+/* Use inline assembly to define this because the nops are defined 
+   as inline assembly strings in the include files and we cannot 
+   get them easily into strings. */
+asm("\t.data\nintelnops: " 
+    GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+    GENERIC_NOP7 GENERIC_NOP8); 
+asm("\t.data\nk8nops: " 
+    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+    K8_NOP7 K8_NOP8); 
+asm("\t.data\nk7nops: " 
+    K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+    K7_NOP7 K7_NOP8); 
+    
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     intelnops,
+     intelnops + 1,
+     intelnops + 1 + 2,
+     intelnops + 1 + 2 + 3,
+     intelnops + 1 + 2 + 3 + 4,
+     intelnops + 1 + 2 + 3 + 4 + 5,
+     intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+     intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     k8nops,
+     k8nops + 1,
+     k8nops + 1 + 2,
+     k8nops + 1 + 2 + 3,
+     k8nops + 1 + 2 + 3 + 4,
+     k8nops + 1 + 2 + 3 + 4 + 5,
+     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     k7nops,
+     k7nops + 1,
+     k7nops + 1 + 2,
+     k7nops + 1 + 2 + 3,
+     k7nops + 1 + 2 + 3 + 4,
+     k7nops + 1 + 2 + 3 + 4 + 5,
+     k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+     k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
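+/* NOP tables keyed by CPU feature flag; the generic Intel NOPs below are
+   the fallback when no entry matches. */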
+static struct nop { 
+     int cpuid; 
+     unsigned char **noptable; 
+} noptypes[] = { 
+     { X86_FEATURE_K8, k8_nops }, 
+     { X86_FEATURE_K7, k7_nops }, 
+     { -1, NULL }
+}; 
+
+/* Replace instructions with better alternatives for this CPU type.
+
+   This runs before SMP is initialized to avoid SMP problems with
+   self-modifying code. This implies that asymmetric systems where
+   APs have fewer capabilities than the boot processor are not handled.
+   In this case boot with "noreplacement". */ 
+void apply_alternatives(void *start, void *end) 
+{ 
+       struct alt_instr *a; 
+       int diff, i, k;
+       unsigned char **noptable = intel_nops;
+       for (i = 0; noptypes[i].cpuid >= 0; i++) { 
+               if (boot_cpu_has(noptypes[i].cpuid)) { 
+                       noptable = noptypes[i].noptable;
+                       break;
+               }
+       } 
+       for (a = start; (void *)a < end; a++) { 
+               if (!boot_cpu_has(a->cpuid))
+                       continue;
+               BUG_ON(a->replacementlen > a->instrlen); 
+               memcpy(a->instr, a->replacement, a->replacementlen); 
+               diff = a->instrlen - a->replacementlen; 
+               /* Pad the rest with nops */
+               for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+                       k = diff;
+                       if (k > ASM_NOP_MAX)
+                               k = ASM_NOP_MAX;
+                       memcpy(a->instr + i, noptable[k], k); 
+               } 
+       }
+} 
+
+static int no_replacement __initdata = 0; 
+void __init alternative_instructions(void)
+{
+       extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+       if (no_replacement) 
+               return;
+       apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{ 
+     no_replacement = 1; 
+     return 0; 
+} 
+
+__setup("noreplacement", noreplacement_setup); 
+
+static char * __init machine_specific_memory_setup(void);
+
+/*
+ * Determine if we were loaded by an EFI loader.  If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+ * for initialization.  Note, the efi init code path is determined by the
+ * global efi_enabled. This allows the same kernel image to be used on existing
+ * systems (with a traditional BIOS) as well as on EFI systems.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       int i, j;
+
+       unsigned long max_low_pfn;
+
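+       /* Xen occupies the top of the 32-bit virtual address space, so ask
+        * the hypervisor to assist with (emulate) full 4GB flat segments. */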
+       HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                            VMASST_TYPE_4gb_segments);
+
+       pm_idle = xen_cpu_idle;
+
+       memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+       early_cpu_init();
+
+       /*
+        * FIXME: This isn't an official loader_type right
+        * now but does currently work with elilo.
+        * If we were configured as an EFI kernel, check to make
+        * sure that we were loaded correctly from elilo and that
+        * the system table is valid.  If not, then initialize normally.
+        */
+#ifdef CONFIG_EFI
+       if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
+               efi_enabled = 1;
+#endif
+
+       ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); /*old_decode_dev(ORIG_ROOT_DEV);*/
+       drive_info = DRIVE_INFO;
+       screen_info = SCREEN_INFO;
+       edid_info = EDID_INFO;
+       apm_info.bios = APM_BIOS_INFO;
+       ist_info = IST_INFO;
+       saved_videomode = VIDEO_MODE;
+       if( SYS_DESC_TABLE.length != 0 ) {
+               MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
+               machine_id = SYS_DESC_TABLE.table[0];
+               machine_submodel_id = SYS_DESC_TABLE.table[1];
+               BIOS_revision = SYS_DESC_TABLE.table[2];
+       }
+       aux_device_present = AUX_DEVICE_INFO;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+       /* This is drawn from a dump from vgacon:startup in standard Linux. */
+       screen_info.orig_video_mode = 3; 
+       screen_info.orig_video_isVGA = 1;
+       screen_info.orig_video_lines = 25;
+       screen_info.orig_video_cols = 80;
+       screen_info.orig_video_ega_bx = 3;
+       screen_info.orig_video_points = 16;
+#endif
+
+#ifdef CONFIG_BLK_DEV_RAM
+       rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+       rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+       rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+       ARCH_SETUP
+       if (efi_enabled)
+               efi_init();
+       else {
+               printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+               print_memory_map(machine_specific_memory_setup());
+       }
+
+       copy_edd();
+
+       if (!MOUNT_ROOT_RDONLY)
+               root_mountflags &= ~MS_RDONLY;
+       init_mm.start_code = (unsigned long) _text;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
+       init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
+
+       code_resource.start = virt_to_phys(_text);
+       code_resource.end = virt_to_phys(_etext)-1;
+       data_resource.start = virt_to_phys(_etext);
+       data_resource.end = virt_to_phys(_edata)-1;
+
+       parse_cmdline_early(cmdline_p);
+
+       max_low_pfn = setup_memory();
+
+       /*
+        * NOTE: before this point _nobody_ is allowed to allocate
+        * any memory using the bootmem allocator.  Although the
+        * allocator is now initialised, only the first 8MB of the kernel
+        * virtual address space has been mapped.  All allocations before
+        * paging_init() has completed must use the alloc_bootmem_low_pages()
+        * variant (which allocates DMA'able memory) and care must be taken
+        * not to exceed the 8MB limit.
+        */
+
+#ifdef CONFIG_SMP
+       smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
+#endif
+       paging_init();
+
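+       /* Record the machine frame number of every page of the
+        * phys-to-machine table and publish the list via shared_info so
+        * the hypervisor and control tools can locate the table. */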
+       pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+       for (i = 0, j = 0; i < max_pfn; i += (PAGE_SIZE/sizeof(unsigned long)), j++) {
+               pfn_to_mfn_frame_list[j] =
+                       virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+       }
+       HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+               virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+       /*
+        * NOTE: at this point the bootmem allocator is fully available.
+        */
+
+#ifdef CONFIG_EARLY_PRINTK
+       {
+               char *s = strstr(*cmdline_p, "earlyprintk=");
+               if (s) {
+                       extern void setup_early_printk(char *);
+
+                       setup_early_printk(s);
+                       printk("early console enabled\n");
+               }
+       }
+#endif
+
+
+       dmi_scan_machine();
+
+#ifdef CONFIG_X86_GENERICARCH
+       generic_apic_probe(*cmdline_p);
+#endif 
+       if (efi_enabled)
+               efi_map_memmap();
+
+       /*
+        * Parse the ACPI tables for possible boot-time SMP configuration.
+        */
+       acpi_boot_init();
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       if (smp_found_config)
+               get_smp_config();
+#endif
+
+       register_memory(max_low_pfn);
+
+       /* If we are a privileged guest OS then we should request IO privs. */
+       if (xen_start_info.flags & SIF_PRIVILEGED) {
+               dom0_op_t op;
+               op.cmd           = DOM0_IOPL;
+               op.u.iopl.domain = DOMID_SELF;
+               op.u.iopl.iopl   = 1;
+               if (HYPERVISOR_dom0_op(&op) != 0)
+                       panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
+               current->thread.io_pl = 1;
+       }
+
+       if (xen_start_info.flags & SIF_INITDOMAIN) {
+               if (!(xen_start_info.flags & SIF_PRIVILEGED))
+                       panic("Xen granted us console access "
+                             "but not privileged status");
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+               if (!efi_enabled ||
+                   (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+                       conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+               conswitchp = &dummy_con;
+#endif
+#endif
+       } else {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+               extern const struct consw xennull_con;
+               extern int console_use_vt;
+#if defined(CONFIG_VGA_CONSOLE)
+               /* disable VGA driver */
+               ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+#endif
+               conswitchp = &xennull_con;
+               console_use_vt = 0;
+#endif
+       }
+}
+
+#include "setup_arch_post.h"
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/signal.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/signal.c
new file mode 100644 (file)
index 0000000..4c26470
--- /dev/null
@@ -0,0 +1,648 @@
+/*
+ *  linux/arch/i386/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/suspend.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include "sigframe.h"
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+{
+       struct pt_regs * regs = (struct pt_regs *) &history0;
+       sigset_t saveset;
+
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       siginitset(&current->blocked, mask);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->eax = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(regs, &saveset))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(struct pt_regs regs)
+{
+       sigset_t saveset, newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (regs.ecx != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, ~_BLOCKABLE);
+
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs.eax = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(&regs, &saveset))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int 
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+             struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+               if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                       return -EFAULT;
+               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               __get_user(mask, &act->sa_mask);
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                       return -EFAULT;
+               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+       }
+
+       return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(unsigned long ebx)
+{
+       /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
+       struct pt_regs *regs = (struct pt_regs *)&ebx;
+       const stack_t __user *uss = (const stack_t __user *)ebx;
+       stack_t __user *uoss = (stack_t __user *)regs->ecx;
+
+       return do_sigaltstack(uss, uoss, regs->esp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
+{
+       unsigned int err = 0;
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
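+       /* Helpers: COPY pulls a register straight from the sigcontext,
+        * COPY_SEG/COPY_SEG_STRICT restore segment selectors (STRICT forces
+        * RPL 3), GET_SEG reloads a segment register directly. */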
+#define COPY(x)                err |= __get_user(regs->x, &sc->x)
+
+#define COPY_SEG(seg)                                                  \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         regs->x##seg = tmp; }
+
+#define COPY_SEG_STRICT(seg)                                           \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         regs->x##seg = tmp|3; }
+
+#define GET_SEG(seg)                                                   \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         loadsegment(seg,tmp); }
+
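+
+/* Only the user-visible arithmetic/control flags may be restored from a
+   signal context; privileged bits such as IOPL and IF are preserved. */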
+#define        FIX_EFLAGS      (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
+                        X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
+                        X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
+
+       GET_SEG(gs);
+       GET_SEG(fs);
+       COPY_SEG(es);
+       COPY_SEG(ds);
+       COPY(edi);
+       COPY(esi);
+       COPY(ebp);
+       COPY(esp);
+       COPY(ebx);
+       COPY(edx);
+       COPY(ecx);
+       COPY(eip);
+       COPY_SEG_STRICT(cs);
+       COPY_SEG_STRICT(ss);
+       
+       {
+               unsigned int tmpflags;
+               err |= __get_user(tmpflags, &sc->eflags);
+               regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               regs->orig_eax = -1;            /* disable syscall checks */
+       }
+
+       {
+               struct _fpstate __user * buf;
+               err |= __get_user(buf, &sc->fpstate);
+               if (buf) {
+                       if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+                               goto badframe;
+                       err |= restore_i387(buf);
+               }
+       }
+
+       err |= __get_user(*peax, &sc->eax);
+       return err;
+
+badframe:
+       return 1;
+}
+
+asmlinkage int sys_sigreturn(unsigned long __unused)
+{
+       struct pt_regs *regs = (struct pt_regs *) &__unused;
+       struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
+       sigset_t set;
+       int eax;
+
+       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__get_user(set.sig[0], &frame->sc.oldmask)
+           || (_NSIG_WORDS > 1
+               && __copy_from_user(&set.sig[1], &frame->extramask,
+                                   sizeof(frame->extramask))))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       
+       if (restore_sigcontext(regs, &frame->sc, &eax))
+               goto badframe;
+       return eax;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}      
+
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+{
+       struct pt_regs *regs = (struct pt_regs *) &__unused;
+       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
+       sigset_t set;
+       int eax;
+
+       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
+               goto badframe;
+
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
+               goto badframe;
+
+       return eax;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}      
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
+                struct pt_regs *regs, unsigned long mask)
+{
+       int tmp, err = 0;
+       unsigned long eflags;
+
+       tmp = 0;
+       __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+       __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+       err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
+
+       err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
+       err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
+       err |= __put_user(regs->edi, &sc->edi);
+       err |= __put_user(regs->esi, &sc->esi);
+       err |= __put_user(regs->ebp, &sc->ebp);
+       err |= __put_user(regs->esp, &sc->esp);
+       err |= __put_user(regs->ebx, &sc->ebx);
+       err |= __put_user(regs->edx, &sc->edx);
+       err |= __put_user(regs->ecx, &sc->ecx);
+       err |= __put_user(regs->eax, &sc->eax);
+       err |= __put_user(current->thread.trap_no, &sc->trapno);
+       err |= __put_user(current->thread.error_code, &sc->err);
+       err |= __put_user(regs->eip, &sc->eip);
+       err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
+       eflags = regs->eflags;
+       if (current->ptrace & PT_PTRACED) {
+               eflags &= ~TF_MASK;
+       }
+       err |= __put_user(eflags, &sc->eflags);
+       err |= __put_user(regs->esp, &sc->esp_at_signal);
+       err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
+
+       tmp = save_i387(fpstate);
+       if (tmp < 0)
+         err = 1;
+       else
+         err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+
+       /* non-iBCS2 extensions.. */
+       err |= __put_user(mask, &sc->oldmask);
+       err |= __put_user(current->thread.cr2, &sc->cr2);
+
+       return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+       unsigned long esp;
+
+       /* Default to using normal stack */
+       esp = regs->esp;
+
+       /* This is the X/Open sanctioned signal stack switching.  */
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               if (sas_ss_flags(esp) == 0)
+                       esp = current->sas_ss_sp + current->sas_ss_size;
+       }
+
+       /* This is the legacy signal stack switching. */
+       else if ((regs->xss & 0xffff) != __USER_DS &&
+                !(ka->sa.sa_flags & SA_RESTORER) &&
+                ka->sa.sa_restorer) {
+               esp = (unsigned long) ka->sa.sa_restorer;
+       }
+
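+       /* Round the frame start down to an 8-byte boundary (clear low 3 bits). */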
+       return (void __user *)((esp - frame_size) & -8ul);
+}
+
+/* These symbols are defined with the addresses in the vsyscall page.
+   See vsyscall-sigreturn.S.  */
+extern void __user __kernel_sigreturn;
+extern void __user __kernel_rt_sigreturn;
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+                       sigset_t *set, struct pt_regs * regs)
+{
+       void __user *restorer;
+       struct sigframe __user *frame;
+       int err = 0;
+       int usig;
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto give_sigsegv;
+
+       usig = current_thread_info()->exec_domain
+               && current_thread_info()->exec_domain->signal_invmap
+               && sig < 32
+               ? current_thread_info()->exec_domain->signal_invmap[sig]
+               : sig;
+
+       err |= __put_user(usig, &frame->sig);
+       if (err)
+               goto give_sigsegv;
+
+       err |= setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
+       if (err)
+               goto give_sigsegv;
+
+       if (_NSIG_WORDS > 1) {
+               err |= __copy_to_user(&frame->extramask, &set->sig[1],
+                                     sizeof(frame->extramask));
+       }
+       if (err)
+               goto give_sigsegv;
+
+       restorer = &__kernel_sigreturn;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+
+       /* Set up to return from userspace.  */
+       err |= __put_user(restorer, &frame->pretcode);
+        
+       /*
+        * This is popl %eax ; movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
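+       /* Byte layout (x86 is little-endian): 0x58 = popl %eax, 0xb8 =
+        * movl $imm32,%eax with __NR_sigreturn as the immediate at retcode+2,
+        * and 0xcd 0x80 = int $0x80 at retcode+6. */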
+       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
+       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
+
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up registers for signal handler */
+       regs->esp = (unsigned long) frame;
+       regs->eip = (unsigned long) ka->sa.sa_handler;
+       regs->eax = (unsigned long) sig;
+       regs->edx = (unsigned long) 0;
+       regs->ecx = (unsigned long) 0;
+
+       set_fs(USER_DS);
+       regs->xds = __USER_DS;
+       regs->xes = __USER_DS;
+       regs->xss = __USER_DS;
+       regs->xcs = __USER_CS;
+       if (regs->eflags & TF_MASK) {
+               if (current->ptrace & PT_PTRACED) {
+                       ptrace_notify(SIGTRAP);
+               } else {
+                       regs->eflags &= ~TF_MASK;
+               }
+       }
+
+#if DEBUG_SIG
+       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+               current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+       return;
+
+give_sigsegv:
+       force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+                          sigset_t *set, struct pt_regs * regs)
+{
+       void __user *restorer;
+       struct rt_sigframe __user *frame;
+       int err = 0;
+       int usig;
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto give_sigsegv;
+
+       usig = current_thread_info()->exec_domain
+               && current_thread_info()->exec_domain->signal_invmap
+               && sig < 32
+               ? current_thread_info()->exec_domain->signal_invmap[sig]
+               : sig;
+
+       err |= __put_user(usig, &frame->sig);
+       err |= __put_user(&frame->info, &frame->pinfo);
+       err |= __put_user(&frame->uc, &frame->puc);
+       err |= copy_siginfo_to_user(&frame->info, info);
+       if (err)
+               goto give_sigsegv;
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(0, &frame->uc.uc_link);
+       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+       err |= __put_user(sas_ss_flags(regs->esp),
+                         &frame->uc.uc_stack.ss_flags);
+       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+       err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+                               regs, set->sig[0]);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up to return from userspace.  */
+       restorer = &__kernel_rt_sigreturn;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+       err |= __put_user(restorer, &frame->pretcode);
+        
+       /*
+        * This is movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
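+       /* Here 0xb8 is the movl $imm32,%eax opcode byte, __NR_rt_sigreturn is
+        * the immediate at retcode+1, and 0xcd 0x80 = int $0x80 at retcode+5. */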
+       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
+       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
+
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up registers for signal handler */
+       regs->esp = (unsigned long) frame;
+       regs->eip = (unsigned long) ka->sa.sa_handler;
+       regs->eax = (unsigned long) usig;
+       regs->edx = (unsigned long) &frame->info;
+       regs->ecx = (unsigned long) &frame->uc;
+
+       set_fs(USER_DS);
+       regs->xds = __USER_DS;
+       regs->xes = __USER_DS;
+       regs->xss = __USER_DS;
+       regs->xcs = __USER_CS;
+       if (regs->eflags & TF_MASK) {
+               if (current->ptrace & PT_PTRACED) {
+                       ptrace_notify(SIGTRAP);
+               } else {
+                       regs->eflags &= ~TF_MASK;
+               }
+       }
+
+#if DEBUG_SIG
+       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+               current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+       return;
+
+give_sigsegv:
+       force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */    
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+             sigset_t *oldset, struct pt_regs * regs)
+{
+       /* Are we from a system call? */
+       if (regs->orig_eax >= 0) {
+               /* If so, check system call restarting.. */
+               switch (regs->eax) {
+                       case -ERESTART_RESTARTBLOCK:
+                       case -ERESTARTNOHAND:
+                               regs->eax = -EINTR;
+                               break;
+
+                       case -ERESTARTSYS:
+                               if (!(ka->sa.sa_flags & SA_RESTART)) {
+                                       regs->eax = -EINTR;
+                                       break;
+                               }
+                       /* fallthrough */
+                       case -ERESTARTNOINTR:
+                               regs->eax = regs->orig_eax;
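+                               /* Back eip up over the two-byte "int $0x80"
+                                * so the syscall is re-issued on return. */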
+                               regs->eip -= 2;
+               }
+       }
+
+       /* Set up the stack frame */
+       if (ka->sa.sa_flags & SA_SIGINFO)
+               setup_rt_frame(sig, ka, info, oldset, regs);
+       else
+               setup_frame(sig, ka, oldset, regs);
+
+       if (!(ka->sa.sa_flags & SA_NODEFER)) {
+               spin_lock_irq(&current->sighand->siglock);
+               sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+               sigaddset(&current->blocked,sig);
+               recalc_sigpending();
+               spin_unlock_irq(&current->sighand->siglock);
+       }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+       siginfo_t info;
+       int signr;
+       struct k_sigaction ka;
+
+       /*
+        * We want the common case to go fast, which
+        * is why we may in certain cases get here from
+        * kernel mode. Just return without doing anything
+        * if so.
+        */
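+       /* Bit 1 of the CS RPL is set only for rings 2 and 3; the Xen guest
+        * kernel runs in ring 1, so this test distinguishes user mode. */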
+       if ((regs->xcs & 2) != 2)
+               return 1;
+
+       if (current->flags & PF_FREEZE) {
+               refrigerator(0);
+               goto no_signal;
+       }
+
+       if (!oldset)
+               oldset = &current->blocked;
+
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+       if (signr > 0) {
+               /* Reenable any watchpoints before delivering the
+                * signal to user space. The processor register will
+                * have been cleared if the watchpoint triggered
+                * inside the kernel.
+                */
+               if (current->thread.debugreg[7])
+                       HYPERVISOR_set_debugreg(7,
+                                               current->thread.debugreg[7]);
+
+               /* Whee!  Actually deliver the signal.  */
+               handle_signal(signr, &info, &ka, oldset, regs);
+               return 1;
+       }
+
+ no_signal:
+       /* Did we come from a system call? */
+       if (regs->orig_eax >= 0) {
+               /* Restart the system call - no handlers present */
+               if (regs->eax == -ERESTARTNOHAND ||
+                   regs->eax == -ERESTARTSYS ||
+                   regs->eax == -ERESTARTNOINTR) {
+                       regs->eax = regs->orig_eax;
+                       regs->eip -= 2;
+               }
+               if (regs->eax == -ERESTART_RESTARTBLOCK){
+                       regs->eax = __NR_restart_syscall;
+                       regs->eip -= 2;
+               }
+       }
+       return 0;
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+__attribute__((regparm(3)))
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+                     __u32 thread_info_flags)
+{
+       /* Pending single-step? */
+       if (thread_info_flags & _TIF_SINGLESTEP) {
+               regs->eflags |= TF_MASK;
+               clear_thread_flag(TIF_SINGLESTEP);
+       }
+       /* deal with pending signal delivery */
+       if (thread_info_flags & _TIF_SIGPENDING)
+               do_signal(regs,oldset);
+       
+       clear_thread_flag(TIF_IRET);
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/sysenter.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/sysenter.c
new file mode 100644 (file)
index 0000000..93bdc0f
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * linux/arch/i386/kernel/sysenter.c
+ *
+ * (C) Copyright 2002 Linus Torvalds
+ *
+ * This file contains the needed initializations to support sysenter.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/elf.h>
+
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/pgtable.h>
+#include <asm/unistd.h>
+
+extern asmlinkage void sysenter_entry(void);
+
+void enable_sep_cpu(void *info)
+{
+       int cpu = get_cpu();
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+       tss->ss1 = __KERNEL_CS;
+       tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+       wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+       put_cpu();      
+}
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_int80_start, vsyscall_int80_end;
+extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+
+static int __init sysenter_setup(void)
+{
+       void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+
+       __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+
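+       /* Sysenter is not yet wired up for Xen (note the hard-coded "1" in the
+        * condition below), so the int $0x80 based vsyscall page is always
+        * installed and the sysenter MSR setup is skipped. */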
+       if (1 /* XXXcl not yet */ || !boot_cpu_has(X86_FEATURE_SEP)) {
+               memcpy(page,
+                      &vsyscall_int80_start,
+                      &vsyscall_int80_end - &vsyscall_int80_start);
+               return 0;
+       }
+
+       memcpy(page,
+              &vsyscall_sysenter_start,
+              &vsyscall_sysenter_end - &vsyscall_sysenter_start);
+
+       on_each_cpu(enable_sep_cpu, NULL, 1, 1);
+       return 0;
+}
+
+__initcall(sysenter_setup);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c
new file mode 100644 (file)
index 0000000..15bc102
--- /dev/null
@@ -0,0 +1,710 @@
+/*
+ *  linux/arch/i386/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02    Alan Modra
+ *     fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26    Markus Kuhn
+ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ *      precision CMOS clock update
+ * 1996-05-03    Ingo Molnar
+ *      fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
+ *             "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05    (Various)
+ *     More robust do_fast_gettimeoffset() algorithm implemented
+ *     (works with APM, Cyrix 6x86MX and Centaur C6),
+ *     monotonic gettimeofday() with fast_get_timeoffset(),
+ *     drift-proof precision TSC calibration on boot
+ *     (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
+ *     Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
+ *     ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
+ * 1998-12-16    Andrea Arcangeli
+ *     Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ *     because was not accounting lost_ticks.
+ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
+ *     Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ *     serialize accesses to xtime/lost_ticks).
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/bcd.h>
+#include <linux/efi.h>
+#include <linux/sysctl.h>
+
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+
+#include "mach_time.h"
+
+#include <linux/timex.h>
+#include <linux/config.h>
+
+#include <asm/hpet.h>
+
+#include <asm/arch_hooks.h>
+
+#include "io_ports.h"
+
+extern spinlock_t i8259A_lock;
+int pit_latch_buggy;              /* extern */
+
+#include "do_timer.h"
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+
+extern unsigned long wall_jiffies;
+
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+
+spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
+EXPORT_SYMBOL(i8253_lock);
+
+struct timer_opts *cur_timer = &timer_none;
+
+/* These are periodically updated in shared_info, and then copied here. */
+u32 shadow_tsc_stamp;
+u64 shadow_system_time;
+static u32 shadow_time_version;
+static struct timeval shadow_tv;
+extern u64 processed_system_time;
+
+/*
+ * We use this to ensure that gettimeofday() is monotonically increasing. We
+ * only break this guarantee if the wall clock jumps backwards "a long way".
+ */
+static struct timeval last_seen_tv = {0,0};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+/* Periodically propagate synchronised time base to the RTC and to Xen. */
+static long last_rtc_update, last_update_to_xen;
+#endif
+
+/* Periodically take synchronised time base from Xen, if we need it. */
+static long last_update_from_xen;   /* UTC seconds when last read Xen clock. */
+
+/* Keep track of last time we did processing/updating of jiffies and xtime. */
+u64 processed_system_time;   /* System time (ns) at last processing. */
+
+#define NS_PER_TICK (1000000000ULL/HZ)
+
+#define HANDLE_USEC_UNDERFLOW(_tv) do {                \
+       while ((_tv).tv_usec < 0) {             \
+               (_tv).tv_usec += USEC_PER_SEC;  \
+               (_tv).tv_sec--;                 \
+       }                                       \
+} while (0)
+#define HANDLE_USEC_OVERFLOW(_tv) do {         \
+       while ((_tv).tv_usec >= USEC_PER_SEC) { \
+               (_tv).tv_usec -= USEC_PER_SEC;  \
+               (_tv).tv_sec++;                 \
+       }                                       \
+} while (0)
+static inline void __normalize_time(time_t *sec, s64 *nsec)
+{
+       while (*nsec >= NSEC_PER_SEC) {
+               (*nsec) -= NSEC_PER_SEC;
+               (*sec)++;
+       }
+       while (*nsec < 0) {
+               (*nsec) += NSEC_PER_SEC;
+               (*sec)--;
+       }
+}
+
+/* Does this guest OS track Xen time, or set its wall clock independently? */
+static int independent_wallclock = 0;
+static int __init __independent_wallclock(char *str)
+{
+       independent_wallclock = 1;
+       return 1;
+}
+__setup("independent_wallclock", __independent_wallclock);
+#define INDEPENDENT_WALLCLOCK() \
+    (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+static void __get_time_values_from_xen(void)
+{
+       shared_info_t *s = HYPERVISOR_shared_info;
+
+       do {
+               shadow_time_version = s->time_version2;
+               rmb();
+               shadow_tv.tv_sec    = s->wc_sec;
+               shadow_tv.tv_usec   = s->wc_usec;
+               shadow_tsc_stamp    = (u32)s->tsc_timestamp;
+               shadow_system_time  = s->system_time;
+               rmb();
+       }
+       while (shadow_time_version != s->time_version1);
+
+       cur_timer->mark_offset();
+}
+
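+/*
+ * True iff our shadow copy still matches Xen's live version counter, i.e. the
+ * shared time fields have not changed since __get_time_values_from_xen() ran.
+ */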
+#define TIME_VALUES_UP_TO_DATE \
+ ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
+
+/*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+       unsigned long seq;
+       unsigned long usec, sec;
+       unsigned long max_ntp_tick;
+       unsigned long flags;
+       s64 nsec;
+
+       do {
+               unsigned long lost;
+
+               seq = read_seqbegin(&xtime_lock);
+
+               usec = cur_timer->get_offset();
+               lost = jiffies - wall_jiffies;
+
+               /*
+                * If time_adjust is negative then NTP is slowing the clock
+                * so make sure not to go into next possible interval.
+                * Better to lose some accuracy than have time go backwards..
+                */
+               if (unlikely(time_adjust < 0)) {
+                       max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+                       usec = min(usec, max_ntp_tick);
+
+                       if (lost)
+                               usec += lost * max_ntp_tick;
+               }
+               else if (unlikely(lost))
+                       usec += lost * (USEC_PER_SEC / HZ);
+
+               sec = xtime.tv_sec;
+               usec += (xtime.tv_nsec / NSEC_PER_USEC);
+
+               nsec = shadow_system_time - processed_system_time;
+               __normalize_time(&sec, &nsec);
+               usec += (long)nsec / NSEC_PER_USEC;
+
+               if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+                       /*
+                        * We may have blocked for a long time,
+                        * rendering our calculations invalid
+                        * (e.g. the time delta may have
+                        * overflowed). Detect that and recalculate
+                        * with fresh values.
+                        */
+                       write_seqlock_irqsave(&xtime_lock, flags);
+                       __get_time_values_from_xen();
+                       write_sequnlock_irqrestore(&xtime_lock, flags);
+                       continue;
+               }
+       } while (read_seqretry(&xtime_lock, seq));
+
+       while (usec >= USEC_PER_SEC) {
+               usec -= USEC_PER_SEC;
+               sec++;
+       }
+
+       /* Ensure that time-of-day is monotonically increasing. */
+       if ((sec < last_seen_tv.tv_sec) ||
+           ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
+               sec = last_seen_tv.tv_sec;
+               usec = last_seen_tv.tv_usec;
+       } else {
+               last_seen_tv.tv_sec = sec;
+               last_seen_tv.tv_usec = usec;
+       }
+
+       tv->tv_sec = sec;
+       tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+       time_t wtm_sec, sec = tv->tv_sec;
+       long wtm_nsec;
+       s64 nsec;
+       struct timespec xentime;
+
+       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+               return -EINVAL;
+
+       if (!INDEPENDENT_WALLCLOCK())
+               return 0; /* Silent failure? */
+
+       write_seqlock_irq(&xtime_lock);
+
+       /*
+        * If we have been blocked for a long time, our time delta may have
+        * overflowed and the shadow time values gone stale. Detect that case
+        * and retry with freshly fetched values.
+        */
+ again:
+       nsec = tv->tv_nsec - cur_timer->get_offset() * NSEC_PER_USEC;
+       if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+               __get_time_values_from_xen();
+               goto again;
+       }
+
+       __normalize_time(&sec, &nsec);
+       set_normalized_timespec(&xentime, sec, nsec);
+
+       /*
+        * This is revolting. We need to set "xtime" correctly. However, the
+        * value in this location is the value at the most recent update of
+        * wall time.  Discover what correction gettimeofday() would have
+        * made, and then undo it!
+        */
+       nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+       nsec -= (shadow_system_time - processed_system_time);
+
+       __normalize_time(&sec, &nsec);
+       wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+       wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+       set_normalized_timespec(&xtime, sec, nsec);
+       set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+       time_adjust = 0;                /* stop active adjtime() */
+       time_status |= STA_UNSYNC;
+       time_maxerror = NTP_PHASE_LIMIT;
+       time_esterror = NTP_PHASE_LIMIT;
+
+       /* Reset all our running time counts. They make no sense now. */
+       last_seen_tv.tv_sec = 0;
+       last_update_from_xen = 0;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+       if (xen_start_info.flags & SIF_INITDOMAIN) {
+               dom0_op_t op;
+               last_rtc_update = last_update_to_xen = 0;
+               op.cmd = DOM0_SETTIME;
+               op.u.settime.secs        = xentime.tv_sec;
+               op.u.settime.usecs       = xentime.tv_nsec / NSEC_PER_USEC;
+               op.u.settime.system_time = shadow_system_time;
+               write_sequnlock_irq(&xtime_lock);
+               HYPERVISOR_dom0_op(&op);
+       } else
+#endif
+               write_sequnlock_irq(&xtime_lock);
+
+       clock_was_set();
+       return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static int set_rtc_mmss(unsigned long nowtime)
+{
+       int retval;
+
+       /* gets recalled with irq locally disabled */
+       spin_lock(&rtc_lock);
+       if (efi_enabled)
+               retval = efi_set_rtc_mmss(nowtime);
+       else
+               retval = mach_set_rtc_mmss(nowtime);
+       spin_unlock(&rtc_lock);
+
+       return retval;
+}
+#endif
+
+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ *             Note: This function is required to return accurate
+ *             time even in the absence of multiple timer ticks.
+ */
+unsigned long long monotonic_clock(void)
+{
+       return cur_timer->monotonic_clock();
+}
+EXPORT_SYMBOL(monotonic_clock);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+unsigned long profile_pc(struct pt_regs *regs)
+{
+       unsigned long pc = instruction_pointer(regs);
+
+       if (in_lock_functions(pc))
+               return *(unsigned long *)(regs->ebp + 4);
+
+       return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id,
+                                       struct pt_regs *regs)
+{
+       time_t wtm_sec, sec;
+       s64 delta, nsec;
+       long sec_diff, wtm_nsec;
+
+       do {
+               __get_time_values_from_xen();
+
+               delta = (s64)(shadow_system_time +
+                             (cur_timer->get_offset() * NSEC_PER_USEC) -
+                             processed_system_time);
+       }
+       while (!TIME_VALUES_UP_TO_DATE);
+
+       if (unlikely(delta < 0)) {
+               printk("Timer ISR: Time went backwards: %lld %lld %ld %lld\n",
+                      delta, shadow_system_time,
+                      (cur_timer->get_offset() * NSEC_PER_USEC), 
+                      processed_system_time);
+               return;
+       }
+
+       /* Process elapsed jiffies since last call. */
+       while (delta >= NS_PER_TICK) {
+               delta -= NS_PER_TICK;
+               processed_system_time += NS_PER_TICK;
+               do_timer_interrupt_hook(regs);
+       }
+
+       /*
+        * Take synchronised time from Xen once a minute if we're not
+        * synchronised ourselves, and we haven't chosen to keep an independent
+        * time base.
+        */
+       if (!INDEPENDENT_WALLCLOCK() &&
+           ((time_status & STA_UNSYNC) != 0) &&
+           (xtime.tv_sec > (last_update_from_xen + 60))) {
+               /* Adjust shadow for jiffies that haven't updated xtime yet. */
+               shadow_tv.tv_usec -= 
+                       (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
+               HANDLE_USEC_UNDERFLOW(shadow_tv);
+
+               /*
+                * Reset our running time counts if they are invalidated by
+                * a warp backwards of more than 500ms.
+                */
+               sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+               if (unlikely(abs(sec_diff) > 1) ||
+                   unlikely(((sec_diff * USEC_PER_SEC) +
+                             (xtime.tv_nsec / NSEC_PER_USEC) -
+                             shadow_tv.tv_usec) > 500000)) {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+                       last_rtc_update = last_update_to_xen = 0;
+#endif
+                       last_seen_tv.tv_sec = 0;
+               }
+
+               /* Update our unsynchronised xtime appropriately. */
+               sec = shadow_tv.tv_sec;
+               nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+
+               __normalize_time(&sec, &nsec);
+               wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+               wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+               set_normalized_timespec(&xtime, sec, nsec);
+               set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+               last_update_from_xen = sec;
+       }
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+       if (!(xen_start_info.flags & SIF_INITDOMAIN))
+               return;
+
+       /* Send synchronised time to Xen approximately every minute. */
+       if (((time_status & STA_UNSYNC) == 0) &&
+           (xtime.tv_sec > (last_update_to_xen + 60))) {
+               dom0_op_t op;
+               struct timeval tv;
+
+               tv.tv_sec   = xtime.tv_sec;
+               tv.tv_usec  = xtime.tv_nsec / NSEC_PER_USEC;
+               tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
+               HANDLE_USEC_OVERFLOW(tv);
+
+               op.cmd = DOM0_SETTIME;
+               op.u.settime.secs        = tv.tv_sec;
+               op.u.settime.usecs       = tv.tv_usec;
+               op.u.settime.system_time = shadow_system_time;
+               HYPERVISOR_dom0_op(&op);
+
+               last_update_to_xen = xtime.tv_sec;
+       }
+
+       /*
+        * If we have an externally synchronized Linux clock, then update
+        * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+        * called as close as possible to 500 ms before the new second starts.
+        */
+       if ((time_status & STA_UNSYNC) == 0 &&
+           xtime.tv_sec > last_rtc_update + 660 &&
+           (xtime.tv_nsec / 1000)
+                       >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+           (xtime.tv_nsec / 1000)
+                       <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
+               /* horrible...FIXME */
+               if (efi_enabled) {
+                       if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
+                               last_rtc_update = xtime.tv_sec;
+                       else
+                               last_rtc_update = xtime.tv_sec - 600;
+               } else if (set_rtc_mmss(xtime.tv_sec) == 0)
+                       last_rtc_update = xtime.tv_sec;
+               else
+                       last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+       }
+#endif
+}
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       /*
+        * Here we are in the timer irq handler. We just have irqs locally
+        * disabled but we don't know if the timer_bh is running on the other
+        * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+        * the irq version of write_lock because as just said we have irq
+        * locally disabled. -arca
+        */
+       write_seqlock(&xtime_lock);
+       do_timer_interrupt(irq, NULL, regs);
+       write_sequnlock(&xtime_lock);
+       return IRQ_HANDLED;
+}
+
+/* not static: needed by APM */
+unsigned long get_cmos_time(void)
+{
+       unsigned long retval;
+
+       spin_lock(&rtc_lock);
+
+       if (efi_enabled)
+               retval = efi_get_time();
+       else
+               retval = mach_get_cmos_time();
+
+       spin_unlock(&rtc_lock);
+
+       return retval;
+}
+
+static long clock_cmos_diff;
+
+static int __time_suspend(struct sys_device *dev, u32 state)
+{
+       /*
+        * Estimate time zone so that set_time can update the clock
+        */
+       clock_cmos_diff = -get_cmos_time();
+       clock_cmos_diff += get_seconds();
+       return 0;
+}
+
+static int __time_resume(struct sys_device *dev)
+{
+       unsigned long flags;
+       unsigned long sec = get_cmos_time() + clock_cmos_diff;
+       write_seqlock_irqsave(&xtime_lock, flags);
+       xtime.tv_sec = sec;
+       xtime.tv_nsec = 0;
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+       return 0;
+}
+
+static struct sysdev_class pit_sysclass = {
+       .resume = __time_resume,
+       .suspend = __time_suspend,
+       set_kset_name("pit"),
+};
+
+
+/* XXX this driverfs stuff should probably go elsewhere later -john */
+static struct sys_device device_i8253 = {
+       .id     = 0,
+       .cls    = &pit_sysclass,
+};
+
+static int time_init_device(void)
+{
+       int error = sysdev_class_register(&pit_sysclass);
+       if (!error)
+               error = sysdev_register(&device_i8253);
+       return error;
+}
+
+device_initcall(time_init_device);
+
+#ifdef CONFIG_HPET_TIMER
+extern void (*late_time_init)(void);
+/* Duplicate of time_init() below, with hpet_enable part added */
+void __init hpet_time_init(void)
+{
+       xtime.tv_sec = get_cmos_time();
+       wall_to_monotonic.tv_sec = -xtime.tv_sec;
+       xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
+
+       if (hpet_enable() >= 0) {
+               printk("Using HPET for base-timer\n");
+       }
+
+       cur_timer = select_timer();
+       printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+       time_init_hook();
+}
+#endif
+
+/* Dynamically-mapped IRQ. */
+static int time_irq;
+
+static struct irqaction irq_timer = {
+       timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
+       NULL, NULL
+};
+
+void __init time_init(void)
+{
+#ifdef CONFIG_HPET_TIMER
+       if (is_hpet_capable()) {
+               /*
+                * HPET initialization needs to do memory-mapped io. So, let
+                * us do a late initialization after mem_init().
+                */
+               late_time_init = hpet_time_init;
+               return;
+       }
+#endif
+       __get_time_values_from_xen();
+       xtime.tv_sec = shadow_tv.tv_sec;
+       wall_to_monotonic.tv_sec = -xtime.tv_sec;
+       xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
+       processed_system_time = shadow_system_time;
+
+       cur_timer = select_timer();
+       printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+       time_irq = bind_virq_to_irq(VIRQ_TIMER);
+
+       (void)setup_irq(time_irq, &irq_timer);
+}
+
+/* Convert jiffies to system time. Call with xtime_lock held for reading. */
+static inline u64 __jiffies_to_st(unsigned long j) 
+{
+       return processed_system_time + ((j - jiffies) * NS_PER_TICK);
+}
+
+/*
+ * This function works out when the next timer function has to be
+ * executed (by looking at the timer list) and sets the Xen one-shot
+ * domain timer to the appropriate value. This is typically called in
+ * cpu_idle() before the domain blocks.
+ * 
+ * The function returns a non-0 value on error conditions.
+ * 
+ * It must be called with interrupts disabled.
+ */
+int set_timeout_timer(void)
+{
+       u64 alarm = 0;
+       int ret = 0;
+
+       /*
+        * This is safe against long blocking (since calculations are
+        * not based on TSC deltas). It is also safe against warped
+        * system time since suspend-resume is cooperative and we
+        * would first get locked out. It is safe against normal
+        * updates of jiffies since interrupts are off.
+        */
+       alarm = __jiffies_to_st(next_timer_interrupt());
+
+       /* Failure is pretty bad, but we'd best soldier on. */
+       if ( HYPERVISOR_set_timer_op(alarm) != 0 )
+               ret = -1;
+
+       return ret;
+}
+
+void time_suspend(void)
+{
+}
+
+void time_resume(void)
+{
+       unsigned long flags;
+       /* xtime_lock is a seqlock; use the seqlock write primitives. */
+       write_seqlock_irqsave(&xtime_lock, flags);
+       /* Get timebases for new environment. */
+       __get_time_values_from_xen();
+       /* Reset our own concept of passage of system time. */
+       processed_system_time = shadow_system_time;
+       /* Accept a warp in UTC (wall-clock) time. */
+       last_seen_tv.tv_sec = 0;
+       /* Make sure we resync UTC time with Xen on next timer interrupt. */
+       last_update_from_xen = 0;
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+
+/*
+ * /proc/sys/xen: This really belongs in another file. It can stay here for
+ * now however.
+ */
+static ctl_table xen_subtable[] = {
+       {1, "independent_wallclock", &independent_wallclock,
+        sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
+       {0}
+};
+static ctl_table xen_table[] = {
+       {123, "xen", NULL, 0, 0555, xen_subtable},
+       {0}
+};
+static int __init xen_sysctl_init(void)
+{
+       (void)register_sysctl_table(xen_table, 0);
+       return 0;
+}
+__initcall(xen_sysctl_init);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/Makefile
new file mode 100644 (file)
index 0000000..f85f7de
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Makefile for x86 timers
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+obj-y :=       timer_tsc.o
+c-obj-y :=     timer.o timer_none.o timer_pit.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/timers/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
new file mode 100644 (file)
index 0000000..683ad98
--- /dev/null
@@ -0,0 +1,382 @@
+/*
+ * This code largely moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/cpufreq.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <asm/timer.h>
+#include <asm/io.h>
+/* processor.h for tsc_disable flag */
+#include <asm/processor.h>
+
+#include "io_ports.h"
+#include "mach_timer.h"
+
+#include <asm/hpet.h>
+
+#ifdef CONFIG_HPET_TIMER
+static unsigned long hpet_usec_quotient;
+static unsigned long hpet_last;
+struct timer_opts timer_tsc;
+#endif
+
+static inline void cpufreq_delayed_get(void);
+
+int tsc_disable __initdata = 0;
+
+extern spinlock_t i8253_lock;
+
+static int use_tsc;
+
+static unsigned long long monotonic_base;
+static u32 monotonic_offset;
+static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
+
+/* convert from cycles(64bits) => nanoseconds (64bits)
+ *  basic equation:
+ *             ns = cycles / (freq / ns_per_sec)
+ *             ns = cycles * (ns_per_sec / freq)
+ *             ns = cycles * (10^9 / (cpu_mhz * 10^6))
+ *             ns = cycles * (10^3 / cpu_mhz)
+ *
+ *     Then we use scaling math (suggested by george@mvista.com) to get:
+ *             ns = cycles * (10^3 * SC / cpu_mhz) / SC
+ *             ns = cycles * cyc2ns_scale / SC
+ *
+ *     And since SC is a constant power of two, we can convert the div
+ *  into a shift.   
+ *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale; 
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
+{
+       cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+       return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
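+
+/*
+ * Illustrative example (hypothetical 2000 MHz CPU): cyc2ns_scale =
+ * (1000 << 10) / 2000 = 512, so cycles_2_ns(c) = (c * 512) >> 10 = c / 2,
+ * i.e. each cycle accounts for 0.5 ns.
+ */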
+
+#if 0
+static int count2; /* counter for mark_offset_tsc() */
+#endif
+
+/* Cached *multiplier* to convert TSC counts to microseconds.
+ * (see the equation below).
+ * Equal to 2^32 * (1 / (clocks per usec) ).
+ * Initialized in time_init.
+ */
+static unsigned long fast_gettimeoffset_quotient;
+
+extern u32 shadow_tsc_stamp;
+extern u64 shadow_system_time;
+
+static unsigned long get_offset_tsc(void)
+{
+       register unsigned long eax, edx;
+
+       /* Read the Time Stamp Counter */
+
+       rdtsc(eax,edx);
+
+       /* .. relative to previous jiffy (32 bits is enough) */
+       eax -= shadow_tsc_stamp;
+
+       /*
+        * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
+        *             = (tsc_low delta) * (usecs_per_clock)
+        *             = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
+        *
+        * Using a mull instead of a divl saves up to 31 clock cycles
+        * in the critical path.
+        */
+
+       __asm__("mull %2"
+               :"=a" (eax), "=d" (edx)
+               :"rm" (fast_gettimeoffset_quotient),
+                "0" (eax));
+
+       /* our adjusted time offset in microseconds */
+       return edx;
+}
+
+static unsigned long long monotonic_clock_tsc(void)
+{
+       unsigned long long last_offset, this_offset, base;
+       unsigned seq;
+       
+       /* atomically read monotonic base & last_offset */
+       do {
+               seq = read_seqbegin(&monotonic_lock);
+               last_offset = monotonic_offset;
+               base = monotonic_base;
+       } while (read_seqretry(&monotonic_lock, seq));
+
+       /* Read the Time Stamp Counter */
+       rdtscll(this_offset);
+
+       /* return the value in ns */
+       return base + cycles_2_ns(this_offset - last_offset);
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+       unsigned long long this_offset;
+
+       /*
+        * In the NUMA case we don't use the TSC as they are not
+        * synchronized across all CPUs.
+        */
+#ifndef CONFIG_NUMA
+       if (!use_tsc)
+#endif
+               /* no locking but a rare wrong value is not a big deal */
+               return jiffies_64 * (1000000000 / HZ);
+
+       /* Read the Time Stamp Counter */
+       rdtscll(this_offset);
+
+       /* return the value in ns */
+       return cycles_2_ns(this_offset);
+}
+
+
+static void mark_offset_tsc(void)
+{
+
+       /* update the monotonic base value */
+       write_seqlock(&monotonic_lock);
+       monotonic_base = shadow_system_time;
+       monotonic_offset = shadow_tsc_stamp;
+       write_sequnlock(&monotonic_lock);
+}
+
+static void delay_tsc(unsigned long loops)
+{
+       unsigned long bclock, now;
+       
+       rdtscl(bclock);
+       do
+       {
+               rep_nop();
+               rdtscl(now);
+       } while ((now-bclock) < loops);
+}
+
+#ifdef CONFIG_HPET_TIMER
+static void mark_offset_tsc_hpet(void)
+{
+       unsigned long long this_offset, last_offset;
+       unsigned long offset, temp, hpet_current;
+
+       write_seqlock(&monotonic_lock);
+       last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       /*
+        * It is important that these two operations happen almost at
+        * the same time. We do the RDTSC stuff first, since it's
+        * faster. To avoid any inconsistencies, we need interrupts
+        * disabled locally.
+        */
+       /*
+        * Interrupts are just disabled locally since the timer irq
+        * has the SA_INTERRUPT flag set. -arca
+        */
+       /* read Pentium cycle counter */
+
+       hpet_current = hpet_readl(HPET_COUNTER);
+       rdtsc(last_tsc_low, last_tsc_high);
+
+       /* lost tick compensation */
+       offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
+       if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+               int lost_ticks = (offset - hpet_last) / hpet_tick;
+               jiffies_64 += lost_ticks;
+       }
+       hpet_last = hpet_current;
+
+       /* update the monotonic base value */
+       this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       monotonic_base += cycles_2_ns(this_offset - last_offset);
+       write_sequnlock(&monotonic_lock);
+
+       /* calculate delay_at_last_interrupt */
+       /*
+        * Time offset = (hpet delta) * ( usecs per HPET clock )
+        *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
+        *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
+        * Where,
+        * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
+        */
+       delay_at_last_interrupt = hpet_current - offset;
+       ASM_MUL64_REG(temp, delay_at_last_interrupt,
+                       hpet_usec_quotient, delay_at_last_interrupt);
+}
+#endif
+
+
+#ifdef CONFIG_CPU_FREQ
+#include <linux/workqueue.h>
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+       unsigned int cpu;
+       for_each_online_cpu(cpu) {
+               cpufreq_get(cpu);
+       }
+       cpufreq_delayed_issched = 0;
+}
+
+/* If we notice lost ticks, schedule a call to cpufreq_get(), which verifies
+ * that the CPU frequency the timing core thinks the CPU is running at is
+ * still correct.
+ */
+static inline void cpufreq_delayed_get(void) 
+{
+       if (cpufreq_init && !cpufreq_delayed_issched) {
+               cpufreq_delayed_issched = 1;
+               printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+               schedule_work(&cpufreq_delayed_get_work);
+       }
+}
+
+/* If the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+
+static unsigned int  ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+
+#ifndef CONFIG_SMP
+static unsigned long fast_gettimeoffset_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+#endif
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+                     void *data)
+{
+       struct cpufreq_freqs *freq = data;
+
+       if (val != CPUFREQ_RESUMECHANGE)
+               write_seqlock_irq(&xtime_lock);
+       if (!ref_freq) {
+               ref_freq = freq->old;
+               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+#ifndef CONFIG_SMP
+               fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
+               cpu_khz_ref = cpu_khz;
+#endif
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE)) {
+               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+                       cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+#ifndef CONFIG_SMP
+               if (cpu_khz)
+                       cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
+               if (use_tsc) {
+                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+                               fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
+                               set_cyc2ns_scale(cpu_khz/1000);
+                       }
+               }
+#endif
+       }
+
+       if (val != CPUFREQ_RESUMECHANGE)
+               write_sequnlock_irq(&xtime_lock);
+
+       return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+       .notifier_call  = time_cpufreq_notifier
+};
+
+
+static int __init cpufreq_tsc(void)
+{
+       int ret;
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+       if (!ret)
+               cpufreq_init = 1;
+       return ret;
+}
+core_initcall(cpufreq_tsc);
+
+#else /* CONFIG_CPU_FREQ */
+static inline void cpufreq_delayed_get(void) { return; }
+#endif 
+
+
+static int __init init_tsc(char* override)
+{
+       unsigned long long alarm;
+       u64 __cpu_khz;
+
+       __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
+       do_div(__cpu_khz, 1000);
+       cpu_khz = (u32)__cpu_khz;
+       printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n", 
+              cpu_khz / 1000, cpu_khz % 1000);
+
+       /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
+          (2^32 * 1 / (clocks/us)) */
+       {       
+               unsigned long eax=0, edx=1000;
+               __asm__("divl %2"
+                   :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
+                   :"r" (cpu_khz),
+                   "0" (eax), "1" (edx));
+       }
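+
+       /* Illustrative example (hypothetical 2 GHz CPU, cpu_khz = 2000000):
+        * fast_gettimeoffset_quotient = (1000 * 2^32) / 2000000 ~= 2147483,
+        * so get_offset_tsc() returns (delta * quotient) >> 32 ~= delta / 2000,
+        * i.e. 2000 TSC cycles per microsecond. */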
+
+       set_cyc2ns_scale(cpu_khz/1000);
+
+       rdtscll(alarm);
+
+       use_tsc=1;
+
+       return 0;
+}
+
+static int __init tsc_setup(char *str)
+{
+       printk(KERN_WARNING "notsc: cannot disable TSC in Xen/Linux.\n");
+       return 1;
+}
+__setup("notsc", tsc_setup);
+
+
+
+/************************************************************/
+
+/* tsc timer_opts struct */
+struct timer_opts timer_tsc = {
+       .name =         "tsc",
+       .init =         init_tsc,
+       .mark_offset =  mark_offset_tsc, 
+       .get_offset =   get_offset_tsc,
+       .monotonic_clock =      monotonic_clock_tsc,
+       .delay = delay_tsc,
+};
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c
new file mode 100644 (file)
index 0000000..dd1c42b
--- /dev/null
@@ -0,0 +1,1129 @@
+/*
+ *  linux/arch/i386/traps.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/kprobes.h>
+
+#ifdef CONFIG_EISA
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#endif
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+#include <asm/nmi.h>
+
+#include <asm/smp.h>
+#include <asm/arch_hooks.h>
+#include <asm/kdebug.h>
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "mach_traps.h"
+
+asmlinkage int system_call(void);
+asmlinkage void lcall7(void);
+asmlinkage void lcall27(void);
+
+asmlinkage void safe_page_fault(void);
+
+/* Do we ignore FPU interrupts ? */
+char ignore_fpu_irq = 0;
+
+/*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void double_fault(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void alignment_check(void);
+asmlinkage void fixup_4gb_segment(void);
+asmlinkage void machine_check(void);
+
+static int kstack_depth_to_print = 24;
+struct notifier_block *i386die_chain;
+static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;
+
+int register_die_notifier(struct notifier_block *nb)
+{
+       int err = 0;
+       unsigned long flags;
+       spin_lock_irqsave(&die_notifier_lock, flags);
+       err = notifier_chain_register(&i386die_chain, nb);
+       spin_unlock_irqrestore(&die_notifier_lock, flags);
+       return err;
+}
+
+static int valid_stack_ptr(struct task_struct *task, void *p)
+{
+       if (p <= (void *)task->thread_info)
+               return 0;
+       if (kstack_end(p))
+               return 0;
+       return 1;
+}
+
+#ifdef CONFIG_FRAME_POINTER
+static void print_context_stack(struct task_struct *task, unsigned long *stack,
+                        unsigned long ebp)
+{
+       unsigned long addr;
+
+       while (valid_stack_ptr(task, (void *)ebp)) {
+               addr = *(unsigned long *)(ebp + 4);
+               printk(" [<%08lx>] ", addr);
+               print_symbol("%s", addr);
+               printk("\n");
+               ebp = *(unsigned long *)ebp;
+       }
+}
+#else
+static void print_context_stack(struct task_struct *task, unsigned long *stack,
+                        unsigned long ebp)
+{
+       unsigned long addr;
+
+       while (!kstack_end(stack)) {
+               addr = *stack++;
+               if (__kernel_text_address(addr)) {
+                       printk(" [<%08lx>]", addr);
+                       print_symbol(" %s\n", addr);
+                       printk("\n");
+               }
+       }
+}
+#endif
+
+void show_trace(struct task_struct *task, unsigned long * stack)
+{
+       unsigned long ebp;
+
+       if (!task)
+               task = current;
+
+       if (!valid_stack_ptr(task, stack)) {
+               printk("Stack pointer is garbage, not printing trace\n");
+               return;
+       }
+
+       if (task == current) {
+               /* Grab ebp right from our regs */
+               asm ("movl %%ebp, %0" : "=r" (ebp) : );
+       } else {
+               /* ebp is the last reg pushed by switch_to */
+               ebp = *(unsigned long *) task->thread.esp;
+       }
+
+       while (1) {
+               struct thread_info *context;
+               context = (struct thread_info *)
+                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+               print_context_stack(task, stack, ebp);
+               stack = (unsigned long*)context->previous_esp;
+               if (!stack)
+                       break;
+               printk(" =======================\n");
+       }
+}
+
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+       unsigned long *stack;
+       int i;
+
+       if (esp == NULL) {
+               if (task)
+                       esp = (unsigned long*)task->thread.esp;
+               else
+                       esp = (unsigned long *)&esp;
+       }
+
+       stack = esp;
+       for(i = 0; i < kstack_depth_to_print; i++) {
+               if (kstack_end(stack))
+                       break;
+               if (i && ((i % 8) == 0))
+                       printk("\n       ");
+               printk("%08lx ", *stack++);
+       }
+       printk("\nCall Trace:\n");
+       show_trace(task, esp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+       unsigned long stack;
+
+       show_trace(current, &stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+       int i;
+       int in_kernel = 1;
+       unsigned long esp;
+       unsigned short ss;
+
+       esp = (unsigned long) (&regs->esp);
+       ss = __KERNEL_DS;
+       if (regs->xcs & 2) {
+               in_kernel = 0;
+               esp = regs->esp;
+               ss = regs->xss & 0xffff;
+       }
+       print_modules();
+       printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
+                       "   (%s) \n",
+               smp_processor_id(), 0xffff & regs->xcs, regs->eip,
+               print_tainted(), regs->eflags, UTS_RELEASE);
+       print_symbol("EIP is at %s\n", regs->eip);
+       printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
+               regs->eax, regs->ebx, regs->ecx, regs->edx);
+       printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
+               regs->esi, regs->edi, regs->ebp, esp);
+       printk("ds: %04x   es: %04x   ss: %04x\n",
+               regs->xds & 0xffff, regs->xes & 0xffff, ss);
+       printk("Process %s (pid: %d, threadinfo=%p task=%p)",
+               current->comm, current->pid, current_thread_info(), current);
+       /*
+        * When in-kernel, we also print out the stack and code at the
+        * time of the fault..
+        */
+       if (in_kernel) {
+               u8 *eip;
+
+               printk("\nStack: ");
+               show_stack(NULL, (unsigned long*)esp);
+
+               printk("Code: ");
+
+               eip = (u8 *)regs->eip - 43;
+               for (i = 0; i < 64; i++, eip++) {
+                       unsigned char c;
+
+                       if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
+                               printk(" Bad EIP value.");
+                               break;
+                       }
+                       if (eip == (u8 *)regs->eip)
+                               printk("<%02x> ", c);
+                       else
+                               printk("%02x ", c);
+               }
+       }
+       printk("\n");
+}      
+
+static void handle_BUG(struct pt_regs *regs)
+{
+       unsigned short ud2;
+       unsigned short line;
+       char *file;
+       char c;
+       unsigned long eip;
+
+       if (regs->xcs & 2)
+               goto no_bug;            /* Not in kernel */
+
+       eip = regs->eip;
+
+       if (eip < PAGE_OFFSET)
+               goto no_bug;
+       if (__get_user(ud2, (unsigned short *)eip))
+               goto no_bug;
+       if (ud2 != 0x0b0f)
+               goto no_bug;
+       if (__get_user(line, (unsigned short *)(eip + 2)))
+               goto bug;
+       if (__get_user(file, (char **)(eip + 4)) ||
+               (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+               file = "<bad filename>";
+
+       printk("------------[ cut here ]------------\n");
+       printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
+
+no_bug:
+       return;
+
+       /* Here we know it was a BUG but file-n-line is unavailable */
+bug:
+       printk("Kernel BUG\n");
+}
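
handle_BUG() above decodes the in-line record emitted by the i386 BUG() macro. A rough sketch of the layout it expects (the exact macro lives in include/asm-i386/bug.h and varies with CONFIG_DEBUG_BUGVERBOSE, so treat this as an assumption):

    /* Roughly what BUG() emits:
     *
     *   ud2                  ; bytes 0x0f 0x0b, read back as the u16 0x0b0f
     *   .word __LINE__       ; at eip + 2
     *   .long __FILE__       ; at eip + 4, pointer to the file-name string
     */
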
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+       static struct {
+               spinlock_t lock;
+               u32 lock_owner;
+               int lock_owner_depth;
+       } die = {
+               .lock =                 SPIN_LOCK_UNLOCKED,
+               .lock_owner =           -1,
+               .lock_owner_depth =     0
+       };
+       static int die_counter;
+
+       if (die.lock_owner != smp_processor_id()) {
+               console_verbose();
+               spin_lock_irq(&die.lock);
+               die.lock_owner = smp_processor_id();
+               die.lock_owner_depth = 0;
+               bust_spinlocks(1);
+       }
+
+       if (++die.lock_owner_depth < 3) {
+               int nl = 0;
+               handle_BUG(regs);
+               printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+               printk("PREEMPT ");
+               nl = 1;
+#endif
+#ifdef CONFIG_SMP
+               printk("SMP ");
+               nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+               printk("DEBUG_PAGEALLOC");
+               nl = 1;
+#endif
+               if (nl)
+                       printk("\n");
+               notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+               show_registers(regs);
+       } else
+               printk(KERN_ERR "Recursive die() failure, output suppressed\n");
+
+       bust_spinlocks(0);
+       die.lock_owner = -1;
+       spin_unlock_irq(&die.lock);
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops) {
+               printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(5 * HZ);
+               panic("Fatal exception");
+       }
+       do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+       if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
+               die(str, regs, err);
+}
+
+static inline unsigned long get_cr2(void)
+{
+       unsigned long address;
+
+       /* get the address */
+       __asm__("movl %%cr2,%0":"=r" (address));
+       return address;
+}
+
+static inline void do_trap(int trapnr, int signr, char *str, int vm86,
+                          struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+       if (regs->eflags & VM_MASK) {
+               if (vm86)
+                       goto vm86_trap;
+               goto trap_signal;
+       }
+
+       if (!(regs->xcs & 2))
+               goto kernel_trap;
+
+       trap_signal: {
+               struct task_struct *tsk = current;
+               tsk->thread.error_code = error_code;
+               tsk->thread.trap_no = trapnr;
+               if (info)
+                       force_sig_info(signr, info, tsk);
+               else
+                       force_sig(signr, tsk);
+               return;
+       }
+
+       kernel_trap: {
+               if (!fixup_exception(regs))
+                       die(str, regs, error_code);
+               return;
+       }
+
+       vm86_trap: {
+               int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
+               if (ret) goto trap_signal;
+               return;
+       }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+                                               == NOTIFY_STOP) \
+               return; \
+       do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       siginfo_t info; \
+       info.si_signo = signr; \
+       info.si_errno = 0; \
+       info.si_code = sicode; \
+       info.si_addr = (void __user *)siaddr; \
+       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+                                               == NOTIFY_STOP) \
+               return; \
+       do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
+}
+
+#define DO_VM86_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+                                               == NOTIFY_STOP) \
+               return; \
+       do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
+}
+
+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       siginfo_t info; \
+       info.si_signo = signr; \
+       info.si_errno = 0; \
+       info.si_code = sicode; \
+       info.si_addr = (void __user *)siaddr; \
+       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+                                               == NOTIFY_STOP) \
+               return; \
+       do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+}
+
+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
+#ifndef CONFIG_KPROBES
+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+#endif
+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+#ifdef CONFIG_X86_MCE
+DO_ERROR(18, SIGBUS, "machine check", machine_check)
+#endif
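
For reference, this is what one of the generator macros above expands to, taking DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) as the example (the expansion below follows the macro text exactly, nothing is added):

    asmlinkage void do_invalid_TSS(struct pt_regs * regs, long error_code)
    {
            if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10, SIGSEGV)
                                                    == NOTIFY_STOP)
                    return;
            do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
    }
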
+
+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+{
+       /*
+        * If we trapped on an LDT access then ensure that the default_ldt is
+        * loaded, if nothing else. We load default_ldt lazily because LDT
+        * switching costs time and many applications don't need it.
+        */
+       if (unlikely((error_code & 6) == 4)) {
+               unsigned long ldt;
+               __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
+               if (ldt == 0) {
+                       mmu_update_t u;
+                       u.ptr = MMU_EXTENDED_COMMAND;
+                       u.ptr |= (unsigned long)&default_ldt[0];
+                       u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
+                       if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
+                               show_trace(NULL, (unsigned long *)&u);
+                               panic("Failed to install default LDT");
+                       }
+                       return;
+               }
+       }
+
+       if (regs->eflags & VM_MASK)
+               goto gp_in_vm86;
+
+       if (!(regs->xcs & 2))
+               goto gp_in_kernel;
+
+       current->thread.error_code = error_code;
+       current->thread.trap_no = 13;
+       force_sig(SIGSEGV, current);
+       return;
+
+gp_in_vm86:
+       local_irq_enable();
+       handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+       return;
+
+gp_in_kernel:
+       if (!fixup_exception(regs)) {
+               if (notify_die(DIE_GPF, "general protection fault", regs,
+                               error_code, 13, SIGSEGV) == NOTIFY_STOP)
+                       return;
+               die("general protection fault", regs, error_code);
+       }
+}
+
+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+{
+       printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+       printk("You probably have a hardware problem with your RAM chips\n");
+
+       /* Clear and disable the memory parity error line. */
+       clear_mem_error(reason);
+}
+
+static void io_check_error(unsigned char reason, struct pt_regs * regs)
+{
+       unsigned long i;
+
+       printk("NMI: IOCK error (debug interrupt?)\n");
+       show_registers(regs);
+
+       /* Re-enable the IOCK line, wait for a few seconds */
+       reason = (reason & 0xf) | 8;
+       outb(reason, 0x61);
+       i = 2000;
+       while (--i) udelay(1000);
+       reason &= ~8;
+       outb(reason, 0x61);
+}
+
+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+{
+#ifdef CONFIG_MCA
+       /* Might actually be able to figure out what the guilty party
+       * is. */
+       if( MCA_bus ) {
+               mca_handle_nmi();
+               return;
+       }
+#endif
+       printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+               reason, smp_processor_id());
+       printk("Dazed and confused, but trying to continue\n");
+       printk("Do you have a strange power saving mode enabled?\n");
+}
+
+static spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;
+
+void die_nmi (struct pt_regs *regs, const char *msg)
+{
+       spin_lock(&nmi_print_lock);
+       /*
+        * We are in trouble anyway, let's at least try
+        * to get a message out.
+        */
+       bust_spinlocks(1);
+       printk(msg);
+       printk(" on CPU%d, eip %08lx, registers:\n",
+               smp_processor_id(), regs->eip);
+       show_registers(regs);
+       printk("console shuts up ...\n");
+       console_silent();
+       spin_unlock(&nmi_print_lock);
+       bust_spinlocks(0);
+       do_exit(SIGSEGV);
+}
+
+static void default_do_nmi(struct pt_regs * regs)
+{
+       unsigned char reason = get_nmi_reason();
+       if (!(reason & 0xc0)) {
+               if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+                                                       == NOTIFY_STOP)
+                       return;
+#ifdef CONFIG_X86_LOCAL_APIC
+               /*
+                * Ok, so this is none of the documented NMI sources,
+                * so it must be the NMI watchdog.
+                */
+               if (nmi_watchdog) {
+                       nmi_watchdog_tick(regs);
+                       return;
+               }
+#endif
+               unknown_nmi_error(reason, regs);
+               return;
+       }
+       if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+               return;
+       if (reason & 0x80)
+               mem_parity_error(reason, regs);
+       if (reason & 0x40)
+               io_check_error(reason, regs);
+       /*
+        * Reassert NMI in case it became active meanwhile
+        * as it's edge-triggered.
+        */
+       reassert_nmi();
+}
+
+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+{
+       return 0;
+}
+static nmi_callback_t nmi_callback = dummy_nmi_callback;
+asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+{
+       int cpu;
+
+       nmi_enter();
+
+       cpu = smp_processor_id();
+       ++nmi_count(cpu);
+
+       if (!nmi_callback(regs, cpu))
+               default_do_nmi(regs);
+
+       nmi_exit();
+}
+
+void set_nmi_callback(nmi_callback_t callback)
+{
+       nmi_callback = callback;
+}
+
+void unset_nmi_callback(void)
+{
+       nmi_callback = dummy_nmi_callback;
+}
+
+#ifdef CONFIG_KPROBES
+asmlinkage int do_int3(struct pt_regs *regs, long error_code)
+{
+       if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+                       == NOTIFY_STOP)
+               return 1;
+       /* This is an interrupt gate, because kprobes wants interrupts
+       disabled.  Normal trap handlers don't. */
+       restore_interrupts(regs);
+       do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
+       return 0;
+}
+#endif
+
+/*
+ * Our handling of the processor debug registers is non-trivial.
+ * We do not clear them on entry and exit from the kernel. Therefore
+ * it is possible to get a watchpoint trap here from inside the kernel.
+ * However, the code in ./ptrace.c has ensured that the user can
+ * only set watchpoints on userspace addresses. Therefore the in-kernel
+ * watchpoint trap can only occur in code which is reading/writing
+ * from user space. Such code must not hold kernel locks (since it
+ * can equally take a page fault), therefore it is safe to call
+ * force_sig_info even though that claims and releases locks.
+ * 
+ * Code in ./signal.c ensures that the debug control register
+ * is restored before we deliver any signal, and therefore that
+ * user code runs with the correct debug control register even though
+ * we clear it here.
+ *
+ * Being careful here means that we don't have to be as careful in a
+ * lot of more complicated places (task switching can be a bit lazy
+ * about restoring all the debug state, and ptrace doesn't have to
+ * find every occurrence of the TF bit that could be saved away even
+ * by user code)
+ */
+asmlinkage void do_debug(struct pt_regs * regs, long error_code)
+{
+       unsigned int condition;
+       struct task_struct *tsk = current;
+       siginfo_t info;
+
+       condition = HYPERVISOR_get_debugreg(6);
+
+       if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+                                       SIGTRAP) == NOTIFY_STOP)
+               return;
+#if 0
+       /* It's safe to allow irq's after DR6 has been saved */
+       if (regs->eflags & X86_EFLAGS_IF)
+               local_irq_enable();
+#endif
+
+       /* Mask out spurious debug traps due to lazy DR7 setting */
+       if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+               if (!tsk->thread.debugreg[7])
+                       goto clear_dr7;
+       }
+
+       if (regs->eflags & VM_MASK)
+               goto debug_vm86;
+
+       /* Save debug status register where ptrace can see it */
+       tsk->thread.debugreg[6] = condition;
+
+       /* Mask out spurious TF errors due to lazy TF clearing */
+       if (condition & DR_STEP) {
+               /*
+                * The TF error should be masked out only if the current
+                * process is not traced and if the TRAP flag has been set
+                * previously by a tracing process (condition detected by
+                * the PT_DTRACE flag); remember that the i386 TRAP flag
+                * can be modified by the process itself in user mode,
+                * allowing programs to debug themselves without the ptrace()
+                * interface.
+                */
+               if ((regs->xcs & 2) == 0)
+                       goto clear_TF_reenable;
+               if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
+                       goto clear_TF;
+       }
+
+       /* Ok, finally something we can handle */
+       tsk->thread.trap_no = 1;
+       tsk->thread.error_code = error_code;
+       info.si_signo = SIGTRAP;
+       info.si_errno = 0;
+       info.si_code = TRAP_BRKPT;
+       
+       /* If this is a kernel mode trap, save the user PC on entry to 
+        * the kernel, that's what the debugger can make sense of.
+        */
+       info.si_addr = ((regs->xcs & 2) == 0) ? (void __user *)tsk->thread.eip : 
+                                               (void __user *)regs->eip;
+       force_sig_info(SIGTRAP, &info, tsk);
+
+       /* Disable additional traps. They'll be re-enabled when
+        * the signal is delivered.
+        */
+clear_dr7:
+       HYPERVISOR_set_debugreg(7, 0);
+       return;
+
+debug_vm86:
+       handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+       return;
+
+clear_TF_reenable:
+       set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+clear_TF:
+       regs->eflags &= ~TF_MASK;
+       return;
+}
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+void math_error(void __user *eip)
+{
+       struct task_struct * task;
+       siginfo_t info;
+       unsigned short cwd, swd;
+
+       /*
+        * Save the info for the exception handler and clear the error.
+        */
+       task = current;
+       save_init_fpu(task);
+       task->thread.trap_no = 16;
+       task->thread.error_code = 0;
+       info.si_signo = SIGFPE;
+       info.si_errno = 0;
+       info.si_code = __SI_FAULT;
+       info.si_addr = eip;
+       /*
+        * (~cwd & swd) will mask out exceptions that are not set to unmasked
+        * status.  0x3f is the exception bits in these regs, 0x200 is the
+        * C1 reg you need in case of a stack fault, 0x040 is the stack
+        * fault bit.  We should only be taking one exception at a time,
+        * so if this combination doesn't produce any single exception,
+        * then we have a bad program that isn't synchronizing its FPU usage
+        * and it will suffer the consequences since we won't be able to
+        * fully reproduce the context of the exception
+        */
+       cwd = get_fpu_cwd(task);
+       swd = get_fpu_swd(task);
+       switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+               case 0x000:
+               default:
+                       break;
+               case 0x001: /* Invalid Op */
+               case 0x041: /* Stack Fault */
+               case 0x241: /* Stack Fault | Direction */
+                       info.si_code = FPE_FLTINV;
+                       /* Should we clear the SF or let user space do it ???? */
+                       break;
+               case 0x002: /* Denormalize */
+               case 0x010: /* Underflow */
+                       info.si_code = FPE_FLTUND;
+                       break;
+               case 0x004: /* Zero Divide */
+                       info.si_code = FPE_FLTDIV;
+                       break;
+               case 0x008: /* Overflow */
+                       info.si_code = FPE_FLTOVF;
+                       break;
+               case 0x020: /* Precision */
+                       info.si_code = FPE_FLTRES;
+                       break;
+       }
+       force_sig_info(SIGFPE, &info, task);
+}
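
A worked example of the masking logic above, assuming a control word in which only the zero-divide exception is unmasked:

    /* Example: cwd = 0x037b (default 0x037f with ZM, bit 2, cleared),
     *          swd = 0x0004 (ZE set after a divide by zero).
     *
     *   (~cwd) & swd & 0x3f  =  0x004
     *   swd & 0x240          =  0x000
     *   switch key           =  0x004  ->  info.si_code = FPE_FLTDIV
     */
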
+
+asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
+{
+       ignore_fpu_irq = 1;
+       math_error((void __user *)regs->eip);
+}
+
+void simd_math_error(void __user *eip)
+{
+       struct task_struct * task;
+       siginfo_t info;
+       unsigned short mxcsr;
+
+       /*
+        * Save the info for the exception handler and clear the error.
+        */
+       task = current;
+       save_init_fpu(task);
+       task->thread.trap_no = 19;
+       task->thread.error_code = 0;
+       info.si_signo = SIGFPE;
+       info.si_errno = 0;
+       info.si_code = __SI_FAULT;
+       info.si_addr = eip;
+       /*
+        * The SIMD FPU exceptions are handled a little differently, as there
+        * is only a single status/control register.  Thus, to determine which
+        * unmasked exception was caught we must mask the exception mask bits
+        * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+        */
+       mxcsr = get_fpu_mxcsr(task);
+       switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+               case 0x000:
+               default:
+                       break;
+               case 0x001: /* Invalid Op */
+                       info.si_code = FPE_FLTINV;
+                       break;
+               case 0x002: /* Denormalize */
+               case 0x010: /* Underflow */
+                       info.si_code = FPE_FLTUND;
+                       break;
+               case 0x004: /* Zero Divide */
+                       info.si_code = FPE_FLTDIV;
+                       break;
+               case 0x008: /* Overflow */
+                       info.si_code = FPE_FLTOVF;
+                       break;
+               case 0x020: /* Precision */
+                       info.si_code = FPE_FLTRES;
+                       break;
+       }
+       force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
+                                         long error_code)
+{
+       if (cpu_has_xmm) {
+               /* Handle SIMD FPU exceptions on PIII+ processors. */
+               ignore_fpu_irq = 1;
+               simd_math_error((void __user *)regs->eip);
+       } else {
+               /*
+                * Handle strange cache flush from user space exception
+                * in all other cases.  This is undocumented behaviour.
+                */
+               if (regs->eflags & VM_MASK) {
+                       handle_vm86_fault((struct kernel_vm86_regs *)regs,
+                                         error_code);
+                       return;
+               }
+               die_if_kernel("cache flush denied", regs, error_code);
+               current->thread.trap_no = 19;
+               current->thread.error_code = error_code;
+               force_sig(SIGSEGV, current);
+       }
+}
+
+/*
+ *  'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
+ */
+asmlinkage void math_state_restore(struct pt_regs regs)
+{
+       struct thread_info *thread = current_thread_info();
+       struct task_struct *tsk = thread->task;
+
+       /*
+        * A trap in kernel mode can be ignored. It'll be the fast XOR or
+        * copying libraries, which will correctly save/restore state and
+        * reset the TS bit in CR0.
+        */
+       if ((regs.xcs & 2) == 0)
+               return;
+
+       clts();         /* Allow maths ops (or we recurse) */
+       if (!tsk->used_math)
+               init_fpu(tsk);
+       restore_fpu(tsk);
+       thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
+}
+
+#ifndef CONFIG_MATH_EMULATION
+
+asmlinkage void math_emulate(long arg)
+{
+       printk("math-emulation not enabled and no coprocessor found.\n");
+       printk("killing %s.\n",current->comm);
+       force_sig(SIGFPE,current);
+       schedule();
+}
+
+#endif /* CONFIG_MATH_EMULATION */
+
+#ifdef CONFIG_X86_F00F_BUG
+void __init trap_init_f00f_bug(void)
+{
+       __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+       /*
+        * Update the IDT descriptor and reload the IDT so that
+        * it uses the read-only mapped virtual address.
+        */
+       idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+       __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+}
+#endif
+
+#define _set_gate(gate_addr,type,dpl,addr,seg) \
+do { \
+  int __d0, __d1; \
+  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+       "movw %4,%%dx\n\t" \
+       "movl %%eax,%0\n\t" \
+       "movl %%edx,%1" \
+       :"=m" (*((long *) (gate_addr))), \
+        "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
+       :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+        "3" ((char *) (addr)),"2" ((seg) << 16)); \
+} while (0)
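
The assembly in _set_gate simply assembles a standard i386 gate descriptor; a plain-C sketch of the two 32-bit words it stores (for reference only, the macro above is what the kernel actually runs):

    /* Equivalent construction of the 8-byte gate descriptor:
     *
     *   word0 = (seg << 16) | ((unsigned long)addr & 0xffff);
     *   word1 = ((unsigned long)addr & 0xffff0000)
     *         | 0x8000 | (dpl << 13) | (type << 8);
     *
     * 0x8000 is the present bit; type 14 = interrupt gate, 15 = trap gate,
     * 12 = call gate, 5 = task gate, matching the callers below.
     */
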
+
+
+/*
+ * This needs to use 'idt_table' rather than 'idt', and
+ * thus use the _nonmapped_ version of the IDT, as the
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+void set_intr_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
+}
+
+#if 0
+/*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+static inline void set_system_intr_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
+}
+
+static void __init set_trap_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
+}
+
+static void __init set_system_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
+}
+#endif
+
+static void __init set_call_gate(void *a, void *addr)
+{
+       _set_gate(a,12,3,addr,__KERNEL_CS);
+}
+
+#if 0
+static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
+{
+       _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
+}
+#endif
+
+
+/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
+static trap_info_t trap_table[] = {
+       {  0, 0, __KERNEL_CS, (unsigned long)divide_error               },
+       {  1, 0, __KERNEL_CS, (unsigned long)debug                      },
+       {  3, 3, __KERNEL_CS, (unsigned long)int3                       },
+       {  4, 3, __KERNEL_CS, (unsigned long)overflow                   },
+       {  5, 3, __KERNEL_CS, (unsigned long)bounds                     },
+       {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                 },
+       {  7, 0, __KERNEL_CS, (unsigned long)device_not_available       },
+       {  8, 0, __KERNEL_CS, (unsigned long)double_fault               },
+       {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+       { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                },
+       { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present        },
+       { 12, 0, __KERNEL_CS, (unsigned long)stack_segment              },
+       { 13, 0, __KERNEL_CS, (unsigned long)general_protection         },
+       { 14, 0, __KERNEL_CS, (unsigned long)page_fault                 },
+       { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment          },
+       { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error          },
+       { 17, 0, __KERNEL_CS, (unsigned long)alignment_check            },
+#ifdef CONFIG_X86_MCE
+       { 18, 0, __KERNEL_CS, (unsigned long)machine_check              },
+#endif
+       { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error     },
+       { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call    },
+       {  0, 0,           0, 0                                         }
+};
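
The entries follow the Xen 2.0 trap_info_t layout; the assumed field meaning (taken from the hypervisor interface headers, shown here only as a reading aid) is:

    /* { vector, privilege level, code selector, handler address }
     *
     * Entries with privilege level 3 (int3, overflow, bounds, SYSCALL_VECTOR)
     * may be raised directly from user space; the rest are kernel-only.
     */
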
+
+void __init trap_init(void)
+{
+       HYPERVISOR_set_trap_table(trap_table);
+       HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
+
+       /*
+        * default LDT is a single-entry callgate to lcall7 for iBCS
+        * and a callgate to lcall27 for Solaris/x86 binaries
+        */
+       clear_page(&default_ldt[0]);
+       set_call_gate(&default_ldt[0],lcall7);
+       set_call_gate(&default_ldt[4],lcall27);
+       __make_page_readonly(&default_ldt[0]);
+       xen_flush_page_update_queue();
+
+       /*
+        * Should be a barrier for any external CPU state.
+        */
+       cpu_init();
+}
+
+
+/*
+ * install_safe_pf_handler / install_normal_pf_handler:
+ * 
+ * These are used within the failsafe_callback handler in entry.S to avoid
+ * taking a full page fault when reloading FS and GS. This is because FS and 
+ * GS could be invalid at pretty much any point while Xen Linux executes (we 
+ * don't set them to safe values on entry to the kernel). At *any* point Xen 
+ * may be entered due to a hardware interrupt --- on exit from Xen an invalid 
+ * FS/GS will cause our failsafe_callback to be executed. This could occur, 
+ * for example, while the mmu_update_queue is in an inconsistent state. This
+ * is disastrous because the normal page-fault handler touches the update
+ * queue!
+ * 
+ * Fortunately, within the failsafe handler it is safe to force DS/ES/FS/GS
+ * to zero if they cannot be reloaded -- at this point executing a normal
+ * page fault would not change this effect. The safe page-fault handler
+ * ensures this end result (blow away the selector value) without the dangers
+ * of the normal page-fault handler.
+ * 
+ * NB. Perhaps this can all go away after we have implemented writable
+ * page tables. :-)
+ */
+
+asmlinkage void do_safe_page_fault(struct pt_regs *regs, 
+                                   unsigned long error_code,
+                                   unsigned long address)
+{
+       if (!fixup_exception(regs))
+               die("Unhandleable 'safe' page fault!", regs, error_code);
+}
+
+unsigned long install_safe_pf_handler(void)
+{
+       static trap_info_t safe_pf[] = { 
+               { 14, 0, __KERNEL_CS, (unsigned long)safe_page_fault },
+               {  0, 0,           0, 0                              }
+       };
+       unsigned long flags;
+       local_irq_save(flags);
+       HYPERVISOR_set_trap_table(safe_pf);
+       return flags; /* This is returned in %eax */
+}
+
+__attribute__((regparm(3))) /* This function takes its arg in %eax */
+void install_normal_pf_handler(unsigned long flags)
+{
+       static trap_info_t normal_pf[] = { 
+               { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
+               {  0, 0,           0, 0                         }
+       };
+       HYPERVISOR_set_trap_table(normal_pf);
+       local_irq_restore(flags);
+}
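
A heavily hedged sketch of the call sequence described in the comment block above; the real caller is assembly in entry.S, so this only shows the C-level shape and the %eax convention noted in the function comments:

    /*   flags = install_safe_pf_handler();   returns the saved IRQ flags in %eax
     *   ... reload %ds/%es/%fs/%gs; a fault now runs do_safe_page_fault(),
     *       which only applies the exception-table fixup (blowing away the
     *       selector) instead of the normal page-fault path ...
     *   install_normal_pf_handler(flags);    flags passed back in %eax (regparm)
     */
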
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..829f1c9
--- /dev/null
@@ -0,0 +1,135 @@
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(startup_32)
+jiffies = jiffies_64;
+SECTIONS
+{
+  . = 0xC0000000 + 0x100000;
+  /* read-only */
+  _text = .;                   /* Text and read-only data */
+  .text : {
+       *(.text)
+       SCHED_TEXT
+       *(.fixup)
+       *(.gnu.warning)
+       } = 0x9090
+
+  _etext = .;                  /* End of text section */
+
+  . = ALIGN(16);               /* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  RODATA
+
+  /* writeable */
+  .data : {                    /* Data */
+       *(.data)
+       CONSTRUCTORS
+       }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;                  /* End of data section */
+
+  . = ALIGN(THREAD_SIZE);      /* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  /* will be freed after init */
+  . = ALIGN(4096);             /* Init code and data */
+  __init_begin = .;
+  .init.text : { 
+       _sinittext = .;
+       *(.init.text)
+       _einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __start___param = .;
+  __param : { *(__param) }
+  __stop___param = .;
+  __initcall_start = .;
+  .initcall.init : {
+       *(.initcall1.init) 
+       *(.initcall2.init) 
+       *(.initcall3.init) 
+       *(.initcall4.init) 
+       *(.initcall5.init) 
+       *(.initcall6.init) 
+       *(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(4);
+  __alt_instructions = .;
+  .altinstructions : { *(.altinstructions) } 
+  __alt_instructions_end = .; 
+  .altinstr_replacement : { *(.altinstr_replacement) }
+  /* .exit.text is discarded at runtime, not link time, to deal with references
+     from .altinstructions and .eh_frame */
+  .exit.text : { *(.exit.text) }
+  .exit.data : { *(.exit.data) }
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(4096);
+  __init_end = .;
+  /* freed after init ends here */
+       
+  __bss_start = .;             /* BSS */
+  .bss : {
+       *(.bss.page_aligned)
+       *(.bss)
+  }
+  . = ALIGN(4);
+  __bss_stop = .; 
+
+  _end = . ;
+
+  /* This is where the kernel creates the early boot page tables */
+  . = ALIGN(4096);
+  pg0 = .;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+       *(.exitcall.exit)
+       }
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.S b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.S
new file mode 100644 (file)
index 0000000..f2bcb7f
--- /dev/null
@@ -0,0 +1,15 @@
+#include <linux/init.h>
+
+__INITDATA
+
+       .globl vsyscall_int80_start, vsyscall_int80_end
+vsyscall_int80_start:
+       .incbin "arch/xen/i386/kernel/vsyscall-int80.so"
+vsyscall_int80_end:
+
+       .globl vsyscall_sysenter_start, vsyscall_sysenter_end
+vsyscall_sysenter_start:
+       .incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
+vsyscall_sysenter_end:
+
+__FINIT
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.lds b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
new file mode 100644 (file)
index 0000000..befbdb9
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page).  This script controls its layout.
+ */
+
+/* This must match <asm/fixmap.h>.  */
+/* = FIXADDR_TOP - PAGE_SIZE
+   = HYPERVISOR_VIRT_START - 2 * PAGE_SIZE - PAGE_SIZE */
+VSYSCALL_BASE = 0xfbffd000;
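
The constant follows directly from the comment above, assuming HYPERVISOR_VIRT_START is 0xFC000000 in this version of the Xen interface:

    FIXADDR_TOP   = 0xFC000000 - 2*0x1000 = 0xFBFFE000
    VSYSCALL_BASE = 0xFBFFE000 -   0x1000 = 0xFBFFD000
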
+
+SECTIONS
+{
+  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+
+  .hash           : { *(.hash) }               :text
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+
+  /* This linker script is used both with -r and with -shared.
+     For the layouts to match, we need to skip more than enough
+     space for the dynamic symbol table et al.  If this amount
+     is insufficient, ld -shared will barf.  Just increase it here.  */
+  . = VSYSCALL_BASE + 0x400;
+
+  .text           : { *(.text) }               :text =0x90909090
+
+  .eh_frame_hdr   : { *(.eh_frame_hdr) }       :text :eh_frame_hdr
+  .eh_frame       : { KEEP (*(.eh_frame)) }    :text
+  .dynamic        : { *(.dynamic) }            :text :dynamic
+  .useless        : {
+       *(.got.plt) *(.got)
+       *(.data .data.* .gnu.linkonce.d.*)
+       *(.dynbss)
+       *(.bss .bss.* .gnu.linkonce.b.*)
+  }                                            :text
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+  LINUX_2.5 {
+    global:
+       __kernel_vsyscall;
+       __kernel_sigreturn;
+       __kernel_rt_sigreturn;
+
+    local: *;
+  };
+}
+
+/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+ENTRY(__kernel_vsyscall);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/Makefile
new file mode 100644 (file)
index 0000000..016d205
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Makefile for the linux i386-specific parts of the memory manager.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/mm
+
+obj-y  := init.o pgtable.o fault.o ioremap.o pageattr.o hypervisor.o
+c-obj-y        := extable.o mmap.o
+
+c-obj-$(CONFIG_DISCONTIGMEM)   += discontig.o
+c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
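
The pattern rule above populates the Xen tree with the unmodified native i386 sources before compiling them. For example (assuming the standard kernel layout), building extable.o from c-obj-y first creates the symlink:

    arch/xen/i386/mm/extable.c -> $(srctree)/arch/i386/mm/extable.c
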
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/fault.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/fault.c
new file mode 100644 (file)
index 0000000..b8d7e46
--- /dev/null
@@ -0,0 +1,552 @@
+/*
+ *  linux/arch/i386/mm/fault.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>             /* For unblank_screen() */
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/kdebug.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+pgd_t *cur_pgd;                        /* XXXsmp */
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out 
+ */
+void bust_spinlocks(int yes)
+{
+       int loglevel_save = console_loglevel;
+
+       if (yes) {
+               oops_in_progress = 1;
+               return;
+       }
+#ifdef CONFIG_VT
+       unblank_screen();
+#endif
+       oops_in_progress = 0;
+       /*
+        * OK, the message is on the console.  Now we call printk()
+        * without oops_in_progress set so that printk will give klogd
+        * a poke.  Hold onto your hats...
+        */
+       console_loglevel = 15;          /* NMI oopser may have shut the console up */
+       printk(" ");
+       console_loglevel = loglevel_save;
+}
+
+/*
+ * Return EIP plus the CS segment base.  The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+ * appropriate), and returned in *eip_limit.
+ *
+ * The segment is checked, because it might have been changed by another
+ * task between the original faulting instruction and here.
+ *
+ * If CS is no longer a valid code segment, or if EIP is beyond the
+ * limit, or if it is a kernel address when CS is not a kernel segment,
+ * then the returned value will be greater than *eip_limit.
+ * 
+ * This is slow, but is very rarely executed.
+ */
+static inline unsigned long get_segment_eip(struct pt_regs *regs,
+                                           unsigned long *eip_limit)
+{
+       unsigned long eip = regs->eip;
+       unsigned seg = regs->xcs & 0xffff;
+       u32 seg_ar, seg_limit, base, *desc;
+
+       /* The standard kernel/user address space limit. */
+       *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
+
+       /* Unlikely, but must come before segment checks. */
+       if (unlikely((regs->eflags & VM_MASK) != 0))
+               return eip + (seg << 4);
+       
+       /* By far the most common cases. */
+       if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+               return eip;
+
+       /* Check the segment exists, is within the current LDT/GDT size,
+          that kernel/user (ring 0..3) has the appropriate privilege,
+          that it's a code segment, and get the limit. */
+       __asm__ ("larl %3,%0; lsll %3,%1"
+                : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+       if ((~seg_ar & 0x9800) || eip > seg_limit) {
+               *eip_limit = 0;
+               return 1;        /* So that returned eip > *eip_limit. */
+       }
+
+       /* Get the GDT/LDT descriptor base. 
+          When you look for races in this code remember that
+          LDT and other horrors are only used in user space. */
+       if (seg & (1<<2)) {
+               /* Must lock the LDT while reading it. */
+               down(&current->mm->context.sem);
+               desc = current->mm->context.ldt;
+               desc = (void *)desc + (seg & ~7);
+       } else {
+               /* Must disable preemption while reading the GDT. */
+               desc = (u32 *)get_cpu_gdt_table(get_cpu());
+               desc = (void *)desc + (seg & ~7);
+       }
+
+       /* Decode the code segment base from the descriptor */
+       base =   (desc[0] >> 16) |
+               ((desc[1] & 0xff) << 16) |
+                (desc[1] & 0xff000000);
+
+       if (seg & (1<<2)) { 
+               up(&current->mm->context.sem);
+       } else
+               put_cpu();
+
+       /* Adjust EIP and segment limit, and clamp at the kernel limit.
+          It's legitimate for segments to wrap at 0xffffffff. */
+       seg_limit += base;
+       if (seg_limit < *eip_limit && seg_limit >= base)
+               *eip_limit = seg_limit;
+       return eip + base;
+}
+
+/* 
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ */
+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+{ 
+       unsigned long limit;
+       unsigned long instr = get_segment_eip (regs, &limit);
+       int scan_more = 1;
+       int prefetch = 0; 
+       int i;
+
+       for (i = 0; scan_more && i < 15; i++) { 
+               unsigned char opcode;
+               unsigned char instr_hi;
+               unsigned char instr_lo;
+
+               if (instr > limit)
+                       break;
+               if (__get_user(opcode, (unsigned char *) instr))
+                       break; 
+
+               instr_hi = opcode & 0xf0; 
+               instr_lo = opcode & 0x0f; 
+               instr++;
+
+               switch (instr_hi) { 
+               case 0x20:
+               case 0x30:
+                       /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+                       scan_more = ((instr_lo & 7) == 0x6);
+                       break;
+                       
+               case 0x60:
+                       /* 0x64 thru 0x67 are valid prefixes in all modes. */
+                       scan_more = (instr_lo & 0xC) == 0x4;
+                       break;          
+               case 0xF0:
+                       /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+                       scan_more = !instr_lo || (instr_lo>>1) == 1;
+                       break;                  
+               case 0x00:
+                       /* Prefetch instruction is 0x0F0D or 0x0F18 */
+                       scan_more = 0;
+                       if (instr > limit)
+                               break;
+                       if (__get_user(opcode, (unsigned char *) instr)) 
+                               break;
+                       prefetch = (instr_lo == 0xF) &&
+                               (opcode == 0x0D || opcode == 0x18);
+                       break;                  
+               default:
+                       scan_more = 0;
+                       break;
+               } 
+       }
+       return prefetch;
+}
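
A small trace of the scanner above on a concrete byte sequence (the instruction bytes are chosen purely for illustration):

    /* Example: the bytes 0x0f 0x18 0x06 (prefetchnta (%esi)) at the fault EIP:
     *
     *   i = 0: opcode 0x0f -> instr_hi = 0x00, instr_lo = 0xf
     *          the 0x00 case reads the next byte, 0x18, which matches
     *          (opcode == 0x0D || opcode == 0x18)
     *          => prefetch = 1, and the caller ignores the spurious fault
     */
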
+
+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+                             unsigned long error_code)
+{
+       if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+                    boot_cpu_data.x86 >= 6)) {
+               /* Catch an obscure case of prefetch inside an NX page. */
+               if (nx_enabled && (error_code & 16))
+                       return 0;
+               return __is_prefetch(regs, addr);
+       }
+       return 0;
+} 
+
+asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ *     bit 0 == 0 means no page found, 1 means protection fault
+ *     bit 1 == 0 means read, 1 means write
+ *     bit 2 == 0 means kernel, 1 means user-mode
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+                             unsigned long address)
+{
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       struct vm_area_struct * vma;
+       unsigned long page;
+       int write;
+       siginfo_t info;
+
+       /* Set the "privileged fault" bit to something sane. */
+       error_code &= 3;
+       error_code |= (regs->xcs & 2) << 1;
+
+       if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+                                       SIGSEGV) == NOTIFY_STOP)
+               return;
+#if 0
+       /* It's safe to allow irq's after cr2 has been saved */
+       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+               local_irq_enable();
+#endif
+
+       tsk = current;
+
+       info.si_code = SEGV_MAPERR;
+
+       /*
+        * We fault-in kernel-space virtual memory on-demand. The
+        * 'reference' page table is init_mm.pgd.
+        *
+        * NOTE! We MUST NOT take any locks for this case. We may
+        * be in an interrupt or a critical region, and should
+        * only copy the information from the master page table,
+        * nothing more.
+        *
+        * This verifies that the fault happens in kernel space
+        * (error_code & 4) == 0, and that the fault was not a
+        * protection error (error_code & 1) == 0.
+        */
+       if (unlikely(address >= TASK_SIZE)) { 
+               if (!(error_code & 5))
+                       goto vmalloc_fault;
+               /* 
+                * Don't take the mm semaphore here. If we fixup a prefetch
+                * fault we could otherwise deadlock.
+                */
+               goto bad_area_nosemaphore;
+       } 
+
+       mm = tsk->mm;
+
+       /*
+        * If we're in an interrupt, have no user context or are running in an
+        * atomic region then we must not take the fault..
+        */
+       if (in_atomic() || !mm)
+               goto bad_area_nosemaphore;
+
+       /* When running in the kernel we expect faults to occur only to
+        * addresses in user space.  All other faults represent errors in the
+        * kernel and should generate an OOPS.  Unfortunately, in the case of an
+        * erroneous fault occurring in a code path which already holds mmap_sem
+        * we will deadlock attempting to validate the fault against the
+        * address space.  Luckily the kernel only validly references user
+        * space from well defined areas of code, which are listed in the
+        * exceptions table.
+        *
+        * As the vast majority of faults will be valid we will only perform
+        * the source reference check when there is a possibility of a deadlock.
+        * Attempt to lock the address space, if we cannot we then validate the
+        * source.  If this is invalid we can skip the address space check,
+        * thus avoiding the deadlock.
+        */
+       if (!down_read_trylock(&mm->mmap_sem)) {
+               if ((error_code & 4) == 0 &&
+                   !search_exception_tables(regs->eip))
+                       goto bad_area_nosemaphore;
+               down_read(&mm->mmap_sem);
+       }
+
+       vma = find_vma(mm, address);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (error_code & 4) {
+               /*
+                * accessing the stack below %esp is always a bug.
+                * The "+ 32" is there due to some instructions (like
+                * pusha) doing post-decrement on the stack and that
+                * doesn't show up until later..
+                */
+               if (address + 32 < regs->esp)
+                       goto bad_area;
+       }
+       if (expand_stack(vma, address))
+               goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+       info.si_code = SEGV_ACCERR;
+       write = 0;
+       switch (error_code & 3) {
+               default:        /* 3: write, present */
+#ifdef TEST_VERIFY_AREA
+                       if (regs->cs == KERNEL_CS)
+                               printk("WP fault at %08lx\n", regs->eip);
+#endif
+                       /* fall through */
+               case 2:         /* write, not present */
+                       if (!(vma->vm_flags & VM_WRITE))
+                               goto bad_area;
+                       write++;
+                       break;
+               case 1:         /* read, present */
+                       goto bad_area;
+               case 0:         /* read, not present */
+                       if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                               goto bad_area;
+       }
+
+ survive:
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+       switch (handle_mm_fault(mm, vma, address, write)) {
+               case VM_FAULT_MINOR:
+                       tsk->min_flt++;
+                       break;
+               case VM_FAULT_MAJOR:
+                       tsk->maj_flt++;
+                       break;
+               case VM_FAULT_SIGBUS:
+                       goto do_sigbus;
+               case VM_FAULT_OOM:
+                       goto out_of_memory;
+               default:
+                       BUG();
+       }
+
+       /*
+        * Did it hit the DOS screen memory VA from vm86 mode?
+        */
+       if (regs->eflags & VM_MASK) {
+               unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
+               if (bit < 32)
+                       tsk->thread.screen_bitmap |= 1 << bit;
+       }
+       up_read(&mm->mmap_sem);
+       return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+       up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+       /* User mode accesses just cause a SIGSEGV */
+       if (error_code & 4) {
+               /* 
+                * Valid to do another page fault here because this one came 
+                * from user space.
+                */
+               if (is_prefetch(regs, address, error_code))
+                       return;
+
+               tsk->thread.cr2 = address;
+               /* Kernel addresses are always protection faults */
+               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+               tsk->thread.trap_no = 14;
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               /* info.si_code has been set above */
+               info.si_addr = (void __user *)address;
+               force_sig_info(SIGSEGV, &info, tsk);
+               return;
+       }
+
+#ifdef CONFIG_X86_F00F_BUG
+       /*
+        * Pentium F0 0F C7 C8 bug workaround.
+        */
+       if (boot_cpu_data.f00f_bug) {
+               unsigned long nr;
+               
+               nr = (address - idt_descr.address) >> 3;
+
+               if (nr == 6) {
+                       do_invalid_op(regs, 0);
+                       return;
+               }
+       }
+#endif
+
+no_context:
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs))
+               return;
+
+       /* 
+        * Valid to do another page fault here, because if this fault
+        * had been triggered by is_prefetch fixup_exception would have 
+        * handled it.
+        */
+       if (is_prefetch(regs, address, error_code))
+               return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+       bust_spinlocks(1);
+
+#ifdef CONFIG_X86_PAE
+       if (error_code & 16) {
+               pte_t *pte = lookup_address(address);
+
+               if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+                       printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
+       }
+#endif
+       if (address < PAGE_SIZE)
+               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+       else
+               printk(KERN_ALERT "Unable to handle kernel paging request");
+       printk(" at virtual address %08lx\n",address);
+       printk(KERN_ALERT " printing eip:\n");
+       printk("%08lx\n", regs->eip);
+       page = ((unsigned long *) cur_pgd)[address >> 22];
+       printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page, machine_to_phys(page));
+       /*
+        * We must not directly access the pte in the highpte
+        * case, the page table might be allocated in highmem.
+        * And lets rather not kmap-atomic the pte, just in case
+        * And let's rather not kmap-atomic the pte, just in case
+        */
+#ifndef CONFIG_HIGHPTE
+       if (page & 1) {
+               page &= PAGE_MASK;
+               address &= 0x003ff000;
+               page = machine_to_phys(page);
+               page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+               printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page, machine_to_phys(page));
+       }
+#endif
+       show_trace(NULL, (unsigned long *)&regs[1]);
+       die("Oops", regs, error_code);
+       bust_spinlocks(0);
+       do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+       up_read(&mm->mmap_sem);
+       if (tsk->pid == 1) {
+               yield();
+               down_read(&mm->mmap_sem);
+               goto survive;
+       }
+       printk("VM: killing process %s\n", tsk->comm);
+       if (error_code & 4)
+               do_exit(SIGKILL);
+       goto no_context;
+
+do_sigbus:
+       up_read(&mm->mmap_sem);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!(error_code & 4))
+               goto no_context;
+
+       /* User space => ok to do another page fault */
+       if (is_prefetch(regs, address, error_code))
+               return;
+
+       tsk->thread.cr2 = address;
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_no = 14;
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (void __user *)address;
+       force_sig_info(SIGBUS, &info, tsk);
+       return;
+
+vmalloc_fault:
+       {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int index = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pmd_t *pmd, *pmd_k;
+               pte_t *pte_k;
+
+               pgd = index + cur_pgd;
+               pgd_k = init_mm.pgd + index;
+
+               if (!pgd_present(*pgd_k))
+                       goto no_context;
+
+               /*
+                * set_pgd(pgd, *pgd_k); here would be useless on PAE
+                * and redundant with the set_pmd() on non-PAE.
+                */
+
+               pmd = pmd_offset(pgd, address);
+               pmd_k = pmd_offset(pgd_k, address);
+               if (!pmd_present(*pmd_k))
+                       goto no_context;
+               set_pmd(pmd, *pmd_k);
+               xen_flush_page_update_queue(); /* flush PMD update */
+
+               pte_k = pte_offset_kernel(pmd_k, address);
+               if (!pte_present(*pte_k))
+                       goto no_context;
+               return;
+       }
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/highmem.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/highmem.c
new file mode 100644 (file)
index 0000000..6b6fd16
--- /dev/null
@@ -0,0 +1,137 @@
+#include <linux/highmem.h>
+
+void *kmap(struct page *page)
+{
+       might_sleep();
+       if (page < highmem_start_page)
+               return page_address(page);
+       return kmap_high(page);
+}
+
+void kunmap(struct page *page)
+{
+       if (in_interrupt())
+               BUG();
+       if (page < highmem_start_page)
+               return;
+       kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
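+
+/*
+ * Typical usage (sketch only): map, touch the page, unmap, with no
+ * sleeping in between, e.g.
+ *
+ *     char *v = kmap_atomic(page, KM_USER0);
+ *     memset(v, 0, PAGE_SIZE);
+ *     kunmap_atomic(v, KM_USER0);
+ */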
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+
+       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       inc_preempt_count();
+       if (page < highmem_start_page)
+               return page_address(page);
+
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       if (!pte_none(*(kmap_pte-idx)))
+               BUG();
+#endif
+       set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+       __flush_tlb_one(vaddr);
+
+       return (void*) vaddr;
+}
+
+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection */
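+/*
+ * Note (assumption): mapping with PAGE_KERNEL_RO avoids creating a
+ * writable alias of a page-table page, which Xen expects to stay
+ * read-only once pinned.
+ */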
+void *kmap_atomic_pte(struct page *page, enum km_type type)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+
+       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       inc_preempt_count();
+       if (page < highmem_start_page)
+               return page_address(page);
+
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       if (!pte_none(*(kmap_pte-idx)))
+               BUG();
+#endif
+       set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL_RO));
+       __flush_tlb_one(vaddr);
+
+       return (void*) vaddr;
+}
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+       if (vaddr < FIXADDR_START) { // FIXME
+               dec_preempt_count();
+               preempt_check_resched();
+               return;
+       }
+
+       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+               BUG();
+
+       /*
+        * Force other mappings to Oops if they try to access this pte
+        * without first remapping it.
+        */
+       pte_clear(kmap_pte-idx);
+       __flush_tlb_one(vaddr);
+#endif
+
+       dec_preempt_count();
+       preempt_check_resched();
+}
+
+void kunmap_atomic_force(void *kvaddr, enum km_type type)
+{
+       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+       if (vaddr < FIXADDR_START) { // FIXME
+               dec_preempt_count();
+               preempt_check_resched();
+               return;
+       }
+
+       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+               BUG();
+
+       /*
+        * Force other mappings to Oops if they try to access this pte
+        * without first remapping it.
+        */
+       pte_clear(kmap_pte-idx);
+       __flush_tlb_one(vaddr);
+
+       dec_preempt_count();
+       preempt_check_resched();
+}
+
+struct page *kmap_atomic_to_page(void *ptr)
+{
+       unsigned long idx, vaddr = (unsigned long)ptr;
+       pte_t *pte;
+
+       if (vaddr < FIXADDR_START)
+               return virt_to_page(ptr);
+
+       idx = virt_to_fix(vaddr);
+       pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+       return pte_page(*pte);
+}
+
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c
new file mode 100644 (file)
index 0000000..c6078aa
--- /dev/null
@@ -0,0 +1,510 @@
+/******************************************************************************
+ * mm/hypervisor.c
+ * 
+ * Update page tables via the hypervisor.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/multicall.h>
+
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+/* Linux 2.6 isn't using the traditional batched interface. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define QUEUE_SIZE 2048
+#define pte_offset_kernel pte_offset
+#else
+#define QUEUE_SIZE 128
+#endif
+
+static mmu_update_t update_queue[QUEUE_SIZE];
+unsigned int mmu_update_queue_idx = 0;
+#define idx mmu_update_queue_idx
+
+#if MMU_UPDATE_DEBUG > 0
+page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
+#undef queue_l1_entry_update
+#undef queue_l2_entry_update
+#endif
+#if MMU_UPDATE_DEBUG > 3
+static void DEBUG_allow_pt_reads(void)
+{
+    pte_t *pte;
+    mmu_update_t update;
+    int i;
+    for ( i = idx-1; i >= 0; i-- )
+    {
+        pte = update_debug_queue[i].ptep;
+        if ( pte == NULL ) continue;
+        update_debug_queue[i].ptep = NULL;
+        update.ptr = virt_to_machine(pte);
+        update.val = update_debug_queue[i].pteval;
+        HYPERVISOR_mmu_update(&update, 1, NULL);
+    }
+}
+static void DEBUG_disallow_pt_read(unsigned long va)
+{
+    pte_t *pte;
+    pmd_t *pmd;
+    pgd_t *pgd;
+    unsigned long pteval;
+    /*
+     * We may fault because of an already outstanding update.
+     * That's okay -- it'll get fixed up in the fault handler.
+     */
+    mmu_update_t update;
+    pgd = pgd_offset_k(va);
+    pmd = pmd_offset(pgd, va);
+    pte = pte_offset_kernel(pmd, va); /* XXXcl */
+    update.ptr = virt_to_machine(pte);
+    pteval = *(unsigned long *)pte;
+    update.val = pteval & ~_PAGE_PRESENT;
+    HYPERVISOR_mmu_update(&update, 1, NULL);
+    update_debug_queue[idx].ptep = pte;
+    update_debug_queue[idx].pteval = pteval;
+}
+#endif
+
+#if MMU_UPDATE_DEBUG > 1
+#undef queue_pt_switch
+#undef queue_tlb_flush
+#undef queue_invlpg
+#undef queue_pgd_pin
+#undef queue_pgd_unpin
+#undef queue_pte_pin
+#undef queue_pte_unpin
+#undef queue_set_ldt
+#endif
+
+
+/*
+ * MULTICALL_flush_page_update_queue:
+ *   This is a version of the flush which queues as part of a multicall.
+ */
+void MULTICALL_flush_page_update_queue(void)
+{
+    unsigned long flags;
+    unsigned int _idx;
+    spin_lock_irqsave(&update_lock, flags);
+    if ( (_idx = idx) != 0 ) 
+    {
+#if MMU_UPDATE_DEBUG > 1
+        if ( idx > 1 )
+            printk("Flushing %d entries from pt update queue\n", idx);
+#endif
+#if MMU_UPDATE_DEBUG > 3
+        DEBUG_allow_pt_reads();
+#endif
+        idx = 0;
+        wmb(); /* Make sure index is cleared first to avoid double updates. */
+        queue_multicall3(__HYPERVISOR_mmu_update, 
+                         (unsigned long)update_queue, 
+                         (unsigned long)_idx, 
+                         (unsigned long)NULL);
+    }
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void __flush_page_update_queue(void)
+{
+    unsigned int _idx = idx;
+#if MMU_UPDATE_DEBUG > 1
+    if ( idx > 1 )
+        printk("Flushing %d entries from pt update queue\n", idx);
+#endif
+#if MMU_UPDATE_DEBUG > 3
+    DEBUG_allow_pt_reads();
+#endif
+    idx = 0;
+    wmb(); /* Make sure index is cleared first to avoid double updates. */
+    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
+    {
+        printk(KERN_ALERT "Failed to execute MMU updates.\n");
+        BUG();
+    }
+}
+
+void _flush_page_update_queue(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    if ( idx != 0 ) __flush_page_update_queue();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void increment_index(void)
+{
+    idx++;
+    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
+}
+
+static inline void increment_index_and_flush(void)
+{
+    idx++;
+    __flush_page_update_queue();
+}
+
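+/*
+ * The queue_* helpers below only append a request; the hypercall is made
+ * when the queue fills up or when a caller flushes explicitly. A typical
+ * sequence (sketch only) is:
+ *
+ *     queue_l1_entry_update(ptep, newval);
+ *     queue_machphys_update(mfn, pfn);
+ *     flush_page_update_queue();
+ *
+ * The xen_* variants further down queue one request and flush immediately.
+ */
+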
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+#if MMU_UPDATE_DEBUG > 3
+    DEBUG_disallow_pt_read((unsigned long)ptr);
+#endif
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pt_switch(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_tlb_flush(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].ptr |= ptr & PAGE_MASK;
+    update_queue[idx].val  = MMUEXT_INVLPG;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_set_ldt(unsigned long ptr, unsigned long len)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
+    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    update_queue[idx].val = pfn;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+/* queue and flush versions of the above */
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+#if MMU_UPDATE_DEBUG > 3
+    DEBUG_disallow_pt_read((unsigned long)ptr);
+#endif
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pt_switch(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_tlb_flush(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_invlpg(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].ptr |= ptr & PAGE_MASK;
+    update_queue[idx].val  = MMUEXT_INVLPG;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
+    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    update_queue[idx].val = pfn;
+    increment_index_and_flush();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+
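+/*
+ * Carve out a physically contiguous low-memory region, unmap it, and hand
+ * the backing machine frames back to Xen (MEMOP_decrease_reservation).
+ * Returns the kernel-virtual start of the now-empty region, or 0 if no
+ * suitable pages are free; deallocate_lowmem_region() undoes this.
+ */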
+unsigned long allocate_empty_lowmem_region(unsigned long pages)
+{
+    pgd_t         *pgd; 
+    pmd_t         *pmd;
+    pte_t         *pte;
+    unsigned long *pfn_array;
+    unsigned long  vstart;
+    unsigned long  i;
+    int            ret;
+    unsigned int   order = get_order(pages*PAGE_SIZE);
+
+    vstart = __get_free_pages(GFP_KERNEL, order);
+    if ( vstart == 0 )
+        return 0UL;
+
+    scrub_pages(vstart, 1 << order);
+
+    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+    if ( pfn_array == NULL )
+        BUG();
+
+    for ( i = 0; i < (1<<order); i++ )
+    {
+        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
+        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
+        queue_l1_entry_update(pte, 0);
+        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+    }
+
+    /* Flush updates through and flush the TLB. */
+    xen_tlb_flush();
+
+    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
+                                pfn_array, 1<<order, 0);
+    if ( unlikely(ret != (1<<order)) )
+    {
+        printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
+        BUG();
+    }
+
+    vfree(pfn_array);
+
+    return vstart;
+}
+
+void deallocate_lowmem_region(unsigned long vstart, unsigned long pages)
+{
+    pgd_t         *pgd; 
+    pmd_t         *pmd;
+    pte_t         *pte;
+    unsigned long *pfn_array;
+    unsigned long  i;
+    int            ret;
+    unsigned int   order = get_order(pages*PAGE_SIZE);
+
+    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+    if ( pfn_array == NULL )
+        BUG();
+
+    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+                                pfn_array, 1<<order, 0);
+    if ( unlikely(ret != (1<<order)) )
+    {
+        printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
+               ret);
+        BUG();
+    }
+
+    for ( i = 0; i < (1<<order); i++ )
+    {
+        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+        queue_l1_entry_update(pte, (pfn_array[i]<<PAGE_SHIFT)|__PAGE_KERNEL);
+        queue_machphys_update(pfn_array[i], __pa(vstart)>>PAGE_SHIFT);
+        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = pfn_array[i];
+    }
+
+    flush_page_update_queue();
+
+    vfree(pfn_array);
+
+    free_pages(vstart, order);
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c
new file mode 100644 (file)
index 0000000..7470411
--- /dev/null
@@ -0,0 +1,816 @@
+/*
+ *  linux/arch/i386/mm/init.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm-xen/hypervisor.h>
+
+unsigned int __VMALLOC_RESERVE = 128 << 20;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+unsigned long highstart_pfn, highend_pfn;
+
+static int noinline do_test_wp_bit(void);
+
+/*
+ * Creates a middle page table and puts a pointer to it in the
+ * given global directory entry. This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+       pmd_t *pmd_table;
+               
+#ifdef CONFIG_X86_PAE
+       pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+       set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+       if (pmd_table != pmd_offset(pgd, 0)) 
+               BUG();
+#else
+       pmd_table = pmd_offset(pgd, 0);
+#endif
+
+       return pmd_table;
+}
+
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+       if (pmd_none(*pmd)) {
+               pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+               if (page_table != pte_offset_kernel(pmd, 0))
+                       BUG();  
+
+               return page_table;
+       }
+       
+       return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This function initializes a certain range of kernel virtual memory
+ * with new bootmem page tables, wherever page tables are missing in
+ * the given range.
+ */
+
+/*
+ * NOTE: The page tables are allocated contiguously in physical space,
+ * so we can cache the location of the first one and move around without
+ * checking the pgd every time.
+ */
+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       int pgd_idx, pmd_idx;
+       unsigned long vaddr;
+
+       vaddr = start;
+       pgd_idx = pgd_index(vaddr);
+       pmd_idx = pmd_index(vaddr);
+       pgd = pgd_base + pgd_idx;
+
+       for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
+               if (pgd_none(*pgd)) 
+                       one_md_table_init(pgd);
+
+               pmd = pmd_offset(pgd, vaddr);
+               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+                       if (pmd_none(*pmd)) 
+                               one_page_table_init(pmd);
+
+                       vaddr += PMD_SIZE;
+               }
+               pmd_idx = 0;
+       }
+}
+
+void __init protect_page(pgd_t *pgd, void *page, int mode)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long addr;
+
+       addr = (unsigned long)page;
+       pgd += pgd_index(addr);
+       pmd = pmd_offset(pgd, addr);
+       pte = pte_offset_kernel(pmd, addr);
+       if (!pte_present(*pte))
+               return;
+       queue_l1_entry_update(pte, mode ? pte_val_ma(*pte) & ~_PAGE_RW :
+                                       pte_val_ma(*pte) | _PAGE_RW);
+}
+
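+/*
+ * Walk the page tables rooted at 'spgd' and write-protect (PROT_ON) or
+ * unprotect (PROT_OFF) every page-table page, using the mappings rooted
+ * at 'dpgd' to locate and update the PTEs that map them. Xen expects
+ * page tables to be read-only before they are pinned; see the calls in
+ * paging_init() below.
+ */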
+void __init protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       int pgd_idx, pmd_idx;
+
+       protect_page(dpgd, spgd, mode);
+
+       for (pgd_idx = 0; pgd_idx < PTRS_PER_PGD_NO_HV; spgd++, pgd_idx++) {
+               pmd = pmd_offset(spgd, 0);
+               if (pmd_none(*pmd))
+                       continue;
+               for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+                       pte = pte_offset_kernel(pmd, 0);
+                       protect_page(dpgd, pte, mode);
+               }
+       }
+}
+
+static inline int is_kernel_text(unsigned long addr)
+{
+       if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
+               return 1;
+       return 0;
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total 
+ * of max_low_pfn pages, by creating page tables starting from address 
+ * PAGE_OFFSET.
+ */
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+{
+       unsigned long pfn;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int pgd_idx, pmd_idx, pte_ofs;
+
+       pgd_idx = pgd_index(PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
+       pfn = 0;
+       pmd_idx = pmd_index(PAGE_OFFSET);
+       pte_ofs = pte_index(PAGE_OFFSET);
+
+       for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
+               pmd = one_md_table_init(pgd);
+               if (pfn >= max_low_pfn)
+                       continue;
+               pmd += pmd_idx;
+               for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+                       unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+                       /* Map with big pages if possible, otherwise create normal page tables. */
+                       if (cpu_has_pse) {
+                               unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
+
+                               if (is_kernel_text(address) || is_kernel_text(address2))
+                                       set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+                               else
+                                       set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+                               pfn += PTRS_PER_PTE;
+                       } else {
+                               pte = one_page_table_init(pmd);
+
+                               pte += pte_ofs;
+                               for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
+                                               if (is_kernel_text(address))
+                                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+                                               else
+                                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+                               }
+                               pte_ofs = 0;
+                       }
+                       flush_page_update_queue();
+               }
+               pmd_idx = 0;
+       }       
+}
+
+static inline int page_kills_ppro(unsigned long pagenr)
+{
+       if (pagenr >= 0x70000 && pagenr <= 0x7003F)
+               return 1;
+       return 0;
+}
+
+extern int is_available_memory(efi_memory_desc_t *);
+
+static inline int page_is_ram(unsigned long pagenr)
+{
+       int i;
+       unsigned long addr, end;
+
+       if (efi_enabled) {
+               efi_memory_desc_t *md;
+
+               for (i = 0; i < memmap.nr_map; i++) {
+                       md = &memmap.map[i];
+                       if (!is_available_memory(md))
+                               continue;
+                       addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+                       end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
+
+                       if ((pagenr >= addr) && (pagenr < end))
+                               return 1;
+               }
+               return 0;
+       }
+
+       for (i = 0; i < e820.nr_map; i++) {
+
+               if (e820.map[i].type != E820_RAM)       /* not usable memory */
+                       continue;
+               /*
+                *      !!!FIXME!!! Some BIOSen report areas as RAM that
+                *      are not. Notably the 640->1Mb area. We need a sanity
+                *      check here.
+                */
+               addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+               end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+               if  ((pagenr >= addr) && (pagenr < end))
+                       return 1;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+#define kmap_get_fixmap_pte(vaddr)                                     \
+       pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+       kmap_prot = PAGE_KERNEL;
+}
+
+void __init permanent_kmaps_init(pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long vaddr;
+
+       vaddr = PKMAP_BASE;
+       page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       pmd = pmd_offset(pgd, vaddr);
+       pte = pte_offset_kernel(pmd, vaddr);
+       pkmap_page_table = pte; 
+}
+
+void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+{
+       if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+               ClearPageReserved(page);
+               set_bit(PG_highmem, &page->flags);
+               set_page_count(page, 1);
+               __free_page(page);
+               totalhigh_pages++;
+       } else
+               SetPageReserved(page);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init set_highmem_pages_init(int bad_ppro) 
+{
+       int pfn;
+       for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
+               one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+       totalram_pages += totalhigh_pages;
+}
+#else
+extern void set_highmem_pages_init(int);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#else
+#define kmap_init() do { } while (0)
+#define permanent_kmaps_init(pgd_base) do { } while (0)
+#define set_highmem_pages_init(bad_ppro) do { } while (0)
+#endif /* CONFIG_HIGHMEM */
+
+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+
+#ifndef CONFIG_DISCONTIGMEM
+#define remap_numa_kva() do {} while (0)
+#else
+extern void __init remap_numa_kva(void);
+#endif
+
+static void __init pagetable_init (void)
+{
+       unsigned long vaddr;
+       pgd_t *pgd_base = swapper_pg_dir;
+
+#ifdef CONFIG_X86_PAE
+       int i;
+       /* Init entries of the first-level page table to the zero page */
+       for (i = 0; i < PTRS_PER_PGD; i++)
+               set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse) {
+               set_in_cr4(X86_CR4_PSE);
+       }
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __PAGE_KERNEL |= _PAGE_GLOBAL;
+               __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
+       }
+
+       kernel_physical_mapping_init(pgd_base);
+       remap_numa_kva();
+
+       /*
+        * Fixed mappings, only the page table structure has to be
+        * created - mappings will be set by set_fixmap():
+        */
+       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+       page_table_range_init(vaddr, 0, pgd_base);
+
+       permanent_kmaps_init(pgd_base);
+
+#ifdef CONFIG_X86_PAE
+       /*
+        * Add low memory identity-mappings - SMP needs it when
+        * starting up on an AP from real-mode. In the non-PAE
+        * case we already have these mappings through head.S.
+        * All user-space mappings are explicitly cleared after
+        * SMP startup.
+        */
+       pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
+#endif
+}
+
+#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
+/*
+ * Swap suspend & friends need this for resume because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+       __attribute__ ((aligned (PAGE_SIZE)));
+
+static inline void save_pg_dir(void)
+{
+       memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+}
+#else
+static inline void save_pg_dir(void)
+{
+}
+#endif
+
+void zap_low_mappings (void)
+{
+       int i;
+
+       save_pg_dir();
+
+       /*
+        * Zap initial low-memory mappings.
+        *
+        * Note that "pgd_clear()" doesn't do it for
+        * us, because pgd_clear() is a no-op on i386.
+        */
+       for (i = 0; i < USER_PTRS_PER_PGD; i++)
+#ifdef CONFIG_X86_PAE
+               set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
+#else
+               set_pgd(swapper_pg_dir+i, __pgd(0));
+#endif
+       flush_tlb_all();
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init zone_sizes_init(void)
+{
+       unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+       unsigned int max_dma, high, low;
+       
+       max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       low = max_low_pfn;
+       high = highend_pfn;
+       
+       if (low < max_dma)
+               zones_size[ZONE_DMA] = low;
+       else {
+               zones_size[ZONE_DMA] = max_dma;
+               zones_size[ZONE_NORMAL] = low - max_dma;
+#ifdef CONFIG_HIGHMEM
+               zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+       }
+       free_area_init(zones_size);     
+}
+#else
+extern void zone_sizes_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static int disable_nx __initdata = 0;
+u64 __supported_pte_mask = ~_PAGE_NX;
+
+/*
+ * noexec = on|off
+ *
+ * Control non executable mappings.
+ *
+ * on      Enable
+ * off     Disable
+ */
+static int __init noexec_setup(char *str)
+{
+       if (!strncmp(str, "on",2) && cpu_has_nx) {
+               __supported_pte_mask |= _PAGE_NX;
+               disable_nx = 0;
+       } else if (!strncmp(str,"off",3)) {
+               disable_nx = 1;
+               __supported_pte_mask &= ~_PAGE_NX;
+       }
+       return 1;
+}
+
+__setup("noexec=", noexec_setup);
+
+#ifdef CONFIG_X86_PAE
+int nx_enabled = 0;
+
+static void __init set_nx(void)
+{
+       unsigned int v[4], l, h;
+
+       if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+               cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+               if ((v[3] & (1 << 20)) && !disable_nx) {
+                       rdmsr(MSR_EFER, l, h);
+                       l |= EFER_NX;
+                       wrmsr(MSR_EFER, l, h);
+                       nx_enabled = 1;
+                       __supported_pte_mask |= _PAGE_NX;
+               }
+       }
+}
+
+/*
+ * Enables/disables executability of a given kernel page and
+ * returns the previous setting.
+ */
+int __init set_kernel_exec(unsigned long vaddr, int enable)
+{
+       pte_t *pte;
+       int ret = 1;
+
+       if (!nx_enabled)
+               goto out;
+
+       pte = lookup_address(vaddr);
+       BUG_ON(!pte);
+
+       if (!pte_exec_kernel(*pte))
+               ret = 0;
+
+       if (enable)
+               pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+       else
+               pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+       __flush_tlb_all();
+out:
+       return ret;
+}
+
+#endif
+
+/*
+ * paging_init() sets up the page tables - note that the first 8MB are
+ * already mapped by head.S.
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+void __init paging_init(void)
+{
+       pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
+       pgd_t *new_pgd = swapper_pg_dir;
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+       int i;
+#endif
+
+#ifdef CONFIG_X86_PAE
+       set_nx();
+       if (nx_enabled)
+               printk("NX (Execute Disable) protection: active\n");
+#endif
+
+       pagetable_init();
+
+       /*
+        * Write-protect both sets of page tables within both sets of
+        * page tables. That's three ops, as the old p.t. is already
+        * protected within itself. Then pin the new table, switch
+        * tables, and unprotect the old table.
+        */
+       protect_pagetable(new_pgd, old_pgd, PROT_ON);
+       protect_pagetable(new_pgd, new_pgd, PROT_ON);
+       protect_pagetable(old_pgd, new_pgd, PROT_ON);
+       queue_pgd_pin(__pa(new_pgd));
+       load_cr3(new_pgd);
+       queue_pgd_unpin(__pa(old_pgd));
+       __flush_tlb_all(); /* implicit flush */
+       protect_pagetable(new_pgd, old_pgd, PROT_OFF);
+       flush_page_update_queue();
+
+       /* Completely detached from old tables, so free them. */
+       free_bootmem(__pa(old_pgd), xen_start_info.nr_pt_frames << PAGE_SHIFT);
+
+#ifdef CONFIG_X86_PAE
+       /*
+        * We will bail out later - printk doesn't work right now so
+        * the user would just see a hanging kernel.
+        */
+       if (cpu_has_pae)
+               set_in_cr4(X86_CR4_PAE);
+#endif
+       __flush_tlb_all();
+
+       kmap_init();
+       zone_sizes_init();
+
+       /* Switch to the real shared_info page, and clear the dummy page. */
+       flush_page_update_queue();
+       set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+       memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+       /* Setup mapping of lower 1st MB */
+       for (i = 0; i < NR_FIX_ISAMAPS; i++)
+               if (xen_start_info.flags & SIF_PRIVILEGED)
+                       set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+               else
+                       set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+                                        virt_to_machine(empty_zero_page));
+#endif
+}
+
+/*
+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
+ * used to involve black magic jumps to work around some nasty CPU bugs,
+ * but fortunately the switch to using exceptions got rid of all that.
+ */
+
+void __init test_wp_bit(void)
+{
+       printk("Checking if this processor honours the WP bit even in supervisor mode... ");
+
+       /* Any page-aligned address will do, the test is non-destructive */
+       __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+       boot_cpu_data.wp_works_ok = do_test_wp_bit();
+       clear_fixmap(FIX_WP_TEST);
+
+       if (!boot_cpu_data.wp_works_ok) {
+               printk("No.\n");
+#ifdef CONFIG_X86_WP_WORKS_OK
+               panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
+#endif
+       } else {
+               printk("Ok.\n");
+       }
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init set_max_mapnr_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+       highmem_start_page = pfn_to_page(highstart_pfn);
+       max_mapnr = num_physpages = highend_pfn;
+#else
+       max_mapnr = num_physpages = max_low_pfn;
+#endif
+}
+#define __free_all_bootmem() free_all_bootmem()
+#else
+#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
+extern void set_max_mapnr_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static struct kcore_list kcore_mem, kcore_vmalloc; 
+
+void __init mem_init(void)
+{
+       extern int ppro_with_ram_bug(void);
+       int codesize, reservedpages, datasize, initsize;
+       int tmp;
+       int bad_ppro;
+
+#ifndef CONFIG_DISCONTIGMEM
+       if (!mem_map)
+               BUG();
+#endif
+       
+       bad_ppro = ppro_with_ram_bug();
+
+#ifdef CONFIG_HIGHMEM
+       /* check that fixmap and pkmap do not overlap */
+       if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+               printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+               printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+                               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+               BUG();
+       }
+#endif
+       set_max_mapnr_init();
+
+#ifdef CONFIG_HIGHMEM
+       high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
+#else
+       high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+#endif
+
+       /* this will put all low memory onto the freelists */
+       totalram_pages += __free_all_bootmem();
+
+       reservedpages = 0;
+       for (tmp = 0; tmp < max_low_pfn; tmp++)
+               /*
+                * Only count reserved RAM pages
+                */
+               if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+                       reservedpages++;
+
+       set_highmem_pages_init(bad_ppro);
+
+       codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+       datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
+       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
+                  VMALLOC_END-VMALLOC_START);
+
+       printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+               num_physpages << (PAGE_SHIFT-10),
+               codesize >> 10,
+               reservedpages << (PAGE_SHIFT-10),
+               datasize >> 10,
+               initsize >> 10,
+               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+              );
+
+#ifdef CONFIG_X86_PAE
+       if (!cpu_has_pae)
+               panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
+#endif
+       if (boot_cpu_data.wp_works_ok < 0)
+               test_wp_bit();
+
+       /*
+        * Subtle. SMP is doing its boot stuff late (because it has to
+        * fork idle threads) - but it also needs low mappings for the
+        * protected-mode entry to work. We zap these entries only after
+        * the WP-bit has been tested.
+        */
+#ifndef CONFIG_SMP
+       zap_low_mappings();
+#endif
+}
+
+kmem_cache_t *pgd_cache;
+kmem_cache_t *pmd_cache;
+kmem_cache_t *pte_cache;
+
+void __init pgtable_cache_init(void)
+{
+       pte_cache = kmem_cache_create("pte",
+                               PTRS_PER_PTE*sizeof(pte_t),
+                               PTRS_PER_PTE*sizeof(pte_t),
+                               0,
+                               pte_ctor,
+                               pte_dtor);
+       if (!pte_cache)
+               panic("pgtable_cache_init(): Cannot create pte cache");
+       if (PTRS_PER_PMD > 1) {
+               pmd_cache = kmem_cache_create("pmd",
+                                       PTRS_PER_PMD*sizeof(pmd_t),
+                                       PTRS_PER_PMD*sizeof(pmd_t),
+                                       0,
+                                       pmd_ctor,
+                                       NULL);
+               if (!pmd_cache)
+                       panic("pgtable_cache_init(): cannot create pmd cache");
+       }
+       pgd_cache = kmem_cache_create("pgd",
+                               PTRS_PER_PGD*sizeof(pgd_t),
+                               PTRS_PER_PGD*sizeof(pgd_t),
+                               0,
+                               pgd_ctor,
+                               pgd_dtor);
+       if (!pgd_cache)
+               panic("pgtable_cache_init(): Cannot create pgd cache");
+}
+
+/*
+ * This function cannot be __init, since exceptions don't work in that
+ * section.  Put this after the callers, so that it cannot be inlined.
+ */
+static int noinline do_test_wp_bit(void)
+{
+       char tmp_reg;
+       int flag;
+
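+       /*
+        * Write back to the read-only FIX_WP_TEST page. If WP is honoured
+        * in supervisor mode, the write at label 1 faults and the fixup in
+        * the exception table resumes at label 2, skipping the xorl, so
+        * 'flag' keeps its initial value of 1. Otherwise the write goes
+        * through and flag is cleared to 0.
+        */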
+       __asm__ __volatile__(
+               "       movb %0,%1      \n"
+               "1:     movb %1,%0      \n"
+               "       xorl %2,%2      \n"
+               "2:                     \n"
+               ".section __ex_table,\"a\"\n"
+               "       .align 4        \n"
+               "       .long 1b,2b     \n"
+               ".previous              \n"
+               :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
+                "=q" (tmp_reg),
+                "=r" (flag)
+               :"2" (1)
+               :"memory");
+       
+       return flag;
+}
+
+void free_initmem(void)
+{
+       unsigned long addr;
+
+       addr = (unsigned long)(&__init_begin);
+       for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
+               free_page(addr);
+               totalram_pages++;
+       }
+       printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       if (start < end)
+               printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
+               free_page(start);
+               totalram_pages++;
+       }
+}
+#endif
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c
new file mode 100644 (file)
index 0000000..06a0255
--- /dev/null
@@ -0,0 +1,481 @@
+/*
+ * arch/i386/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+
+#ifndef CONFIG_XEN_PHYSDEV_ACCESS
+
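+/*
+ * Note (assumption): without CONFIG_XEN_PHYSDEV_ACCESS this kernel has no
+ * access to machine I/O memory, so the ioremap family collapses to stubs
+ * that map nothing.
+ */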
+void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{ return NULL; }
+
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{ return NULL; }
+
+void iounmap(void *addr)
+{ }
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{ return NULL; }
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{ }
+
+#else
+
+static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+       unsigned long phys_addr, unsigned long flags)
+{
+       unsigned long end;
+       unsigned long pfn;
+
+       address &= ~PMD_MASK;
+       end = address + size;
+       if (end > PMD_SIZE)
+               end = PMD_SIZE;
+       if (address >= end)
+               BUG();
+       pfn = phys_addr >> PAGE_SHIFT;
+       do {
+               if (!pte_none(*pte)) {
+                       printk("remap_area_pte: page already exists\n");
+                       BUG();
+               }
+               set_pte(pte, pfn_pte_ma(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW | 
+                                       _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+               address += PAGE_SIZE;
+               pfn++;
+               pte++;
+       } while (address && (address < end));
+}
+
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+       unsigned long phys_addr, unsigned long flags)
+{
+       unsigned long end;
+
+       address &= ~PGDIR_MASK;
+       end = address + size;
+       if (end > PGDIR_SIZE)
+               end = PGDIR_SIZE;
+       phys_addr -= address;
+       if (address >= end)
+               BUG();
+       do {
+               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               if (!pte)
+                       return -ENOMEM;
+               remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+               address = (address + PMD_SIZE) & PMD_MASK;
+               pmd++;
+       } while (address && (address < end));
+       return 0;
+}
+
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+                                unsigned long size, unsigned long flags)
+{
+       int error;
+       pgd_t * dir;
+       unsigned long end = address + size;
+
+       phys_addr -= address;
+       dir = pgd_offset(&init_mm, address);
+       flush_cache_all();
+       if (address >= end)
+               BUG();
+       spin_lock(&init_mm.page_table_lock);
+       do {
+               pmd_t *pmd;
+               pmd = pmd_alloc(&init_mm, dir, address);
+               error = -ENOMEM;
+               if (!pmd)
+                       break;
+               if (remap_area_pmd(pmd, address, end - address,
+                                        phys_addr + address, flags))
+                       break;
+               error = 0;
+               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (address && (address < end));
+       spin_unlock(&init_mm.page_table_lock);
+       flush_tlb_all();
+       return error;
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+       void __iomem * addr;
+       struct vm_struct * area;
+       unsigned long offset, last_addr;
+
+       /* Don't allow wraparound or zero size */
+       last_addr = phys_addr + size - 1;
+       if (!size || last_addr < phys_addr)
+               return NULL;
+
+        if (phys_addr >= 0x0 && last_addr < 0x100000)
+                return isa_bus_to_virt(phys_addr);
+
+       /*
+        * Don't remap the low PCI/ISA area, it's always mapped..
+        */
+       if (phys_addr >= 0xA0000 && last_addr < 0x100000)
+               return (void __iomem *) phys_to_virt(phys_addr);
+
+       /*
+        * Don't allow anybody to remap normal RAM that we're using..
+        */
+       if (machine_to_phys(phys_addr) < virt_to_phys(high_memory)) {
+               char *t_addr, *t_end;
+               struct page *page;
+
+               t_addr = bus_to_virt(phys_addr);
+               t_end = t_addr + (size - 1);
+          
+               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+                       if(!PageReserved(page))
+                               return NULL;
+       }
+
+       /*
+        * Mappings have to be page-aligned
+        */
+       offset = phys_addr & ~PAGE_MASK;
+       phys_addr &= PAGE_MASK;
+       size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+       /*
+        * Ok, go for it..
+        */
+       area = get_vm_area(size, VM_IOREMAP);
+       if (!area)
+               return NULL;
+       area->phys_addr = phys_addr;
+       addr = (void __iomem *) area->addr;
+       if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+               vunmap((void __force *) addr);
+               return NULL;
+       }
+       return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+
+/**
+ * ioremap_nocache     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address. 
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many 
+ * busses. In particular, driver authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ * 
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+       unsigned long last_addr;
+       void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+       if (!p) 
+               return p; 
+
+       /* Guaranteed to be > phys_addr, as per __ioremap() */
+       last_addr = phys_addr + size - 1;
+
+       if (machine_to_phys(last_addr) < virt_to_phys(high_memory)) { 
+               struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
+               unsigned long npages;
+
+               phys_addr &= PAGE_MASK;
+
+               /* This might overflow and become zero.. */
+               last_addr = PAGE_ALIGN(last_addr);
+
+               /* .. but that's ok, because modulo-2**n arithmetic will make
+               * the page-aligned "last - first" come out right.
+               */
+               npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+
+               if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
+                       iounmap(p); 
+                       p = NULL;
+               }
+               global_flush_tlb();
+       }
+
+       return p;                                       
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+       struct vm_struct *p;
+        if ((unsigned long)addr <= 0x100000)
+                return;
+       if ((void __force *) addr <= high_memory) 
+               return; 
+       p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+       if (!p) { 
+               printk("__iounmap: bad address %p\n", addr);
+               return;
+       } 
+
+       if (p->flags && machine_to_phys(p->phys_addr) < virt_to_phys(high_memory)) { 
+               change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
+                                p->size >> PAGE_SHIFT,
+                                PAGE_KERNEL);                           
+               global_flush_tlb();
+       } 
+       kfree(p); 
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+       unsigned long offset, last_addr;
+       unsigned int nrpages;
+       enum fixed_addresses idx;
+
+       /* Don't allow wraparound or zero size */
+       last_addr = phys_addr + size - 1;
+       if (!size || last_addr < phys_addr)
+               return NULL;
+
+        if (phys_addr >= 0x0 && last_addr < 0x100000)
+                return isa_bus_to_virt(phys_addr);
+
+       /*
+        * Don't remap the low PCI/ISA area, it's always mapped..
+        */
+       if (phys_addr >= 0xA0000 && last_addr < 0x100000)
+               return phys_to_virt(phys_addr);
+
+       /*
+        * Mappings have to be page-aligned
+        */
+       offset = phys_addr & ~PAGE_MASK;
+       phys_addr &= PAGE_MASK;
+       size = PAGE_ALIGN(last_addr) - phys_addr;
+
+       /*
+        * Mappings have to fit in the FIX_BTMAP area.
+        */
+       nrpages = size >> PAGE_SHIFT;
+       if (nrpages > NR_FIX_BTMAPS)
+               return NULL;
+
+       /*
+        * Ok, go for it..
+        */
+       idx = FIX_BTMAP_BEGIN;
+       while (nrpages > 0) {
+               set_fixmap_ma(idx, phys_addr);
+               phys_addr += PAGE_SIZE;
+               --idx;
+               --nrpages;
+       }
+       return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
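
bt_ioremap() hands out boot-time mappings from a small window of fixmap
slots. Fixmap virtual addresses grow downwards from the top of the address
space, so the loop decrements the slot index while the physical address
increases, which keeps the mapped pages virtually contiguous. A sketch of
that relationship, assuming the usual i386 formula fix_to_virt(idx) =
FIXADDR_TOP - (idx << PAGE_SHIFT); the constants are illustrative, not taken
from this patch:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define FIXADDR_TOP  0xfffff000UL   /* illustrative value */
    #define fix_to_virt(idx) \
        (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

    int main(void)
    {
        unsigned int fix_btmap_begin = 20;   /* hypothetical slot number */

        /* Walking the index downwards yields increasing virtual addresses,
         * one page apart, exactly what the bt_ioremap() loop needs. */
        for (unsigned int idx = fix_btmap_begin;
             idx > fix_btmap_begin - 4; idx--)
            printf("idx %u -> virt %#lx\n", idx, fix_to_virt(idx));
        return 0;
    }
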
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+       unsigned long virt_addr;
+       unsigned long offset;
+       unsigned int nrpages;
+       enum fixed_addresses idx;
+
+       virt_addr = (unsigned long)addr;
+        if (virt_addr < 0x100000)
+                return;
+       if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+               return;
+       offset = virt_addr & ~PAGE_MASK;
+       nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+       idx = FIX_BTMAP_BEGIN;
+       while (nrpages > 0) {
+               clear_fixmap(idx);
+               --idx;
+               --nrpages;
+       }
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+static inline void direct_remap_area_pte(pte_t *pte, 
+                                        unsigned long address, 
+                                        unsigned long size,
+                                       mmu_update_t **v)
+{
+    unsigned long end;
+
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    if (address >= end)
+        BUG();
+
+    do {
+        (*v)->ptr = virt_to_machine(pte);
+        (*v)++;
+        address += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+                                        pmd_t *pmd, 
+                                        unsigned long address, 
+                                        unsigned long size,
+                                       mmu_update_t **v)
+{
+    unsigned long end;
+
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    if (address >= end)
+        BUG();
+    do {
+        pte_t *pte = pte_alloc_map(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remap_area_pte(pte, address, end - address, v);
+       pte_unmap(pte);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
+}
+int __direct_remap_area_pages(struct mm_struct *mm,
+                             unsigned long address, 
+                             unsigned long size, 
+                             mmu_update_t *v)
+{
+    pgd_t * dir;
+    unsigned long end = address + size;
+
+    dir = pgd_offset(mm, address);
+    flush_cache_all();
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, address);
+        if (!pmd)
+           return -ENOMEM;
+        direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+
+    } while (address && (address < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_all();
+    return 0;
+}
+
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                            unsigned long address, 
+                            unsigned long machine_addr,
+                            unsigned long size, 
+                            pgprot_t prot,
+                            domid_t  domid)
+{
+    int i;
+    unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+
+    u[0].ptr  = MMU_EXTENDED_COMMAND;
+    u[0].val  = MMUEXT_SET_FOREIGNDOM;
+    u[0].val |= (unsigned long)domid << 16;
+    v = w = &u[1];
+
+    start_address = address;
+
+    for( i = 0; i < size; i += PAGE_SIZE )
+    {
+       if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
+       {
+           /* Fill in the PTE pointers. */
+           __direct_remap_area_pages( mm,
+                                      start_address, 
+                                      address-start_address, 
+                                      w);
+           
+           if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
+               return -EFAULT;     
+           v = w;
+           start_address = address;
+       }
+
+       /*
+         * Fill in the machine address: PTE ptr is done later by
+         * __direct_remap_area_pages(). 
+         */
+        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+        machine_addr += PAGE_SIZE;
+        address += PAGE_SIZE; 
+        v++;
+    }
+
+    if ( v != w )
+    {
+       /* get the ptep's filled in */
+       __direct_remap_area_pages(mm,
+                                  start_address, 
+                                  address-start_address, 
+                                  w);   
+       if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
+           return -EFAULT;         
+    }
+    
+    return 0;
+}
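
direct_remap_area_pages() batches its work: it fills a fixed-size array of
mmu_update_t entries with the machine addresses to install, lets
__direct_remap_area_pages() add the PTE pointers, and hands the batch to
HYPERVISOR_mmu_update() whenever the queue fills or the loop ends. The shape
of that batching, reduced to a standalone sketch (it omits the foreign-domain
command the real code keeps in u[0], and apply_batch() merely stands in for
the hypercall):

    #include <stdio.h>

    #define QUEUE_LEN 130   /* mirrors MAX_DIRECTMAP_MMU_QUEUE above */

    struct update { unsigned long ptr, val; };

    static int apply_batch(struct update *u, int count)
    {
        (void)u;
        printf("flushing %d updates\n", count);
        return 0;
    }

    int main(void)
    {
        struct update u[QUEUE_LEN], *v = u;
        unsigned long machine_addr = 0x100000, page_size = 4096;

        for (int i = 0; i < 1000; i++) {       /* 1000 pages to map */
            if (v - u == QUEUE_LEN) {          /* queue full: flush */
                apply_batch(u, v - u);
                v = u;
            }
            v->val = machine_addr;             /* value for the PTE */
            /* v->ptr (which PTE to write) is filled in a second pass
             * once the page tables for the range exist. */
            machine_addr += page_size;
            v++;
        }
        if (v != u)                            /* final partial batch */
            apply_batch(u, v - u);
        return 0;
    }
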
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pageattr.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pageattr.c
new file mode 100644 (file)
index 0000000..19f9d9c
--- /dev/null
@@ -0,0 +1,215 @@
+/* 
+ * Copyright 2002 Andi Kleen, SuSE Labs. 
+ * Thanks to Ben LaHaise for precious feedback.
+ */ 
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+
+static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head df_list = LIST_HEAD_INIT(df_list);
+
+
+pte_t *lookup_address(unsigned long address) 
+{ 
+       pgd_t *pgd = pgd_offset_k(address); 
+       pmd_t *pmd;
+       if (pgd_none(*pgd))
+               return NULL;
+       pmd = pmd_offset(pgd, address);                
+       if (pmd_none(*pmd))
+               return NULL;
+       if (pmd_large(*pmd))
+               return (pte_t *)pmd;
+        return pte_offset_kernel(pmd, address);
+} 
+
+static struct page *split_large_page(unsigned long address, pgprot_t prot)
+{ 
+       int i; 
+       unsigned long addr;
+       struct page *base;
+       pte_t *pbase;
+
+       spin_unlock_irq(&cpa_lock);
+       base = alloc_pages(GFP_KERNEL, 0);
+       spin_lock_irq(&cpa_lock);
+       if (!base) 
+               return NULL;
+
+       address = __pa(address);
+       addr = address & LARGE_PAGE_MASK; 
+       pbase = (pte_t *)page_address(base);
+       for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+               pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
+                                  addr == address ? prot : PAGE_KERNEL);
+       }
+       return base;
+} 
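
split_large_page() replaces one 2/4MB mapping with a freshly allocated table
of small PTEs; every entry keeps PAGE_KERNEL except the single page whose
address matched the request, which receives the new protection. A standalone
sketch of the index arithmetic, assuming non-PAE i386 (1024 PTEs per table;
the address is illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define PAGE_SIZE        (1UL << PAGE_SHIFT)
    #define PTRS_PER_PTE     1024
    #define LARGE_PAGE_MASK  (~((unsigned long)PTRS_PER_PTE * PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long address = 0x00c05000UL;  /* page to single out */
        unsigned long addr = address & LARGE_PAGE_MASK;
        int special = -1;

        for (int i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
            if (addr == address)
                special = i;   /* gets 'prot'; the rest keep PAGE_KERNEL */

        printf("entry %d of %d carries the new protection\n",
               special, PTRS_PER_PTE);
        return 0;
    }
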
+
+static void flush_kernel_map(void *dummy) 
+{ 
+       /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
+       if (boot_cpu_data.x86_model >= 4) 
+               wbinvd();
+       /* Flush all to work around errata in early Athlons regarding
+        * large page flushing.
+        */
+       __flush_tlb_all();      
+}
+
+static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
+{ 
+       struct page *page;
+       unsigned long flags;
+
+       set_pte_atomic(kpte, pte);      /* change init_mm */
+       if (PTRS_PER_PMD > 1)
+               return;
+
+       spin_lock_irqsave(&pgd_lock, flags);
+       for (page = pgd_list; page; page = (struct page *)page->index) {
+               pgd_t *pgd;
+               pmd_t *pmd;
+               pgd = (pgd_t *)page_address(page) + pgd_index(address);
+               pmd = pmd_offset(pgd, address);
+               set_pte_atomic((pte_t *)pmd, pte);
+       }
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+/* 
+ * No more special protections in this 2/4MB area - revert to a
+ * large page again. 
+ */
+static inline void revert_page(struct page *kpte_page, unsigned long address)
+{
+       pte_t *linear = (pte_t *) 
+               pmd_offset(pgd_offset(&init_mm, address), address);
+       set_pmd_pte(linear,  address,
+                   pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
+                           PAGE_KERNEL_LARGE));
+}
+
+static int
+__change_page_attr(struct page *page, pgprot_t prot)
+{ 
+       pte_t *kpte; 
+       unsigned long address;
+       struct page *kpte_page;
+
+#ifdef CONFIG_HIGHMEM
+       if (page >= highmem_start_page) 
+               BUG(); 
+#endif
+       address = (unsigned long)page_address(page);
+
+       kpte = lookup_address(address);
+       if (!kpte)
+               return -EINVAL;
+       kpte_page = virt_to_page(kpte);
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
+               if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+                       pte_t old = *kpte;
+                       pte_t standard = mk_pte(page, PAGE_KERNEL); 
+                       set_pte_atomic(kpte, mk_pte(page, prot)); 
+                       if (pte_same(old,standard))
+                               get_page(kpte_page);
+               } else {
+                       struct page *split = split_large_page(address, prot); 
+                       if (!split)
+                               return -ENOMEM;
+                       get_page(kpte_page);
+                       set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+               }       
+       } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+               set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
+               __put_page(kpte_page);
+       }
+
+       if (cpu_has_pse && (page_count(kpte_page) == 1)) {
+               list_add(&kpte_page->lru, &df_list);
+               revert_page(kpte_page, address);
+       } 
+       return 0;
+} 
+
+static inline void flush_map(void)
+{
+       on_each_cpu(flush_kernel_map, NULL, 1, 1);
+}
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ * 
+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
+ * This function only deals with the kernel linear map.
+ * 
+ * Caller must call global_flush_tlb() after this.
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+       int err = 0; 
+       int i; 
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpa_lock, flags);
+       for (i = 0; i < numpages; i++, page++) { 
+               err = __change_page_attr(page, prot);
+               if (err) 
+                       break; 
+       }       
+       spin_unlock_irqrestore(&cpa_lock, flags);
+       return err;
+}
+
+void global_flush_tlb(void)
+{ 
+       LIST_HEAD(l);
+       struct list_head* n;
+
+       BUG_ON(irqs_disabled());
+
+       spin_lock_irq(&cpa_lock);
+       list_splice_init(&df_list, &l);
+       spin_unlock_irq(&cpa_lock);
+       flush_map();
+       n = l.next;
+       while (n != &l) {
+               struct page *pg = list_entry(n, struct page, lru);
+               n = n->next;
+               __free_page(pg);
+       }
+} 
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       if (PageHighMem(page))
+               return;
+       /* the return value is ignored - the calls cannot fail,
+        * large pages are disabled at boot time.
+        */
+       change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+       /* We should perform an IPI and flush all TLBs,
+        * but that can deadlock, so flush only the current CPU.
+        */
+       __flush_tlb_all();
+}
+EXPORT_SYMBOL(kernel_map_pages);
+#endif
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c
new file mode 100644 (file)
index 0000000..4516df1
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ *  linux/arch/i386/mm/pgtable.c
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/io.h>
+
+void show_mem(void)
+{
+       int total = 0, reserved = 0;
+       int shared = 0, cached = 0;
+       int highmem = 0;
+       struct page *page;
+       pg_data_t *pgdat;
+       unsigned long i;
+
+       printk("Mem-info:\n");
+       show_free_areas();
+       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       for_each_pgdat(pgdat) {
+               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+                       page = pgdat->node_mem_map + i;
+                       total++;
+                       if (PageHighMem(page))
+                               highmem++;
+                       if (PageReserved(page))
+                               reserved++;
+                       else if (PageSwapCache(page))
+                               cached++;
+                       else if (page_count(page))
+                               shared += page_count(page) - 1;
+               }
+       }
+       printk("%d pages of RAM\n", total);
+       printk("%d pages of HIGHMEM\n",highmem);
+       printk("%d reserved pages\n",reserved);
+       printk("%d pages shared\n",shared);
+       printk("%d pages swap cached\n",cached);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame 
+ * and protection flags for that frame.
+ */ 
+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               BUG();
+               return;
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       if (pmd_none(*pmd)) {
+               BUG();
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       /* <pfn,flags> stored as-is, to permit clearing entries */
+       set_pte(pte, pfn_pte(pfn, flags));
+
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame 
+ * and protection flags for that frame.
+ */ 
+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+                          pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               BUG();
+               return;
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       if (pmd_none(*pmd)) {
+               BUG();
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       /* <pfn,flags> stored as-is, to permit clearing entries */
+       set_pte(pte, pfn_pte_ma(pfn, flags));
+
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a large virtual page frame with a given physical page frame 
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned. 
+ * The pmd must already be instantiated. Assumes PAE mode.
+ */ 
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+
+       if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
+               printk ("set_pmd_pfn: vaddr misaligned\n");
+               return; /* BUG(); */
+       }
+       if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
+               printk ("set_pmd_pfn: pfn misaligned\n");
+               return; /* BUG(); */
+       }
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               printk ("set_pmd_pfn: pgd_none\n");
+               return; /* BUG(); */
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       set_pmd(pmd, pfn_pmd(pfn, flags));
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
+}
+
+void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       if (pte) {
+               clear_page(pte);
+               __make_page_readonly(pte);
+               xen_flush_page_update_queue();
+       }
+       return pte;
+}
+
+void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+       struct page *page = virt_to_page(pte);
+       SetPageForeign(page, pte_free);
+       set_page_count(page, 1);
+
+       clear_page(pte);
+       __make_page_readonly(pte);
+       queue_pte_pin(virt_to_phys(pte));
+       flush_page_update_queue();
+}
+
+void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+       struct page *page = virt_to_page(pte);
+       ClearPageForeign(page);
+
+       queue_pte_unpin(virt_to_phys(pte));
+       __make_page_writable(pte);
+       flush_page_update_queue();
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       pte_t *ptep;
+
+#ifdef CONFIG_HIGHPTE
+       struct page *pte;
+
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       if (pte == NULL)
+               return pte;
+       if (pte >= highmem_start_page) {
+               clear_highpage(pte);
+               return pte;
+       }
+       /* not a highmem page -- free page and grab one from the cache */
+       __free_page(pte);
+#endif
+       ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
+       if (ptep)
+               return virt_to_page(ptep);
+       return NULL;
+}
+
+void pte_free(struct page *pte)
+{
+       set_page_count(pte, 1);
+#ifdef CONFIG_HIGHPTE
+       if (pte < highmem_start_page)
+#endif
+               kmem_cache_free(pte_cache,
+                               phys_to_virt(page_to_pseudophys(pte)));
+#ifdef CONFIG_HIGHPTE
+       else
+               __free_page(pte);
+#endif
+}
+
+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+{
+       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+}
+
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * If the locking proves to be non-performant, a ticketing scheme with
+ * checks at dup_mmap(), exec(), and other mmlist addition points
+ * could be used. The locking scheme was chosen on the basis of
+ * manfred's recommendations and having no core impact whatsoever.
+ * -- wli
+ */
+spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
+struct page *pgd_list;
+
+static inline void pgd_list_add(pgd_t *pgd)
+{
+       struct page *page = virt_to_page(pgd);
+       page->index = (unsigned long)pgd_list;
+       if (pgd_list)
+               pgd_list->private = (unsigned long)&page->index;
+       pgd_list = page;
+       page->private = (unsigned long)&pgd_list;
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+       struct page *next, **pprev, *page = virt_to_page(pgd);
+       next = (struct page *)page->index;
+       pprev = (struct page **)page->private;
+       *pprev = next;
+       if (next)
+               next->private = (unsigned long)pprev;
+}
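
pgd_list_add() and pgd_list_del() build an intrusive list out of struct page
fields: page->index plays the role of the next pointer and page->private
stores the address of whichever field currently points at this page, so
deletion never has to walk the list. The same pattern with ordinary named
fields, as a standalone sketch (the struct and function names are
illustrative):

    #include <stdio.h>

    struct node {
        struct node  *next;    /* plays the role of page->index   */
        struct node **pprev;   /* plays the role of page->private */
        int           id;
    };

    static struct node *list_head;

    static void node_add(struct node *n)
    {
        n->next = list_head;
        if (list_head)
            list_head->pprev = &n->next;
        list_head = n;
        n->pprev = &list_head;
    }

    static void node_del(struct node *n)
    {
        *n->pprev = n->next;
        if (n->next)
            n->next->pprev = n->pprev;
    }

    int main(void)
    {
        struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        node_add(&a); node_add(&b); node_add(&c);
        node_del(&b);                      /* unlink the middle node */
        for (struct node *n = list_head; n; n = n->next)
            printf("node %d\n", n->id);    /* prints 3 then 1        */
        return 0;
    }
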
+
+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+       unsigned long flags;
+
+       if (PTRS_PER_PMD == 1)
+               spin_lock_irqsave(&pgd_lock, flags);
+
+       memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+                       swapper_pg_dir + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+       if (PTRS_PER_PMD > 1)
+               goto out;
+
+       pgd_list_add(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
+       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+ out:
+       __make_page_readonly(pgd);
+       queue_pgd_pin(__pa(pgd));
+       flush_page_update_queue();
+}
+
+/* never called when PTRS_PER_PMD > 1 */
+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+       unsigned long flags; /* can be called from interrupt context */
+
+       queue_pgd_unpin(__pa(pgd));
+       __make_page_writable(pgd);
+       flush_page_update_queue();
+
+       if (PTRS_PER_PMD > 1)
+               return;
+
+       spin_lock_irqsave(&pgd_lock, flags);
+       pgd_list_del(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       int i;
+       pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+
+       if (PTRS_PER_PMD == 1 || !pgd)
+               return pgd;
+
+       for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+               if (!pmd)
+                       goto out_oom;
+               set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
+       }
+       return pgd;
+
+out_oom:
+       for (i--; i >= 0; i--)
+               kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+       kmem_cache_free(pgd_cache, pgd);
+       return NULL;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+       int i;
+
+       /* in the PAE case user pgd entries are overwritten before usage */
+       if (PTRS_PER_PMD > 1)
+               for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+                       kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+       /* in the non-PAE case, clear_page_tables() clears user pgd entries */
+       kmem_cache_free(pgd_cache, pgd);
+}
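
In the PAE path, pgd_alloc() stores each pmd as __pgd(1 + __pa(pmd)): the low
bit of the entry is the Present flag, and pgd_free() recovers the pmd by
subtracting it back off before freeing. A standalone sketch of that
encode/decode round trip, with __pa/__va faked by plain casts (so the
"physical" address is simply the pointer value):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    int main(void)
    {
        void *pmd = aligned_alloc(4096, 4096);   /* stand-in pmd page   */
        uintptr_t entry = (uintptr_t)pmd + 1;    /* set the Present bit */

        printf("present bit: %lu\n", (unsigned long)(entry & 1));
        printf("recovered pmd matches: %d\n", (void *)(entry - 1) == pmd);
        free(pmd);
        return 0;
    }
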
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/pci/Makefile b/linux-2.6.9-xen-sparse/arch/xen/i386/pci/Makefile
new file mode 100644 (file)
index 0000000..175f5f4
--- /dev/null
@@ -0,0 +1,31 @@
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/pci
+
+c-obj-y                                := i386.o
+
+c-obj-$(CONFIG_PCI_BIOS)               += pcbios.o
+c-obj-$(CONFIG_PCI_MMCONFIG)   += mmconfig.o
+obj-$(CONFIG_PCI_DIRECT)       += direct.o
+
+c-pci-y                                := fixup.o
+c-pci-$(CONFIG_ACPI_PCI)       += acpi.o
+c-pci-y                                += legacy.o
+pci-y                          += irq.o
+
+c-pci-$(CONFIG_X86_VISWS)      := visws.o fixup.o
+pci-$(CONFIG_X86_VISWS)                :=
+c-pci-$(CONFIG_X86_NUMAQ)      := numa.o
+pci-$(CONFIG_X86_NUMAQ)                := irq.o
+
+obj-y                          += $(pci-y)
+c-obj-y                                += $(c-pci-y) common.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/pci/direct.c b/linux-2.6.9-xen-sparse/arch/xen/i386/pci/direct.c
new file mode 100644 (file)
index 0000000..6612213
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * direct.c - Low-level direct PCI config space access
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include "pci.h"
+
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/physdev.h>
+
+/*
+ * Functions for accessing PCI configuration space via Xen physdev operations
+ */
+
+static int pci_conf_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
+{
+       unsigned long flags;
+       physdev_op_t op;
+       int ret;
+
+       if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
+               return -EINVAL;
+
+       spin_lock_irqsave(&pci_config_lock, flags);
+
+       op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
+       op.u.pci_cfgreg_read.bus  = bus;
+       op.u.pci_cfgreg_read.dev  = (devfn & ~0x7) >> 3;
+       op.u.pci_cfgreg_read.func = devfn & 0x7;
+       op.u.pci_cfgreg_read.reg  = reg;
+       op.u.pci_cfgreg_read.len  = len;
+
+       ret = HYPERVISOR_physdev_op(&op);
+       if (ret == 0)
+               *value = op.u.pci_cfgreg_read.value;
+
+       spin_unlock_irqrestore(&pci_config_lock, flags);
+
+       return ret;
+}
+
+static int pci_conf_write (int seg, int bus, int devfn, int reg, int len, u32 value)
+{
+       unsigned long flags;
+       physdev_op_t op;
+       int ret;
+
+       if ((bus > 255) || (devfn > 255) || (reg > 255)) 
+               return -EINVAL;
+
+       spin_lock_irqsave(&pci_config_lock, flags);
+
+       op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
+       op.u.pci_cfgreg_write.bus   = bus;
+       op.u.pci_cfgreg_write.dev   = (devfn & ~0x7) >> 3;
+       op.u.pci_cfgreg_write.func  = devfn & 0x7;
+       op.u.pci_cfgreg_write.reg   = reg;
+       op.u.pci_cfgreg_write.len   = len;
+       op.u.pci_cfgreg_write.value = value;
+
+       ret = HYPERVISOR_physdev_op(&op);
+
+       spin_unlock_irqrestore(&pci_config_lock, flags);
+
+       return ret;
+}
+
+struct pci_raw_ops pci_direct_xen = {
+       .read =         pci_conf_read,
+       .write =        pci_conf_write,
+};
+
+
+static int __init pci_direct_init(void)
+{
+
+        printk(KERN_INFO "PCI: Using configuration type Xen\n");
+        raw_pci_ops = &pci_direct_xen;
+        return 0;
+}
+
+arch_initcall(pci_direct_init);
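
pci_conf_read() and pci_conf_write() split devfn into the slot and function
numbers the physdev op expects: the slot lives in bits 7..3 and the function
in bits 2..0, the same values PCI_SLOT() and PCI_FUNC() would give. A
standalone sketch of the decoding:

    #include <stdio.h>

    int main(void)
    {
        unsigned int devfn = 0x9a;               /* example: 1001 1010b */
        unsigned int slot  = (devfn & ~0x7) >> 3;
        unsigned int func  = devfn & 0x7;

        printf("devfn %#x -> slot %u, function %u\n", devfn, slot, func);
        return 0;
    }
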
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/pci/irq.c b/linux-2.6.9-xen-sparse/arch/xen/i386/pci/irq.c
new file mode 100644 (file)
index 0000000..7dc8dfb
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ *     Low-Level PCI Support for PC -- Routing of Interrupts
+ *
+ *     (c) 1999--2000 Martin Mares <mj@ucw.cz>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
+
+#include "pci.h"
+
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/physdev.h>
+
+/*
+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
+ * Avoid using: 13, 14 and 15 (FP error and IDE).
+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
+ */
+unsigned int pcibios_irq_mask = 0xfff8;
+
+static int pirq_penalty[16] = {
+       1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
+       0, 0, 0, 0, 1000, 100000, 100000, 100000
+};
+
+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
+
+
+static int __init pcibios_irq_init(void)
+{
+       int bus;
+       physdev_op_t op;
+
+       DBG("PCI: IRQ init\n");
+
+       if (pcibios_enable_irq || raw_pci_ops == NULL)
+               return 0;
+
+       op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
+       if (HYPERVISOR_physdev_op(&op) != 0) {
+               printk(KERN_WARNING "PCI: System does not support PCI\n");
+               return 0;
+       }
+
+       printk(KERN_INFO "PCI: Probing PCI hardware\n");
+       for (bus = 0; bus < 256; bus++)
+               if (test_bit(bus, (unsigned long *)
+                       &op.u.pci_probe_root_buses.busmask[0]))
+                       (void)pcibios_scan_root(bus);
+
+       pcibios_enable_irq = pirq_enable_irq;
+
+       return 0;
+}
+
+subsys_initcall(pcibios_irq_init);
+
+
+void pcibios_penalize_isa_irq(int irq)
+{
+       /*
+        *  If any ISAPnP device reports an IRQ in its list of possible
+        *  IRQs, we try to avoid assigning it to PCI devices.
+        */
+       pirq_penalty[irq] += 100;
+}
+
+int pirq_enable_irq(struct pci_dev *dev)
+{
+       int err;
+       u8  pin;
+       physdev_op_t op;
+
+       /* Inform Xen that we are going to use this device. */
+       op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
+       op.u.pci_initialise_device.bus  = dev->bus->number;
+       op.u.pci_initialise_device.dev  = PCI_SLOT(dev->devfn);
+       op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
+       if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
+               return err;
+
+       /* Now we can bind to the very final IRQ line. */
+       pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
+       dev->irq = pin;
+
+       /* Sanity-check that an interrupt-producing device is routed
+        * to an IRQ. */
+       pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+       if (pin != 0) {
+               if (dev->irq != 0)
+                       printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
+                           dev->irq, dev->slot_name);
+               else
+                       printk(KERN_WARNING "PCI: No IRQ known for interrupt "
+                           "pin %c of device %s.\n", 'A' + pin - 1,
+                           dev->slot_name);
+       }
+
+       return 0;
+}
+
+int pci_vector_resources(int last, int nr_released)
+{
+       int count = nr_released;
+
+       int next = last;
+       int offset = (last % 8);
+
+       while (next < FIRST_SYSTEM_VECTOR) {
+               next += 8;
+#ifdef CONFIG_X86_64
+               if (next == IA32_SYSCALL_VECTOR)
+                       continue;
+#else
+               if (next == SYSCALL_VECTOR)
+                       continue;
+#endif
+               count++;
+               if (next >= FIRST_SYSTEM_VECTOR) {
+                       if (offset%8) {
+                               next = FIRST_DEVICE_VECTOR + offset;
+                               offset++;
+                               continue;
+                       }
+                       count--;
+               }
+       }
+
+       return count;
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/Makefile b/linux-2.6.9-xen-sparse/arch/xen/kernel/Makefile
new file mode 100644 (file)
index 0000000..3768ed2
--- /dev/null
@@ -0,0 +1,13 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+$(obj)/vmlinux.lds:
+       @ln -fsn $(srctree)/arch/$(ARCH)/$(XENARCH)/kernel/vmlinux.lds $@
+
+extra-y += vmlinux.lds
+
+obj-y  := ctrl_if.o evtchn.o fixup.o process.o reboot.o xen_proc.o empty.o \
+           gnttab.o skbuff.o
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/ctrl_if.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/ctrl_if.c
new file mode 100644 (file)
index 0000000..78195fa
--- /dev/null
@@ -0,0 +1,543 @@
+/******************************************************************************
+ * ctrl_if.c
+ * 
+ * Management functions for special interface to the domain controller.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/evtchn.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+                           __FILE__ , __LINE__ , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+/*
+ * Only used by initial domain which must create its own control-interface
+ * event channel. This value is picked up by the user-space domain controller
+ * via an ioctl.
+ */
+int initdom_ctrlif_domcontroller_port = -1;
+
+static int        ctrl_if_evtchn;
+static int        ctrl_if_irq;
+static spinlock_t ctrl_if_lock;
+
+static struct irqaction ctrl_if_irq_action;
+
+static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
+static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+
+/* Incoming message requests. */
+    /* Primary message type -> message handler. */
+static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
+    /* Primary message type -> callback in process context? */
+static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
+    /* Is it late enough during bootstrap to use schedule_task()? */
+static int safe_to_schedule_task;
+    /* Queue up messages to be handled in process context. */
+static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
+
+/* Incoming message responses: message identifier -> message handler/id. */
+static struct {
+    ctrl_msg_handler_t fn;
+    unsigned long      id;
+} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
+
+/* For received messages that must be deferred to process context. */
+static void __ctrl_if_rxmsg_deferred(void *unused);
+static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
+                    __ctrl_if_rxmsg_deferred,
+                    NULL);
+
+/* Deferred callbacks for people waiting for space in the transmit ring. */
+static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
+
+static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
+static void __ctrl_if_tx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
+
+static void __ctrl_if_rx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
+
+#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
+#define TX_FULL(_c)   \
+    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
+
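
TX_FULL() compares free-running ring indices: tx_req_prod and
ctrl_if_tx_resp_cons are never wrapped when compared, only masked (with
MASK_CONTROL_IDX) when used to index the ring, so prod - cons gives the
number of outstanding requests even across integer wraparound, provided the
ring size is a power of two. A standalone sketch of that property (RING_SIZE
here is illustrative):

    #include <stdio.h>

    #define RING_SIZE 8U   /* a power of two, like CONTROL_RING_SIZE */

    int main(void)
    {
        /* Indices that have almost wrapped the 32-bit space. */
        unsigned int prod = 0xfffffffe, cons = 0xfffffff6;

        printf("outstanding = %u\n", prod - cons);           /* 8 */
        printf("full = %d\n", (prod - cons) == RING_SIZE);   /* 1 */
        printf("slot = %u\n", prod & (RING_SIZE - 1));       /* 6 */
        return 0;
    }
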
+static void ctrl_if_notify_controller(void)
+{
+    notify_via_evtchn(ctrl_if_evtchn);
+}
+
+static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
+{
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+static void __ctrl_if_tx_tasklet(unsigned long data)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    ctrl_msg_t   *msg;
+    int           was_full = TX_FULL(ctrl_if);
+    CONTROL_RING_IDX rp;
+
+    rp = ctrl_if->tx_resp_prod;
+    rmb(); /* Ensure we see all requests up to 'rp'. */
+
+    while ( ctrl_if_tx_resp_cons != rp )
+    {
+        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
+
+        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", 
+                ctrl_if_tx_resp_cons,
+                ctrl_if->tx_resp_prod,
+                msg->type, msg->subtype);
+
+        /* Execute the callback handler, if one was specified. */
+        if ( msg->id != 0xFF )
+        {
+            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
+                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
+            smp_mb(); /* Execute, /then/ free. */
+            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
+        }
+
+        /*
+         * Step over the message in the ring /after/ finishing reading it. As 
+         * soon as the index is updated then the message may get blown away.
+         */
+        smp_mb();
+        ctrl_if_tx_resp_cons++;
+    }
+
+    if ( was_full && !TX_FULL(ctrl_if) )
+    {
+        wake_up(&ctrl_if_tx_wait);
+        run_task_queue(&ctrl_if_tx_tq);
+    }
+}
+
+static void __ctrl_if_rxmsg_deferred(void *unused)
+{
+    ctrl_msg_t *msg;
+    CONTROL_RING_IDX dp;
+
+    dp = ctrl_if_rxmsg_deferred_prod;
+    rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+
+    while ( ctrl_if_rxmsg_deferred_cons != dp )
+    {
+        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+            ctrl_if_rxmsg_deferred_cons++)];
+        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
+    }
+}
+
+static void __ctrl_if_rx_tasklet(unsigned long data)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    ctrl_msg_t    msg, *pmsg;
+    CONTROL_RING_IDX rp, dp;
+
+    dp = ctrl_if_rxmsg_deferred_prod;
+    rp = ctrl_if->rx_req_prod;
+    rmb(); /* Ensure we see all requests up to 'rp'. */
+
+    while ( ctrl_if_rx_req_cons != rp )
+    {
+        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+
+        DPRINTK("Rx-Req %u/%u :: %d/%d\n", 
+                ctrl_if_rx_req_cons-1,
+                ctrl_if->rx_req_prod,
+                msg.type, msg.subtype);
+
+        if ( msg.length != 0 )
+            memcpy(msg.msg, pmsg->msg, msg.length);
+
+        if ( test_bit(msg.type, 
+                      (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
+            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+        else
+            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+    }
+
+    if ( dp != ctrl_if_rxmsg_deferred_prod )
+    {
+        wmb();
+        ctrl_if_rxmsg_deferred_prod = dp;
+        schedule_work(&ctrl_if_rxmsg_deferred_work);
+    }
+}
+
+static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
+                                     struct pt_regs *regs)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+        tasklet_schedule(&ctrl_if_tx_tasklet);
+
+    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+        tasklet_schedule(&ctrl_if_rx_tasklet);
+
+    return IRQ_HANDLED;
+}
+
+int
+ctrl_if_send_message_noblock(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd,
+    unsigned long id)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    unsigned long flags;
+    int           i;
+
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    if ( TX_FULL(ctrl_if) )
+    {
+        spin_unlock_irqrestore(&ctrl_if_lock, flags);
+        return -EAGAIN;
+    }
+
+    msg->id = 0xFF;
+    if ( hnd != NULL )
+    {
+        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
+            continue;
+        ctrl_if_txmsg_id_mapping[i].fn = hnd;
+        ctrl_if_txmsg_id_mapping[i].id = id;
+        msg->id = i;
+    }
+
+    DPRINTK("Tx-Req %u/%u :: %d/%d\n", 
+            ctrl_if->tx_req_prod, 
+            ctrl_if_tx_resp_cons,
+            msg->type, msg->subtype);
+
+    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
+           msg, sizeof(*msg));
+    wmb(); /* Write the message before letting the controller peek at it. */
+    ctrl_if->tx_req_prod++;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    ctrl_if_notify_controller();
+
+    return 0;
+}
+
+int
+ctrl_if_send_message_block(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd, 
+    unsigned long id,
+    long wait_state)
+{
+    DECLARE_WAITQUEUE(wait, current);
+    int rc;
+
+    /* Fast path. */
+    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+        return rc;
+
+    add_wait_queue(&ctrl_if_tx_wait, &wait);
+
+    for ( ; ; )
+    {
+        set_current_state(wait_state);
+
+        if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+            break;
+
+        rc = -ERESTARTSYS;
+        if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
+            break;
+
+        schedule();
+    }
+
+    set_current_state(TASK_RUNNING);
+    remove_wait_queue(&ctrl_if_tx_wait, &wait);
+
+    return rc;
+}
+
+/* Allow a response-callback handler to find context of a blocked requester. */
+struct rsp_wait {
+    ctrl_msg_t         *msg;  /* Buffer for the response message.            */
+    struct task_struct *task; /* The task that is blocked on the response.   */
+    int                 done; /* Indicate to 'task' that response is rcv'ed. */
+};
+
+static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
+{
+    struct rsp_wait    *wait = (struct rsp_wait *)id;
+    struct task_struct *task = wait->task;
+
+    memcpy(wait->msg, msg, sizeof(*msg));
+    wmb();
+    wait->done = 1;
+
+    wake_up_process(task);
+}
+
+int
+ctrl_if_send_message_and_get_response(
+    ctrl_msg_t *msg, 
+    ctrl_msg_t *rmsg,
+    long wait_state)
+{
+    struct rsp_wait wait;
+    int rc;
+
+    wait.msg  = rmsg;
+    wait.done = 0;
+    wait.task = current;
+
+    if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
+                                          (unsigned long)&wait,
+                                          wait_state)) != 0 )
+        return rc;
+
+    for ( ; ; )
+    {
+        /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
+        set_current_state(TASK_UNINTERRUPTIBLE);
+        if ( wait.done )
+            break;
+        schedule();
+    }
+
+    set_current_state(TASK_RUNNING);
+    return 0;
+}
+
+int
+ctrl_if_enqueue_space_callback(
+    struct tq_struct *task)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    /* Fast path. */
+    if ( !TX_FULL(ctrl_if) )
+        return 0;
+
+    (void)queue_task(task, &ctrl_if_tx_tq);
+
+    /*
+     * We may race execution of the task queue, so return re-checked status. If
+     * the task is not executed despite the ring being non-full then we will
+     * certainly return 'not full'.
+     */
+    smp_mb();
+    return TX_FULL(ctrl_if);
+}
+
+void
+ctrl_if_send_response(
+    ctrl_msg_t *msg)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    unsigned long flags;
+    ctrl_msg_t   *dmsg;
+
+    /*
+     * NB. The response may be the original request message, modified in-place.
+     * In this situation we may have src==dst, so no copying is required.
+     */
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    DPRINTK("Tx-Rsp %u :: %d/%d\n", 
+            ctrl_if->rx_resp_prod, 
+            msg->type, msg->subtype);
+
+    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
+    if ( dmsg != msg )
+        memcpy(dmsg, msg, sizeof(*msg));
+
+    wmb(); /* Write the message before letting the controller peek at it. */
+    ctrl_if->rx_resp_prod++;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    ctrl_if_notify_controller();
+}
+
+int
+ctrl_if_register_receiver(
+    u8 type, 
+    ctrl_msg_handler_t hnd, 
+    unsigned int flags)
+{
+    unsigned long _flags;
+    int inuse;
+
+    spin_lock_irqsave(&ctrl_if_lock, _flags);
+
+    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
+
+    if ( inuse )
+    {
+        printk(KERN_INFO "Receiver %p already established for control "
+               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
+    }
+    else
+    {
+        ctrl_if_rxmsg_handler[type] = hnd;
+        clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
+        {
+            set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+            if ( !safe_to_schedule_task )
+                BUG();
+        }
+    }
+
+    spin_unlock_irqrestore(&ctrl_if_lock, _flags);
+
+    return !inuse;
+}
+
+void 
+ctrl_if_unregister_receiver(
+    u8 type,
+    ctrl_msg_handler_t hnd)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    if ( ctrl_if_rxmsg_handler[type] != hnd )
+        printk(KERN_INFO "Receiver %p is not registered for control "
+               "messages of type %d.\n", hnd, type);
+    else
+        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    /* Ensure that @hnd will not be executed after this function returns. */
+    tasklet_unlock_wait(&ctrl_if_rx_tasklet);
+}
+
+void ctrl_if_suspend(void)
+{
+    free_irq(ctrl_if_irq, NULL);
+    unbind_evtchn_from_irq(ctrl_if_evtchn);
+}
+
+void ctrl_if_resume(void)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+    {
+        /*
+         * The initial domain must create its own domain-controller link.
+         * The controller is probably not running at this point, but will
+         * pick up its end of the event channel via an ioctl later on.
+         */
+        evtchn_op_t op;
+        op.cmd = EVTCHNOP_bind_interdomain;
+        op.u.bind_interdomain.dom1 = DOMID_SELF;
+        op.u.bind_interdomain.dom2 = DOMID_SELF;
+        op.u.bind_interdomain.port1 = 0;
+        op.u.bind_interdomain.port2 = 0;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            BUG();
+        xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
+        initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
+    }
+
+    /* Sync up with shared indexes. */
+    ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
+    ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;
+
+    ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
+    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
+
+#define SA_STATIC_ACTION 0x01000000 /* so that free_irq() doesn't do kfree() */
+    memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
+    ctrl_if_irq_action.handler = ctrl_if_interrupt;
+    ctrl_if_irq_action.name    = "ctrl-if";
+    ctrl_if_irq_action.flags   = SA_STATIC_ACTION;
+    (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
+}
+
+void __init ctrl_if_init(void)
+{
+    int i;
+
+    for ( i = 0; i < 256; i++ )
+        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+
+    spin_lock_init(&ctrl_if_lock);
+
+    ctrl_if_resume();
+}
+
+
+/* This is called after it is safe to call schedule_task(). */
+static int __init ctrl_if_late_setup(void)
+{
+    safe_to_schedule_task = 1;
+    return 0;
+}
+__initcall(ctrl_if_late_setup);
+
+
+/*
+ * !! The following are DANGEROUS FUNCTIONS !!
+ * Use with care [for example, see xencons_force_flush()].
+ */
+
+int ctrl_if_transmitter_empty(void)
+{
+    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
+}
+
+void ctrl_if_discard_responses(void)
+{
+    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
+}
+
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/empty.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/empty.c
new file mode 100644 (file)
index 0000000..32f03e7
--- /dev/null
@@ -0,0 +1,44 @@
+
+#include <linux/string.h>
+#include <asm-xen/hypervisor.h>
+
+#if 0
+static __inline__ int HYPERVISOR_console_write(const char *str, int count)
+{
+       int ret;
+       __asm__ __volatile__ (
+               TRAP_INSTR
+               : "=a" (ret) : "0" (__HYPERVISOR_console_write), 
+               "b" (str), "c" (count) : "memory" );
+
+
+       return ret;
+}
+#endif
+
+#if 01
+void
+xen_puts(const char *str)
+{
+
+       (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(str), (char *)str);
+}
+
+asmlinkage int CLPRINTK(const char *fmt, ...)
+{
+       va_list args;
+       int printk_len;
+       static char printk_buf[1024+1];
+    
+       /* Emit the output into the temporary buffer */
+       va_start(args, fmt);
+       printk_len = vsnprintf(printk_buf, sizeof(printk_buf)-1, fmt, args);
+       va_end(args);
+
+       printk_buf[printk_len] = 0;
+       /* Send the processed output directly to Xen. */
+       (void)HYPERVISOR_console_io(CONSOLEIO_write, printk_len, printk_buf);
+
+       return 0;
+}
+#endif
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/evtchn.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/evtchn.c
new file mode 100644 (file)
index 0000000..b779ac1
--- /dev/null
@@ -0,0 +1,513 @@
+/******************************************************************************
+ * evtchn.c
+ * 
+ * Communication via Xen event channels.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/version.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+#include <asm/hypervisor-ifs/physdev.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+EXPORT_SYMBOL(force_evtchn_callback);
+EXPORT_SYMBOL(evtchn_do_upcall);
+#endif
+
+/*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+static spinlock_t irq_mapping_update_lock;
+
+/* IRQ <-> event-channel mappings. */
+static int evtchn_to_irq[NR_EVENT_CHANNELS];
+static int irq_to_evtchn[NR_IRQS];
+
+/* IRQ <-> VIRQ mapping. */
+static int virq_to_irq[NR_VIRQS];
+
+/* Reference counts for bindings to IRQs. */
+static int irq_bindcount[NR_IRQS];
+
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+
+/* Upcall to generic IRQ layer. */
+extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
+
+#define VALID_EVTCHN(_chn) ((_chn) != -1)
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void force_evtchn_callback(void)
+{
+    (void)HYPERVISOR_xen_version(0);
+}
+
+void evtchn_do_upcall(struct pt_regs *regs)
+{
+    unsigned long  l1, l2;
+    unsigned int   l1i, l2i, port;
+    int            irq;
+    unsigned long  flags;
+    shared_info_t *s = HYPERVISOR_shared_info;
+
+    local_irq_save(flags);
+    
+    while ( s->vcpu_data[0].evtchn_upcall_pending )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 0;
+        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+        l1 = xchg(&s->evtchn_pending_sel, 0);
+        while ( (l1i = ffs(l1)) != 0 )
+        {
+            l1i--;
+            l1 &= ~(1 << l1i);
+        
+            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
+            while ( (l2i = ffs(l2)) != 0 )
+            {
+                l2i--;
+                l2 &= ~(1 << l2i);
+            
+                port = (l1i << 5) + l2i;
+                if ( (irq = evtchn_to_irq[port]) != -1 )
+                    do_IRQ(irq, regs);
+                else
+                    evtchn_device_upcall(port);
+            }
+        }
+    }
+
+    local_irq_restore(flags);
+}
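
evtchn_do_upcall() walks a two-level bitmap: evtchn_pending_sel records which
32-bit words of evtchn_pending hold work, and each set bit within a word
names one port, hence port = (l1i << 5) + l2i. A standalone sketch of the
same scan (ffs() is open-coded so the example stays portable; the pending
ports are made up):

    #include <stdio.h>

    static int ffs32(unsigned long x)
    {
        int i = 1;
        if (!x)
            return 0;
        while (!(x & 1)) { x >>= 1; i++; }
        return i;
    }

    int main(void)
    {
        unsigned long sel = 0, pending[32] = { 0 };
        int l1i, l2i;

        pending[0] = 1UL << 3;  sel |= 1UL << 0;   /* port 3 pending  */
        pending[2] = 1UL << 17; sel |= 1UL << 2;   /* port 81 pending */

        unsigned long l1 = sel;
        while ((l1i = ffs32(l1)) != 0) {
            l1i--; l1 &= ~(1UL << l1i);
            unsigned long l2 = pending[l1i];
            while ((l2i = ffs32(l2)) != 0) {
                l2i--; l2 &= ~(1UL << l2i);
                printf("deliver port %d\n", (l1i << 5) + l2i);
            }
        }
        return 0;
    }
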
+
+static int find_unbound_irq(void)
+{
+    int irq;
+
+    for ( irq = 0; irq < NR_IRQS; irq++ )
+        if ( irq_bindcount[irq] == 0 )
+            break;
+
+    if ( irq == NR_IRQS )
+        panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+    return irq;
+}
+
+int bind_virq_to_irq(int virq)
+{
+    evtchn_op_t op;
+    int evtchn, irq;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( (irq = virq_to_irq[virq]) == -1 )
+    {
+        op.cmd              = EVTCHNOP_bind_virq;
+        op.u.bind_virq.virq = virq;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to bind virtual IRQ %d\n", virq);
+        evtchn = op.u.bind_virq.port;
+
+        irq = find_unbound_irq();
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+
+        virq_to_irq[virq] = irq;
+    }
+
+    irq_bindcount[irq]++;
+
+    spin_unlock(&irq_mapping_update_lock);
+    
+    return irq;
+}
+
+void unbind_virq_from_irq(int virq)
+{
+    evtchn_op_t op;
+    int irq    = virq_to_irq[virq];
+    int evtchn = irq_to_evtchn[irq];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( --irq_bindcount[irq] == 0 )
+    {
+        op.cmd          = EVTCHNOP_close;
+        op.u.close.dom  = DOMID_SELF;
+        op.u.close.port = evtchn;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to unbind virtual IRQ %d\n", virq);
+
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+        virq_to_irq[virq]     = -1;
+    }
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
+int bind_evtchn_to_irq(int evtchn)
+{
+    int irq;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
+    {
+        irq = find_unbound_irq();
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+    }
+
+    irq_bindcount[irq]++;
+
+    spin_unlock(&irq_mapping_update_lock);
+    
+    return irq;
+}
+
+void unbind_evtchn_from_irq(int evtchn)
+{
+    int irq = evtchn_to_irq[evtchn];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( --irq_bindcount[irq] == 0 )
+    {
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+    }
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
+
+/*
+ * Interface to generic handling in irq.c
+ */
+
+static unsigned int startup_dynirq(unsigned int irq)
+{
+    unmask_evtchn(irq_to_evtchn[irq]);
+    return 0;
+}
+
+static void shutdown_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void enable_dynirq(unsigned int irq)
+{
+    unmask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void disable_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void ack_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+    clear_evtchn(irq_to_evtchn[irq]);
+}
+
+static void end_dynirq(unsigned int irq)
+{
+    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+        unmask_evtchn(irq_to_evtchn[irq]);
+}
+
+static struct hw_interrupt_type dynirq_type = {
+    "Dynamic-irq",
+    startup_dynirq,
+    shutdown_dynirq,
+    enable_dynirq,
+    disable_dynirq,
+    ack_dynirq,
+    end_dynirq,
+    NULL
+};
+
+static inline void pirq_unmask_notify(int pirq)
+{
+    physdev_op_t op;
+    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
+    {
+        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+        (void)HYPERVISOR_physdev_op(&op);
+    }
+}
+
+static inline void pirq_query_unmask(int pirq)
+{
+    physdev_op_t op;
+    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+    op.u.irq_status_query.irq = pirq;
+    (void)HYPERVISOR_physdev_op(&op);
+    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
+        set_bit(pirq, &pirq_needs_unmask_notify[0]);
+}
+
+/*
+ * On startup, if there is no action associated with the IRQ then we are
+ * probing. In this case we should not share with others as it will confuse us.
+ */
+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+
+static unsigned int startup_pirq(unsigned int irq)
+{
+    evtchn_op_t op;
+    int evtchn;
+
+    op.cmd               = EVTCHNOP_bind_pirq;
+    op.u.bind_pirq.pirq  = irq;
+    /* NB. We are happy to share unless we are probing. */
+    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+    {
+        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
+            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
+        return 0;
+    }
+    evtchn = op.u.bind_pirq.port;
+
+    pirq_query_unmask(irq_to_pirq(irq));
+
+    evtchn_to_irq[evtchn] = irq;
+    irq_to_evtchn[irq]    = evtchn;
+
+    unmask_evtchn(evtchn);
+    pirq_unmask_notify(irq_to_pirq(irq));
+
+    return 0;
+}
+
+static void shutdown_pirq(unsigned int irq)
+{
+    evtchn_op_t op;
+    int evtchn = irq_to_evtchn[irq];
+
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+
+    mask_evtchn(evtchn);
+
+    op.cmd          = EVTCHNOP_close;
+    op.u.close.dom  = DOMID_SELF;
+    op.u.close.port = evtchn;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+        panic("Failed to unbind physical IRQ %d\n", irq);
+
+    evtchn_to_irq[evtchn] = -1;
+    irq_to_evtchn[irq]    = -1;
+}
+
+static void enable_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    unmask_evtchn(evtchn);
+    pirq_unmask_notify(irq_to_pirq(irq));
+}
+
+static void disable_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    mask_evtchn(evtchn);
+}
+
+static void ack_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    mask_evtchn(evtchn);
+    clear_evtchn(evtchn);
+}
+
+static void end_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+    {
+        unmask_evtchn(evtchn);
+        pirq_unmask_notify(irq_to_pirq(irq));
+    }
+}
+
+static struct hw_interrupt_type pirq_type = {
+    "Phys-irq",
+    startup_pirq,
+    shutdown_pirq,
+    enable_pirq,
+    disable_pirq,
+    ack_pirq,
+    end_pirq,
+    NULL
+};
+
+static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
+                                       struct pt_regs *regs)
+{
+    /* nothing */
+    return IRQ_HANDLED;
+}
+
+static struct irqaction misdirect_action = {
+    misdirect_interrupt, 
+    SA_INTERRUPT, 
+    CPU_MASK_NONE, 
+    "misdirect", 
+    NULL, 
+    NULL
+};
+
+void irq_suspend(void)
+{
+    int pirq, virq, irq, evtchn;
+
+    /* Unbind VIRQs from event channels. */
+    for ( virq = 0; virq < NR_VIRQS; virq++ )
+    {
+        if ( (irq = virq_to_irq[virq]) == -1 )
+            continue;
+        evtchn = irq_to_evtchn[irq];
+
+        /* Mark the event channel as unused in our table. */
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+    }
+
+    /* Check that no PIRQs are still bound. */
+    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
+        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
+            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
+                  pirq, evtchn);
+}
+
+void irq_resume(void)
+{
+    evtchn_op_t op;
+    int         virq, irq, evtchn;
+
+    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
+        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
+
+    for ( virq = 0; virq < NR_VIRQS; virq++ )
+    {
+        if ( (irq = virq_to_irq[virq]) == -1 )
+            continue;
+
+        /* Get a new binding from Xen. */
+        op.cmd              = EVTCHNOP_bind_virq;
+        op.u.bind_virq.virq = virq;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to bind virtual IRQ %d\n", virq);
+        evtchn = op.u.bind_virq.port;
+        
+        /* Record the new mapping. */
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+
+        /* Ready for use. */
+        unmask_evtchn(evtchn);
+    }
+}
+
+void __init init_IRQ(void)
+{
+    int i;
+
+    spin_lock_init(&irq_mapping_update_lock);
+
+    /* No VIRQ -> IRQ mappings. */
+    for ( i = 0; i < NR_VIRQS; i++ )
+        virq_to_irq[i] = -1;
+
+    /* No event-channel -> IRQ mappings. */
+    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+    {
+        evtchn_to_irq[i] = -1;
+        mask_evtchn(i); /* No event channels are 'live' right now. */
+    }
+
+    /* No IRQ -> event-channel mappings. */
+    for ( i = 0; i < NR_IRQS; i++ )
+        irq_to_evtchn[i] = -1;
+
+    for ( i = 0; i < NR_DYNIRQS; i++ )
+    {
+        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+        irq_bindcount[dynirq_to_irq(i)] = 0;
+
+        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
+        irq_desc[dynirq_to_irq(i)].action  = 0;
+        irq_desc[dynirq_to_irq(i)].depth   = 1;
+        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+    }
+
+    for ( i = 0; i < NR_PIRQS; i++ )
+    {
+        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+        irq_bindcount[pirq_to_irq(i)] = 1;
+
+        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
+        irq_desc[pirq_to_irq(i)].action  = 0;
+        irq_desc[pirq_to_irq(i)].depth   = 1;
+        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+    }
+
+    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
+
+    /* This needs to be done early, but after the IRQ subsystem is alive. */
+    ctrl_if_init();
+}
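
evtchn_do_upcall() above demultiplexes pending Xen events with a two-level bitmap scan: the selector word fetched with xchg() names which 32-bit pending words to visit, and each set bit is turned back into a port number as (l1i << 5) + l2i before being routed to a Linux IRQ or to the evtchn device. A stand-alone sketch of that scan, using a simplified toy_shared_info in place of the real shared_info_t (names and sizes here are illustrative only), follows:

#include <stdio.h>
#include <strings.h>            /* ffs() */

#define NR_WORDS 32             /* 32 selector bits x 32 ports = 1024 ports */

struct toy_shared_info {
    unsigned int evtchn_pending_sel;        /* one bit per pending word below */
    unsigned int evtchn_pending[NR_WORDS];
    unsigned int evtchn_mask[NR_WORDS];
};

static void toy_upcall(struct toy_shared_info *s)
{
    unsigned int l1, l2;
    int l1i, l2i, port;

    l1 = s->evtchn_pending_sel;             /* the kernel uses xchg(&sel, 0) */
    s->evtchn_pending_sel = 0;
    while ((l1i = ffs(l1)) != 0) {
        l1i--;
        l1 &= ~(1U << l1i);
        l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
        while ((l2i = ffs(l2)) != 0) {
            l2i--;
            l2 &= ~(1U << l2i);
            port = (l1i << 5) + l2i;        /* 32 ports per selector bit */
            printf("event channel %d is pending\n", port);
        }
    }
}

int main(void)
{
    struct toy_shared_info s = { 0 };

    s.evtchn_pending_sel = (1U << 0) | (1U << 3);
    s.evtchn_pending[0]  = 1U << 5;         /* port 5 */
    s.evtchn_pending[3]  = 1U << 7;         /* port (3 << 5) + 7 = 103 */
    toy_upcall(&s);
    return 0;
}

Built with any C compiler, this prints ports 5 and 103, matching the (l1i << 5) + l2i arithmetic used by the handler.
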
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c
new file mode 100644 (file)
index 0000000..c6a9044
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c
@@ -0,0 +1,79 @@
+/******************************************************************************
+ * fixup.c
+ * 
+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
+ * Used to avoid repeated slow emulation of common instructions used by the
+ * user-space TLS (Thread-Local Storage) libraries.
+ * 
+ * **** NOTE ****
+ *  Issues with the binary rewriting have caused it to be removed. Instead
+ *  we rely on Xen's emulator to boot the kernel, and then print a banner
+ *  message recommending that the user disable /lib/tls.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#define DP(_f) printk(KERN_ALERT "  " _f "\n")
+
+asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
+{
+    static unsigned long printed = 0;
+    int i;
+
+    if ( !test_and_set_bit(0, &printed) )
+    {
+        HYPERVISOR_vm_assist(VMASST_CMD_disable,
+                             VMASST_TYPE_4gb_segments_notify);
+
+        DP("");
+        DP("***************************************************************");
+        DP("***************************************************************");
+        DP("** WARNING: Currently emulating unsupported memory accesses  **");
+        DP("**          in /lib/tls libraries. The emulation is very     **");
+        DP("**          slow, and may not work correctly with all        **");
+        DP("**          programs (e.g., some may 'Segmentation fault').  **");
+        DP("**          TO ENSURE FULL PERFORMANCE AND CORRECT FUNCTION, **");
+        DP("**          YOU MUST EXECUTE THE FOLLOWING AS ROOT:          **");
+        DP("**          mv /lib/tls /lib/tls.disabled                    **");
+        DP("***************************************************************");
+        DP("***************************************************************");
+        DP("");
+
+        for ( i = 5; i > 0; i-- )
+        {
+            printk("Pausing... %d", i);
+            mdelay(1000);
+            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
+        }
+        printk("Continuing...\n\n");
+    }
+}
+
+static int __init fixup_init(void)
+{
+    HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                         VMASST_TYPE_4gb_segments_notify);
+    return 0;
+}
+__initcall(fixup_init);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c
new file mode 100644 (file)
index 0000000..af892ae
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c
@@ -0,0 +1,163 @@
+/******************************************************************************
+ * gnttab.c
+ * 
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm-xen/gnttab.h>
+
+#ifndef set_fixmap_ma
+#define set_fixmap_ma set_fixmap
+#endif
+
+#if 1
+#define ASSERT(_p) \
+    if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
+    #_p , __LINE__, __FILE__); *(int*)0=0; }
+#else
+#define ASSERT(_p) ((void)0)
+#endif
+
+EXPORT_SYMBOL(gnttab_grant_foreign_access);
+EXPORT_SYMBOL(gnttab_end_foreign_access);
+EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
+EXPORT_SYMBOL(gnttab_end_foreign_transfer);
+
+#define NR_GRANT_REFS 512
+static grant_ref_t gnttab_free_list[NR_GRANT_REFS];
+static grant_ref_t gnttab_free_head;
+
+static grant_entry_t *shared;
+
+/*
+ * Lock-free grant-entry allocator
+ */
+
+static inline int
+get_free_entry(
+    void)
+{
+    grant_ref_t fh, nfh = gnttab_free_head;
+    do { if ( unlikely((fh = nfh) == NR_GRANT_REFS) ) return -1; }
+    while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh,
+                                    gnttab_free_list[fh])) != fh) );
+    return fh;
+}
+
+static inline void
+put_free_entry(
+    grant_ref_t ref)
+{
+    grant_ref_t fh, nfh = gnttab_free_head;
+    do { gnttab_free_list[ref] = fh = nfh; wmb(); }
+    while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, ref)) != fh) );
+}
+
+/*
+ * Public grant-issuing interface functions
+ */
+
+int
+gnttab_grant_foreign_access(
+    domid_t domid, unsigned long frame, int readonly)
+{
+    int ref;
+    
+    if ( unlikely((ref = get_free_entry()) == -1) )
+        return -ENOSPC;
+
+    shared[ref].frame = frame;
+    shared[ref].domid = domid;
+    wmb();
+    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+
+    return ref;
+}
+
+void
+gnttab_end_foreign_access(
+    grant_ref_t ref, int readonly)
+{
+    u16 flags, nflags;
+
+    nflags = shared[ref].flags;
+    do {
+        if ( (flags = nflags) & (GTF_reading|GTF_writing) )
+            printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+    }
+    while ( (nflags = cmpxchg(&shared[ref].flags, flags, 0)) != flags );
+
+    put_free_entry(ref);
+}
+
+int
+gnttab_grant_foreign_transfer(
+    domid_t domid)
+{
+    int ref;
+
+    if ( unlikely((ref = get_free_entry()) == -1) )
+        return -ENOSPC;
+
+    shared[ref].frame = 0;
+    shared[ref].domid = domid;
+    wmb();
+    shared[ref].flags = GTF_accept_transfer;
+
+    return ref;
+}
+
+unsigned long
+gnttab_end_foreign_transfer(
+    grant_ref_t ref)
+{
+    unsigned long frame = 0;
+    u16           flags;
+
+    flags = shared[ref].flags;
+    ASSERT(flags == (GTF_accept_transfer | GTF_transfer_committed));
+
+    /*
+     * If a transfer is committed then wait for the frame address to appear.
+     * Otherwise invalidate the grant entry against future use.
+     */
+    if ( likely(flags != GTF_accept_transfer) ||
+         (cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
+        while ( unlikely((frame = shared[ref].frame) == 0) )
+            cpu_relax();
+
+    put_free_entry(ref);
+
+    return frame;
+}
+
+void __init gnttab_init(void)
+{
+    gnttab_setup_table_t setup;
+    unsigned long        frame;
+    int                  i;
+
+    for ( i = 0; i < NR_GRANT_REFS; i++ )
+        gnttab_free_list[i] = i + 1;
+
+    setup.dom        = DOMID_SELF;
+    setup.nr_frames  = 1;
+    setup.frame_list = &frame;
+    if ( HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0 )
+        BUG();
+    if ( setup.status != 0 )
+        BUG();
+
+    set_fixmap_ma(FIX_GNTTAB, frame << PAGE_SHIFT);
+    shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB);
+}
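
The lock-free allocator above threads the free grant references through gnttab_free_list[] as a singly linked stack whose head, gnttab_free_head, is only ever moved with cmpxchg(), so no spinlock is needed. A stand-alone sketch of the same push/pop scheme, substituting GCC's __sync_val_compare_and_swap() for the kernel's cmpxchg() and using an illustrative eight-entry table, looks like this:

#include <stdio.h>

#define NR_REFS 8
typedef unsigned int ref_t;

static ref_t free_list[NR_REFS];
static ref_t free_head;

static int get_free_entry(void)
{
    ref_t fh, nfh = free_head;
    do {
        if ((fh = nfh) == NR_REFS)
            return -1;                            /* list exhausted */
    } while ((nfh = __sync_val_compare_and_swap(&free_head, fh,
                                                free_list[fh])) != fh);
    return fh;
}

static void put_free_entry(ref_t ref)
{
    ref_t fh, nfh = free_head;
    do {
        free_list[ref] = fh = nfh;                /* kernel adds a wmb() here */
    } while ((nfh = __sync_val_compare_and_swap(&free_head, fh, ref)) != fh);
}

int main(void)
{
    int i, a, b;

    for (i = 0; i < NR_REFS; i++)
        free_list[i] = i + 1;                     /* same init as gnttab_init() */

    a = get_free_entry();
    b = get_free_entry();
    printf("allocated refs %d and %d\n", a, b);   /* 0 and 1 */
    put_free_entry(a);
    printf("reallocated ref %d\n", get_free_entry());  /* 0 again */
    return 0;
}

As in gnttab_init(), the list is seeded with free_list[i] = i + 1, so the value NR_REFS acts as the end-of-list sentinel.
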
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/process.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/process.c
new file mode 100644 (file)
index 0000000..3a0c620
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/process.c
@@ -0,0 +1,29 @@
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/platform.h>
+#include <linux/pm.h>
+#include <linux/rcupdate.h>
+
+extern int set_timeout_timer(void);
+
+void xen_cpu_idle (void)
+{
+       struct rcu_data *rdp = &__get_cpu_var(rcu_bh_data);
+
+       local_irq_disable();
+       if (need_resched() || rdp->curlist) {
+               local_irq_enable();
+               return;
+       }
+       if (set_timeout_timer() == 0) {
+               /* NB. Blocking re-enables events in a race-free manner. */
+               HYPERVISOR_block();
+               return;
+       }
+       local_irq_enable();
+       HYPERVISOR_yield();
+}
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c
new file mode 100644 (file)
index 0000000..9b9761e
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c
@@ -0,0 +1,243 @@
+
+#define __KERNEL_SYSCALLS__
+static int errno;
+#include <linux/errno.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/hypervisor-ifs/dom0_ops.h>
+#include <asm-xen/suspend.h>
+#include <asm-xen/queues.h>
+
+void machine_restart(char * __unused)
+{
+       /* We really want to get pending console data out before we die. */
+       extern void xencons_force_flush(void);
+       xencons_force_flush();
+       HYPERVISOR_reboot();
+}
+
+void machine_halt(void)
+{
+       machine_power_off();
+}
+
+void machine_power_off(void)
+{
+       /* We really want to get pending console data out before we die. */
+       extern void xencons_force_flush(void);
+       xencons_force_flush();
+       HYPERVISOR_shutdown();
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+int reboot_thru_bios = 0;      /* for dmi_scan.c */
+EXPORT_SYMBOL(machine_restart);
+EXPORT_SYMBOL(machine_halt);
+EXPORT_SYMBOL(machine_power_off);
+#endif
+
+
+/******************************************************************************
+ * Stop/pickle callback handling.
+ */
+
+#include <asm/suspend.h>
+
+/* Ignore multiple shutdown requests. */
+static int shutting_down = -1;
+
+static void __do_suspend(void)
+{
+    int i, j;
+    suspend_record_t *suspend_record;
+
+    /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
+       /* XXX SMH: yes it would :-( */ 
+#ifdef CONFIG_XEN_BLKDEV_FRONTEND
+    extern void blkdev_suspend(void);
+    extern void blkdev_resume(void);
+#else
+#define blkdev_suspend() do{}while(0)
+#define blkdev_resume()  do{}while(0)
+#endif
+
+#ifdef CONFIG_XEN_NETDEV_FRONTEND
+    extern void netif_suspend(void);
+    extern void netif_resume(void);  
+#else
+#define netif_suspend() do{}while(0)
+#define netif_resume()  do{}while(0)
+#endif
+
+    extern void time_suspend(void);
+    extern void time_resume(void);
+    extern unsigned long max_pfn;
+    extern unsigned long *pfn_to_mfn_frame_list;
+
+    suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
+    if ( suspend_record == NULL )
+        goto out;
+
+    suspend_record->nr_pfns = max_pfn; /* final number of pfns */
+
+    __cli();
+
+    netif_suspend();
+
+    blkdev_suspend();
+
+    time_suspend();
+
+    ctrl_if_suspend();
+
+    irq_suspend();
+
+    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+    clear_fixmap(FIX_SHARED_INFO);
+
+    memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
+
+    HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+
+    HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                        VMASST_TYPE_4gb_segments);
+#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
+    HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                        VMASST_TYPE_writable_pagetables);
+#endif
+
+    shutting_down = -1; 
+
+    memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+#else
+    set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+#endif
+
+    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+    memset(empty_zero_page, 0, PAGE_SIZE);
+
+    for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+    {
+        pfn_to_mfn_frame_list[j] = 
+            virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+    }
+    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+        virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+    irq_resume();
+
+    ctrl_if_resume();
+
+    time_resume();
+
+    blkdev_resume();
+
+    netif_resume();
+
+    __sti();
+
+ out:
+    if ( suspend_record != NULL )
+        free_page((unsigned long)suspend_record);
+}
+
+static int shutdown_process(void *__unused)
+{
+    static char *envp[] = { "HOME=/", "TERM=linux", 
+                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+    static char *restart_argv[]  = { "/sbin/shutdown", "-r", "now", NULL };
+    static char *poweroff_argv[] = { "/sbin/halt",     "-p",        NULL };
+
+    extern asmlinkage long sys_reboot(int magic1, int magic2,
+                                      unsigned int cmd, void *arg);
+
+    daemonize(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+        "shutdown"
+#endif
+        );
+
+    switch ( shutting_down )
+    {
+    case CMSG_SHUTDOWN_POWEROFF:
+        if ( execve("/sbin/halt", poweroff_argv, envp) < 0 )
+        {
+            sys_reboot(LINUX_REBOOT_MAGIC1,
+                       LINUX_REBOOT_MAGIC2,
+                       LINUX_REBOOT_CMD_POWER_OFF,
+                       NULL);
+        }
+        break;
+
+    case CMSG_SHUTDOWN_REBOOT:
+        if ( execve("/sbin/shutdown", restart_argv, envp) < 0 )
+        {
+            sys_reboot(LINUX_REBOOT_MAGIC1,
+                       LINUX_REBOOT_MAGIC2,
+                       LINUX_REBOOT_CMD_RESTART,
+                       NULL);
+        }
+        break;
+    }
+
+    shutting_down = -1; /* could try again */
+
+    return 0;
+}
+
+static void __shutdown_handler(void *unused)
+{
+    int err;
+
+    if ( shutting_down != CMSG_SHUTDOWN_SUSPEND )
+    {
+        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
+        if ( err < 0 )
+            printk(KERN_ALERT "Error creating shutdown process!\n");
+    }
+    else
+    {
+        __do_suspend();
+    }
+}
+
+static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
+{
+    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
+
+    if ( (shutting_down == -1) &&
+         ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
+          (msg->subtype == CMSG_SHUTDOWN_REBOOT) ||
+          (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
+    {
+        shutting_down = msg->subtype;
+        schedule_work(&shutdown_work);
+    }
+    else
+    {
+        printk("Ignore spurious shutdown request\n");
+    }
+
+    ctrl_if_send_response(msg);
+}
+
+static int __init setup_shutdown_event(void)
+{
+    ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
+    return 0;
+}
+
+__initcall(setup_shutdown_event);
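
After HYPERVISOR_suspend() returns, the loop in __do_suspend() above rebuilds pfn_to_mfn_frame_list with one entry per page of the phys_to_machine_mapping array, i.e. it advances i by PAGE_SIZE/sizeof(unsigned long) pfns per entry. A small stand-alone calculation of that stride, assuming the 4 KiB pages and 4-byte longs of the i386 target (the 256 MiB guest size is just an example), is:

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096, ulong_size = 4;
    unsigned long pfns_per_frame = page_size / ulong_size;   /* 1024 */
    unsigned long max_pfn = 64 * 1024;                        /* 256 MiB guest */
    unsigned long frames = (max_pfn + pfns_per_frame - 1) / pfns_per_frame;

    printf("%lu pfn-to-mfn entries per frame, %lu frame-list entries\n",
           pfns_per_frame, frames);
    return 0;
}
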
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c
new file mode 100644 (file)
index 0000000..90274fd
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c
@@ -0,0 +1,47 @@
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(__dev_alloc_skb);
+
+/* Referenced in netback.c. */
+/*static*/ kmem_cache_t *skbuff_cachep;
+
+/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */
+#define XEN_SKB_SIZE \
+    ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))
+
+struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
+{
+    struct sk_buff *skb;
+    skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
+    if ( likely(skb != NULL) )
+        skb_reserve(skb, 16);
+    return skb;
+}
+
+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+    scrub_pages(buf, 1);
+}
+
+static int __init skbuff_init(void)
+{
+    skbuff_cachep = kmem_cache_create(
+        "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
+    return 0;
+}
+__initcall(skbuff_init);
diff --git a/linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c b/linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c
new file mode 100644 (file)
index 0000000..9c06dcd
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c
@@ -0,0 +1,18 @@
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *xen_base;
+
+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
+{
+    if ( xen_base == NULL )
+        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
+            panic("Couldn't create /proc/xen");
+    return create_proc_entry(name, mode, xen_base);
+}
+
+void remove_xen_proc_entry(const char *name)
+{
+    remove_proc_entry(name, xen_base);
+}
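
create_xen_proc_entry() above creates /proc/xen on first use and then defers to create_proc_entry(), so callers use it exactly like the generic procfs API. A usage sketch, with a hypothetical "example" node and the 2.6-era read_proc hook (the callback, node name, and local prototype declaration are illustrative, not part of this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>

/* Prototype lives in this tree's Xen headers; declared here only to keep the
 * sketch self-contained. */
extern struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode);

static int example_read(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
    *eof = 1;
    return sprintf(page, "hello from /proc/xen/example\n");
}

static int __init example_init(void)
{
    struct proc_dir_entry *entry;

    entry = create_xen_proc_entry("example", 0444);   /* name is hypothetical */
    if (entry == NULL)
        return -ENOMEM;
    entry->read_proc = example_read;                   /* 2.6-era procfs hook */
    return 0;
}
__initcall(example_init);
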
diff --git a/linux-2.6.9-xen-sparse/drivers/Makefile b/linux-2.6.9-xen-sparse/drivers/Makefile
new file mode 100644 (file)
index 0000000..6137e38
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/drivers/Makefile
@@ -0,0 +1,62 @@
+#
+# Makefile for the Linux kernel device drivers.
+#
+# 15 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-$(CONFIG_PCI)              += pci/
+obj-$(CONFIG_PARISC)           += parisc/
+obj-y                          += video/
+obj-$(CONFIG_ACPI_BOOT)                += acpi/
+# PnP must come after ACPI since it will eventually need to check if acpi
+# was used and do nothing if so
+obj-$(CONFIG_PNP)              += pnp/
+
+# char/ comes before serial/ etc so that the VT console is the boot-time
+# default.
+obj-y                          += char/
+
+# i810fb depends on char/agp/
+obj-$(CONFIG_FB_I810)           += video/i810/
+
+# we also need input/serio early so serio bus is initialized by the time
+# serial drivers start registering their serio ports
+obj-$(CONFIG_SERIO)            += input/serio/
+obj-y                          += serial/
+obj-$(CONFIG_PARPORT)          += parport/
+obj-y                          += base/ block/ misc/ net/ media/
+obj-$(CONFIG_NUBUS)            += nubus/
+obj-$(CONFIG_ATM)              += atm/
+obj-$(CONFIG_PPC_PMAC)         += macintosh/
+obj-$(CONFIG_ARCH_XEN)         += xen/
+obj-$(CONFIG_IDE)              += ide/
+obj-$(CONFIG_FC4)              += fc4/
+obj-$(CONFIG_SCSI)             += scsi/
+obj-$(CONFIG_FUSION)           += message/
+obj-$(CONFIG_IEEE1394)         += ieee1394/
+obj-y                          += cdrom/
+obj-$(CONFIG_MTD)              += mtd/
+obj-$(CONFIG_PCMCIA)           += pcmcia/
+obj-$(CONFIG_DIO)              += dio/
+obj-$(CONFIG_SBUS)             += sbus/
+obj-$(CONFIG_ZORRO)            += zorro/
+obj-$(CONFIG_MAC)              += macintosh/
+obj-$(CONFIG_PARIDE)           += block/paride/
+obj-$(CONFIG_TC)               += tc/
+obj-$(CONFIG_USB)              += usb/
+obj-$(CONFIG_USB_GADGET)       += usb/gadget/
+obj-$(CONFIG_INPUT)            += input/
+obj-$(CONFIG_GAMEPORT)         += input/gameport/
+obj-$(CONFIG_I2O)              += message/
+obj-$(CONFIG_I2C)              += i2c/
+obj-$(CONFIG_W1)               += w1/
+obj-$(CONFIG_PHONE)            += telephony/
+obj-$(CONFIG_MD)               += md/
+obj-$(CONFIG_BT)               += bluetooth/
+obj-$(CONFIG_ISDN)             += isdn/
+obj-$(CONFIG_MCA)              += mca/
+obj-$(CONFIG_EISA)             += eisa/
+obj-$(CONFIG_CPU_FREQ)         += cpufreq/
+obj-$(CONFIG_MMC)              += mmc/
+obj-y                          += firmware/
diff --git a/linux-2.6.9-xen-sparse/drivers/char/mem.c b/linux-2.6.9-xen-sparse/drivers/char/mem.c
new file mode 100644 (file)
index 0000000..83f54c2
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/drivers/char/mem.c
@@ -0,0 +1,753 @@
+/*
+ *  linux/drivers/char/mem.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Added devfs support. 
+ *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/smp_lock.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
+extern void tapechar_init(void);
+#endif
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
+#if defined(__i386__)
+       /*
+        * On the PPro and successors, the MTRRs are used to set
+        * memory types for physical addresses outside main memory,
+        * so blindly setting PCD or PWT on those pages is wrong.
+        * For Pentiums and earlier, the surround logic should disable
+        * caching for the high addresses through the KEN pin, but
+        * we maintain the tradition of paranoia in this code.
+        */
+       if (file->f_flags & O_SYNC)
+               return 1;
+       return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
+         && addr >= __pa(high_memory);
+#elif defined(__x86_64__)
+       /* 
+        * This is broken because it can generate memory type aliases,
+        * which can cause cache corruptions
+        * But it is only available for root and we have to be bug-to-bug
+        * compatible with i386.
+        */
+       if (file->f_flags & O_SYNC)
+               return 1;
+       /* same behaviour as i386. PAT always set to cached and MTRRs control the
+          caching behaviour. 
+          Hopefully a full PAT implementation will fix that soon. */      
+       return 0;
+#elif defined(CONFIG_IA64)
+       /*
+        * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+        */
+       return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_PPC64)
+       /* On PPC64, we always do non-cacheable access to the IO hole and
+        * cacheable elsewhere. Cache paradox can checkstop the CPU and
+        * the high_memory heuristic below is wrong on machines with memory
+        * above the IO hole... Ah, and of course, XFree86 doesn't pass
+        * O_SYNC when mapping us to tap IO space. Surprised ?
+        */
+       return !page_is_ram(addr >> PAGE_SHIFT);
+#else
+       /*
+        * Accessing memory above the top the kernel knows about or through a file pointer
+        * that was marked O_SYNC will be done non-cached.
+        */
+       if (file->f_flags & O_SYNC)
+               return 1;
+       return addr >= __pa(high_memory);
+#endif
+}
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
+{
+       unsigned long end_mem;
+
+       end_mem = __pa(high_memory);
+       if (addr >= end_mem)
+               return 0;
+
+       if (*count > end_mem - addr)
+               *count = end_mem - addr;
+
+       return 1;
+}
+#endif
+
+static ssize_t do_write_mem(void *p, unsigned long realp,
+                           const char __user * buf, size_t count, loff_t *ppos)
+{
+       ssize_t written;
+       unsigned long copied;
+
+       written = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+       /* we don't have page 0 mapped on sparc and m68k.. */
+       if (realp < PAGE_SIZE) {
+               unsigned long sz = PAGE_SIZE-realp;
+               if (sz > count) sz = count; 
+               /* Hmm. Do something? */
+               buf+=sz;
+               p+=sz;
+               count-=sz;
+               written+=sz;
+       }
+#endif
+       copied = copy_from_user(p, buf, count);
+       if (copied) {
+               ssize_t ret = written + (count - copied);
+
+               if (ret)
+                       return ret;
+               return -EFAULT;
+       }
+       written += count;
+       *ppos += written;
+       return written;
+}
+
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the 
+ * memory location. 
+ */
+static ssize_t read_mem(struct file * file, char __user * buf,
+                       size_t count, loff_t *ppos)
+{
+       unsigned long p = *ppos;
+       ssize_t read;
+
+       if (!valid_phys_addr_range(p, &count))
+               return -EFAULT;
+       read = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+       /* we don't have page 0 mapped on sparc and m68k.. */
+       if (p < PAGE_SIZE) {
+               unsigned long sz = PAGE_SIZE-p;
+               if (sz > count) 
+                       sz = count; 
+               if (sz > 0) {
+                       if (clear_user(buf, sz))
+                               return -EFAULT;
+                       buf += sz; 
+                       p += sz; 
+                       count -= sz; 
+                       read += sz; 
+               }
+       }
+#endif
+       if (copy_to_user(buf, __va(p), count))
+               return -EFAULT;
+       read += count;
+       *ppos += read;
+       return read;
+}
+
+static ssize_t write_mem(struct file * file, const char __user * buf, 
+                        size_t count, loff_t *ppos)
+{
+       unsigned long p = *ppos;
+
+       if (!valid_phys_addr_range(p, &count))
+               return -EFAULT;
+       return do_write_mem(__va(p), p, buf, count, ppos);
+}
+
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+#if !defined(CONFIG_XEN)
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       int uncached;
+
+       uncached = uncached_access(file, offset);
+#ifdef pgprot_noncached
+       if (uncached)
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+
+       /* Don't try to swap out physical pages.. */
+       vma->vm_flags |= VM_RESERVED;
+
+       /*
+        * Don't dump addresses that are not real memory to a core file.
+        */
+       if (uncached)
+               vma->vm_flags |= VM_IO;
+
+       if (remap_page_range(vma, vma->vm_start, offset, vma->vm_end-vma->vm_start,
+                            vma->vm_page_prot))
+               return -EAGAIN;
+       return 0;
+#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
+       return -ENXIO;
+#else
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+       if (!(xen_start_info.flags & SIF_PRIVILEGED))
+               return -ENXIO;
+
+       /* Currently we're not smart about setting PTE cacheability. */
+       vma->vm_flags |= VM_RESERVED | VM_IO;
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset, 
+                               vma->vm_end-vma->vm_start, vma->vm_page_prot,
+                               DOMID_IO))
+               return -EAGAIN;
+       return 0;
+#endif
+}
+
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char __user *buf, 
+                        size_t count, loff_t *ppos)
+{
+       unsigned long p = *ppos;
+       ssize_t read = 0;
+       ssize_t virtr = 0;
+       char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+               
+       if (p < (unsigned long) high_memory) {
+               read = count;
+               if (count > (unsigned long) high_memory - p)
+                       read = (unsigned long) high_memory - p;
+
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+               /* we don't have page 0 mapped on sparc and m68k.. */
+               if (p < PAGE_SIZE && read > 0) {
+                       size_t tmp = PAGE_SIZE - p;
+                       if (tmp > read) tmp = read;
+                       if (clear_user(buf, tmp))
+                               return -EFAULT;
+                       buf += tmp;
+                       p += tmp;
+                       read -= tmp;
+                       count -= tmp;
+               }
+#endif
+               if (copy_to_user(buf, (char *)p, read))
+                       return -EFAULT;
+               p += read;
+               buf += read;
+               count -= read;
+       }
+
+       if (count > 0) {
+               kbuf = (char *)__get_free_page(GFP_KERNEL);
+               if (!kbuf)
+                       return -ENOMEM;
+               while (count > 0) {
+                       int len = count;
+
+                       if (len > PAGE_SIZE)
+                               len = PAGE_SIZE;
+                       len = vread(kbuf, (char *)p, len);
+                       if (!len)
+                               break;
+                       if (copy_to_user(buf, kbuf, len)) {
+                               free_page((unsigned long)kbuf);
+                               return -EFAULT;
+                       }
+                       count -= len;
+                       buf += len;
+                       virtr += len;
+                       p += len;
+               }
+               free_page((unsigned long)kbuf);
+       }
+       *ppos = p;
+       return virtr + read;
+}
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char __user * buf, 
+                         size_t count, loff_t *ppos)
+{
+       unsigned long p = *ppos;
+       ssize_t wrote = 0;
+       ssize_t virtr = 0;
+       ssize_t written;
+       char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+       if (p < (unsigned long) high_memory) {
+
+               wrote = count;
+               if (count > (unsigned long) high_memory - p)
+                       wrote = (unsigned long) high_memory - p;
+
+               written = do_write_mem((void*)p, p, buf, wrote, ppos);
+               if (written != wrote)
+                       return written;
+               wrote = written;
+               p += wrote;
+               buf += wrote;
+               count -= wrote;
+       }
+
+       if (count > 0) {
+               kbuf = (char *)__get_free_page(GFP_KERNEL);
+               if (!kbuf)
+                       return wrote ? wrote : -ENOMEM;
+               while (count > 0) {
+                       int len = count;
+
+                       if (len > PAGE_SIZE)
+                               len = PAGE_SIZE;
+                       if (len) {
+                               written = copy_from_user(kbuf, buf, len);
+                               if (written) {
+                                       ssize_t ret;
+
+                                       free_page((unsigned long)kbuf);
+                                       ret = wrote + virtr + (len - written);
+                                       return ret ? ret : -EFAULT;
+                               }
+                       }
+                       len = vwrite(kbuf, (char *)p, len);
+                       count -= len;
+                       buf += len;
+                       virtr += len;
+                       p += len;
+               }
+               free_page((unsigned long)kbuf);
+       }
+
+       *ppos = p;
+       return virtr + wrote;
+}
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static ssize_t read_port(struct file * file, char __user * buf,
+                        size_t count, loff_t *ppos)
+{
+       unsigned long i = *ppos;
+       char __user *tmp = buf;
+
+       if (verify_area(VERIFY_WRITE,buf,count))
+               return -EFAULT; 
+       while (count-- > 0 && i < 65536) {
+               if (__put_user(inb(i),tmp) < 0) 
+                       return -EFAULT;  
+               i++;
+               tmp++;
+       }
+       *ppos = i;
+       return tmp-buf;
+}
+
+static ssize_t write_port(struct file * file, const char __user * buf,
+                         size_t count, loff_t *ppos)
+{
+       unsigned long i = *ppos;
+       const char __user * tmp = buf;
+
+       if (verify_area(VERIFY_READ,buf,count))
+               return -EFAULT;
+       while (count-- > 0 && i < 65536) {
+               char c;
+               if (__get_user(c, tmp)) 
+                       return -EFAULT; 
+               outb(c,i);
+               i++;
+               tmp++;
+       }
+       *ppos = i;
+       return tmp-buf;
+}
+#endif
+
+static ssize_t read_null(struct file * file, char __user * buf,
+                        size_t count, loff_t *ppos)
+{
+       return 0;
+}
+
+static ssize_t write_null(struct file * file, const char __user * buf,
+                         size_t count, loff_t *ppos)
+{
+       return count;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * For fun, we are using the MMU for this.
+ */
+static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
+{
+       struct mm_struct *mm;
+       struct vm_area_struct * vma;
+       unsigned long addr=(unsigned long)buf;
+
+       mm = current->mm;
+       /* Oops, this was forgotten before. -ben */
+       down_read(&mm->mmap_sem);
+
+       /* For private mappings, just map in zero pages. */
+       for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+               unsigned long count;
+
+               if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
+                       goto out_up;
+               if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+                       break;
+               count = vma->vm_end - addr;
+               if (count > size)
+                       count = size;
+
+               zap_page_range(vma, addr, count, NULL);
+               zeromap_page_range(vma, addr, count, PAGE_COPY);
+
+               size -= count;
+               buf += count;
+               addr += count;
+               if (size == 0)
+                       goto out_up;
+       }
+
+       up_read(&mm->mmap_sem);
+       
+       /* The shared case is hard. Let's do the conventional zeroing. */ 
+       do {
+               unsigned long unwritten = clear_user(buf, PAGE_SIZE);
+               if (unwritten)
+                       return size + unwritten - PAGE_SIZE;
+               cond_resched();
+               buf += PAGE_SIZE;
+               size -= PAGE_SIZE;
+       } while (size);
+
+       return size;
+out_up:
+       up_read(&mm->mmap_sem);
+       return size;
+}
+
+static ssize_t read_zero(struct file * file, char __user * buf, 
+                        size_t count, loff_t *ppos)
+{
+       unsigned long left, unwritten, written = 0;
+
+       if (!count)
+               return 0;
+
+       if (!access_ok(VERIFY_WRITE, buf, count))
+               return -EFAULT;
+
+       left = count;
+
+       /* do we want to be clever? Arbitrary cut-off */
+       if (count >= PAGE_SIZE*4) {
+               unsigned long partial;
+
+               /* How much left of the page? */
+               partial = (PAGE_SIZE-1) & -(unsigned long) buf;
+               unwritten = clear_user(buf, partial);
+               written = partial - unwritten;
+               if (unwritten)
+                       goto out;
+               left -= partial;
+               buf += partial;
+               unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
+               written += (left & PAGE_MASK) - unwritten;
+               if (unwritten)
+                       goto out;
+               buf += left & PAGE_MASK;
+               left &= ~PAGE_MASK;
+       }
+       unwritten = clear_user(buf, left);
+       written += left - unwritten;
+out:
+       return written ? written : -EFAULT;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+       if (vma->vm_flags & VM_SHARED)
+               return shmem_zero_setup(vma);
+       if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
+               return -EAGAIN;
+       return 0;
+}
+#else /* CONFIG_MMU */
+static ssize_t read_zero(struct file * file, char * buf, 
+                        size_t count, loff_t *ppos)
+{
+       size_t todo = count;
+
+       while (todo) {
+               size_t chunk = todo;
+
+               if (chunk > 4096)
+                       chunk = 4096;   /* Just for latency reasons */
+               if (clear_user(buf, chunk))
+                       return -EFAULT;
+               buf += chunk;
+               todo -= chunk;
+               cond_resched();
+       }
+       return count;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_MMU */
+
+static ssize_t write_full(struct file * file, const char __user * buf,
+                         size_t count, loff_t *ppos)
+{
+       return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
+ * can fopen() both devices with "a" now.  This was previously impossible.
+ * -- SRB.
+ */
+
+static loff_t null_lseek(struct file * file, loff_t offset, int orig)
+{
+       return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+{
+       loff_t ret;
+
+       down(&file->f_dentry->d_inode->i_sem);
+       switch (orig) {
+               case 0:
+                       file->f_pos = offset;
+                       ret = file->f_pos;
+                       force_successful_syscall_return();
+                       break;
+               case 1:
+                       file->f_pos += offset;
+                       ret = file->f_pos;
+                       force_successful_syscall_return();
+                       break;
+               default:
+                       ret = -EINVAL;
+       }
+       up(&file->f_dentry->d_inode->i_sem);
+       return ret;
+}
+
+static int open_port(struct inode * inode, struct file * filp)
+{
+       return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+#define mmap_kmem      mmap_mem
+#define zero_lseek     null_lseek
+#define full_lseek      null_lseek
+#define write_zero     write_null
+#define read_full       read_zero
+#define open_mem       open_port
+#define open_kmem      open_mem
+
+static struct file_operations mem_fops = {
+       .llseek         = memory_lseek,
+       .read           = read_mem,
+       .write          = write_mem,
+       .mmap           = mmap_mem,
+       .open           = open_mem,
+};
+
+static struct file_operations kmem_fops = {
+       .llseek         = memory_lseek,
+       .read           = read_kmem,
+       .write          = write_kmem,
+       .mmap           = mmap_kmem,
+       .open           = open_kmem,
+};
+
+static struct file_operations null_fops = {
+       .llseek         = null_lseek,
+       .read           = read_null,
+       .write          = write_null,
+};
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static struct file_operations port_fops = {
+       .llseek         = memory_lseek,
+       .read           = read_port,
+       .write          = write_port,
+       .open           = open_port,
+};
+#endif
+
+static struct file_operations zero_fops = {
+       .llseek         = zero_lseek,
+       .read           = read_zero,
+       .write          = write_zero,
+       .mmap           = mmap_zero,
+};
+
+static struct file_operations full_fops = {
+       .llseek         = full_lseek,
+       .read           = read_full,
+       .write          = write_full,
+};
+
+static ssize_t kmsg_write(struct file * file, const char __user * buf,
+                         size_t count, loff_t *ppos)
+{
+       char *tmp;
+       int ret;
+
+       tmp = kmalloc(count + 1, GFP_KERNEL);
+       if (tmp == NULL)
+               return -ENOMEM;
+       ret = -EFAULT;
+       if (!copy_from_user(tmp, buf, count)) {
+               tmp[count] = 0;
+               ret = printk("%s", tmp);
+       }
+       kfree(tmp);
+       return ret;
+}
+
+static struct file_operations kmsg_fops = {
+       .write =        kmsg_write,
+};
+
+static int memory_open(struct inode * inode, struct file * filp)
+{
+       switch (iminor(inode)) {
+               case 1:
+                       filp->f_op = &mem_fops;
+                       break;
+               case 2:
+                       filp->f_op = &kmem_fops;
+                       break;
+               case 3:
+                       filp->f_op = &null_fops;
+                       break;
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+               case 4:
+                       filp->f_op = &port_fops;
+                       break;
+#endif
+               case 5:
+                       filp->f_op = &zero_fops;
+                       break;
+               case 7:
+                       filp->f_op = &full_fops;
+                       break;
+               case 8:
+                       filp->f_op = &random_fops;
+                       break;
+               case 9:
+                       filp->f_op = &urandom_fops;
+                       break;
+               case 11:
+                       filp->f_op = &kmsg_fops;
+                       break;
+               default:
+                       return -ENXIO;
+       }
+       if (filp->f_op && filp->f_op->open)
+               return filp->f_op->open(inode,filp);
+       return 0;
+}
+
+static struct file_operations memory_fops = {
+       .open           = memory_open,  /* just a selector for the real open */
+};
+
+static const struct {
+       unsigned int            minor;
+       char                    *name;
+       umode_t                 mode;
+       struct file_operations  *fops;
+} devlist[] = { /* list of minor devices */
+       {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+       {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+       {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+       {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
+#endif
+       {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
+       {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
+       {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
+       {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
+       {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
+};
+
+static struct class_simple *mem_class;
+
+static int __init chr_dev_init(void)
+{
+       int i;
+
+       if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
+               printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+       mem_class = class_simple_create(THIS_MODULE, "mem");
+       for (i = 0; i < ARRAY_SIZE(devlist); i++) {
+               class_simple_device_add(mem_class,
+                                       MKDEV(MEM_MAJOR, devlist[i].minor),
+                                       NULL, devlist[i].name);
+               devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
+                               S_IFCHR | devlist[i].mode, devlist[i].name);
+       }
+       
+       return 0;
+}
+
+fs_initcall(chr_dev_init);
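
In the CONFIG_XEN branch of mmap_mem() above, the file offset is interpreted as a machine address, the mapping is forced uncached, and only a guest started with SIF_PRIVILEGED may establish it (via direct_remap_area_pages() against DOMID_IO). From user space the device still looks like an ordinary /dev/mem; a minimal sketch, with a placeholder machine address and length, might be:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/mem", O_RDONLY);
    if (fd < 0) { perror("open /dev/mem"); return 1; }

    /* Machine (not pseudo-physical) offset; 0xA0000 is only a placeholder. */
    void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0xA0000);
    if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    printf("first byte: %#x\n", *(unsigned char *)p);
    munmap(p, 4096);
    close(fd);
    return 0;
}
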
diff --git a/linux-2.6.9-xen-sparse/drivers/char/tty_io.c b/linux-2.6.9-xen-sparse/drivers/char/tty_io.c
new file mode 100644 (file)
index 0000000..c524a5c
--- /dev/null
+++ b/linux-2.6.9-xen-sparse/drivers/char/tty_io.c
@@ -0,0 +1,2955 @@
+/*
+ *  linux/drivers/char/tty_io.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures.  Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time.  Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c).  This
+ * makes for cleaner and more compact code.  -TYT, 9/17/92 
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling.  No delays, but all
+ * other bits should be there.
+ *     -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ *     -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ *     -- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ *     -- mj@k332.feld.cvut.cz, 19-Nov-95
+ * 
+ * Restrict vt switching via ioctl()
+ *      -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ *     -- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote init_dev and release_dev to eliminate races.
+ *     -- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ *      -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context.  Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/wait.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <linux/kmod.h>
+
+#undef TTY_DEBUG_HANGUP
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct termios tty_std_termios = {     /* for the benefit of tty drivers  */
+       .c_iflag = ICRNL | IXON,
+       .c_oflag = OPOST | ONLCR,
+       .c_cflag = B38400 | CS8 | CREAD | HUPCL,
+       .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+                  ECHOCTL | ECHOKE | IEXTEN,
+       .c_cc = INIT_C_CC
+};
+
+EXPORT_SYMBOL(tty_std_termios);
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+   could do with some rationalisation such as pulling the tty proc function
+   into this file */
+   
+LIST_HEAD(tty_drivers);                        /* linked list of tty drivers */
+
+/* Semaphore to protect creating and releasing a tty. This is shared with
+   vt.c for deeply disgusting hack reasons */
+DECLARE_MUTEX(tty_sem);
+
+#ifdef CONFIG_VT
+int console_use_vt = 1;
+#endif
+#ifdef CONFIG_UNIX98_PTYS
+extern struct tty_driver *ptm_driver;  /* Unix98 pty masters; for /dev/ptmx */
+extern int pty_limit;          /* Config limit on Unix98 ptys */
+static DEFINE_IDR(allocated_ptys);
+static DECLARE_MUTEX(allocated_ptys_lock);
+#endif
+
+extern void disable_early_printk(void);
+
+static void initialize_tty_struct(struct tty_struct *tty);
+
+static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
+static unsigned int tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+static int ptmx_open(struct inode *, struct file *);
+static int tty_release(struct inode *, struct file *);
+int tty_ioctl(struct inode * inode, struct file * file,
+             unsigned int cmd, unsigned long arg);
+static int tty_fasync(int fd, struct file * filp, int on);
+extern void rs_360_init(void);
+static void release_mem(struct tty_struct *tty, int idx);
+
+
+static struct tty_struct *alloc_tty_struct(void)
+{
+       struct tty_struct *tty;
+
+       tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
+       if (tty)
+               memset(tty, 0, sizeof(struct tty_struct));
+       return tty;
+}
+
+static inline void free_tty_struct(struct tty_struct *tty)
+{
+       kfree(tty);
+}
+
+#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
+
+char *tty_name(struct tty_struct *tty, char *buf)
+{
+       if (!tty) /* Hmm.  NULL pointer.  That's fun. */
+               strcpy(buf, "NULL tty");
+       else
+               strcpy(buf, tty->name);
+       return buf;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+inline int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+                             const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+       if (!tty) {
+               printk(KERN_WARNING
+                       "null TTY for (%d:%d) in %s\n",
+                       imajor(inode), iminor(inode), routine);
+               return 1;
+       }
+       if (tty->magic != TTY_MAGIC) {
+               printk(KERN_WARNING
+                       "bad magic number for tty struct (%d:%d) in %s\n",
+                       imajor(inode), iminor(inode), routine);
+               return 1;
+       }
+#endif
+       return 0;
+}
+
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+       struct list_head *p;
+       int count = 0;
+       
+       file_list_lock();
+       list_for_each(p, &tty->tty_files) {
+               count++;
+       }
+       file_list_unlock();
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+           tty->driver->subtype == PTY_TYPE_SLAVE &&
+           tty->link && tty->link->count)
+               count++;
+       if (tty->count != count) {
+               printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
+                                   "!= #fd's(%d) in %s\n",
+                      tty->name, tty->count, count, routine);
+               return count;
+       }       
+#endif
+       return 0;
+}
+
+/*
+ *     This is probably overkill for real world processors but
+ *     they are not on hot paths so a little discipline won't do 
+ *     any harm.
+ */
+static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+{
+       down(&tty->termios_sem);
+       tty->termios->c_line = num;
+       up(&tty->termios_sem);
+}
+
+/*
+ *     This guards the refcounted line discipline lists. The lock
+ *     must be taken with irqs off because there are hangup path
+ *     callers who will do ldisc lookups and cannot sleep.
+ */
+static spinlock_t tty_ldisc_lock = SPIN_LOCK_UNLOCKED;
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table     */
+
+int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
+{
+       unsigned long flags;
+       int ret = 0;
+       
+       if (disc < N_TTY || disc >= NR_LDISCS)
+               return -EINVAL;
+       
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       if (new_ldisc) {
+               tty_ldiscs[disc] = *new_ldisc;
+               tty_ldiscs[disc].num = disc;
+               tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
+               tty_ldiscs[disc].refcount = 0;
+       } else {
+               if(tty_ldiscs[disc].refcount)
+                       ret = -EBUSY;
+               else
+                       tty_ldiscs[disc].flags &= ~LDISC_FLAG_DEFINED;
+       }
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       
+       return ret;
+}
+
+EXPORT_SYMBOL(tty_register_ldisc);
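+
+/*
+ * Illustrative usage only (my_mouse_ldisc is a hypothetical module-local
+ * table): a line discipline registers its entry and later unregisters it
+ * by passing NULL for the same number,
+ *
+ *	tty_register_ldisc(N_MOUSE, &my_mouse_ldisc);
+ *	tty_register_ldisc(N_MOUSE, NULL);
+ *
+ * The NULL call fails with -EBUSY while the discipline is still referenced.
+ */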
+
+struct tty_ldisc *tty_ldisc_get(int disc)
+{
+       unsigned long flags;
+       struct tty_ldisc *ld;
+
+       if (disc < N_TTY || disc >= NR_LDISCS)
+               return NULL;
+       
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+
+       ld = &tty_ldiscs[disc];
+       /* Check the entry is defined */
+       if(ld->flags & LDISC_FLAG_DEFINED)
+       {
+               /* If the module is being unloaded we can't use it */
+               if (!try_module_get(ld->owner))
+                       ld = NULL;
+               else /* lock it */
+                       ld->refcount++;
+       }
+       else
+               ld = NULL;
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       return ld;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_get);
+
+void tty_ldisc_put(int disc)
+{
+       struct tty_ldisc *ld;
+       unsigned long flags;
+       
+       if (disc < N_TTY || disc >= NR_LDISCS)
+               BUG();
+               
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       ld = &tty_ldiscs[disc];
+       if(ld->refcount == 0)
+               BUG();
+       ld->refcount --;
+       module_put(ld->owner);
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+       
+EXPORT_SYMBOL_GPL(tty_ldisc_put);
+
+void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+       tty->ldisc = *ld;
+       tty->ldisc.refcount = 0;
+}
+
+/**
+ *     tty_ldisc_try           -       internal helper
+ *     @tty: the tty
+ *
+ *     Make a single attempt to grab and bump the refcount on
+ *     the tty ldisc. Return 0 on failure or 1 on success. This is
+ *     used to implement both the waiting and non-waiting versions
+ *     of tty_ldisc_ref().
+ */
+
+static int tty_ldisc_try(struct tty_struct *tty)
+{
+       unsigned long flags;
+       struct tty_ldisc *ld;
+       int ret = 0;
+       
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       ld = &tty->ldisc;
+       if(test_bit(TTY_LDISC, &tty->flags))
+       {
+               ld->refcount++;
+               ret = 1;
+       }
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       return ret;
+}
+
+/**
+ *     tty_ldisc_ref_wait      -       wait for the tty ldisc
+ *     @tty: tty device
+ *
+ *     Dereference the line discipline for the terminal and take a 
+ *     reference to it. If the line discipline is in flux then 
+ *     wait patiently until it changes.
+ *
+ *     Note: Must not be called from an IRQ/timer context. The caller
+ *     must also be careful not to hold other locks that will deadlock
+ *     against a discipline change, such as an existing ldisc reference
+ *     (which we check for)
+ */
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
+{
+       /* wait_event is a macro */
+       wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
+       if(tty->ldisc.refcount == 0)
+               printk(KERN_ERR "tty_ldisc_ref_wait\n");
+       return &tty->ldisc;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
+
+/**
+ *     tty_ldisc_ref           -       get the tty ldisc
+ *     @tty: tty device
+ *
+ *     Dereference the line discipline for the terminal and take a 
+ *     reference to it. If the line discipline is in flux then 
+ *     return NULL. Can be called from IRQ and timer functions.
+ */
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
+{
+       if(tty_ldisc_try(tty))
+               return &tty->ldisc;
+       return NULL;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref);
+
+/**
+ *     tty_ldisc_deref         -       free a tty ldisc reference
+ *     @ld: reference to free up
+ *
+ *     Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
+ *     be called in IRQ context.
+ */
+void tty_ldisc_deref(struct tty_ldisc *ld)
+{
+       unsigned long flags;
+
+       if(ld == NULL)
+               BUG();
+               
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       if(ld->refcount == 0)
+               printk(KERN_ERR "tty_ldisc_deref: no references.\n");
+       else
+               ld->refcount--;
+       if(ld->refcount == 0)
+               wake_up(&tty_ldisc_wait);
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_deref);
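+
+/*
+ * Illustrative pattern only: process-context callers bracket every ldisc
+ * call with a reference, e.g.
+ *
+ *	struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
+ *	if (ld->write_wakeup)
+ *		ld->write_wakeup(tty);
+ *	tty_ldisc_deref(ld);
+ *
+ * IRQ/timer context must use tty_ldisc_ref() instead and handle a NULL
+ * return, since it cannot wait for a discipline change to finish.
+ */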
+
+/**
+ *     tty_ldisc_enable        -       allow ldisc use
+ *     @tty: terminal to activate ldisc on
+ *
+ *     Set the TTY_LDISC flag when the line discipline can be called
+ *     again. Do necessary wakeups for existing sleepers.
+ *
+ *     Note: nobody should set this bit except via this function. Clearing
+ *     directly is allowed.
+ */
+
+static void tty_ldisc_enable(struct tty_struct *tty)
+{
+       set_bit(TTY_LDISC, &tty->flags);
+       wake_up(&tty_ldisc_wait);
+}
+       
+/**
+ *     tty_set_ldisc           -       set line discipline
+ *     @tty: the terminal to set
+ *     @ldisc: the line discipline
+ *
+ *     Set the discipline of a tty line. Must be called from a process
+ *     context.
+ */
+static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+{
+       int     retval = 0;
+       struct  tty_ldisc o_ldisc;
+       char buf[64];
+       int work;
+       unsigned long flags;
+       struct tty_ldisc *ld;
+
+       if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
+               return -EINVAL;
+
+restart:
+
+       if (tty->ldisc.num == ldisc)
+               return 0;       /* We are already in the desired discipline */
+       
+       ld = tty_ldisc_get(ldisc);
+       /* Eduardo Blanco <ejbs@cs.cs.com.uy> */
+       /* Cyrus Durgin <cider@speakeasy.org> */
+       if (ld == NULL) {
+               request_module("tty-ldisc-%d", ldisc);
+               ld = tty_ldisc_get(ldisc);
+       }
+       if (ld == NULL)
+               return -EINVAL;
+
+       o_ldisc = tty->ldisc;
+
+       tty_wait_until_sent(tty, 0);
+
+       /*
+        *      Make sure we don't change while someone holds a
+        *      reference to the line discipline. The TTY_LDISC bit
+        *      prevents anyone taking a reference once it is clear.
+        *      We need the lock to avoid racing reference takers.
+        */
+        
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       if(tty->ldisc.refcount)
+       {
+               /* Free the new ldisc we grabbed. Must drop the lock
+                  first. */
+               spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+               tty_ldisc_put(ldisc);
+               /*
+                * There are several reasons we may be busy, including
+                * random momentary I/O traffic. We must therefore
+                * retry. We could distinguish between blocking ops
+                * and retries if we made tty_ldisc_wait() smarter. That
+                * is up for discussion.
+                */
+               if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
+                       return -ERESTARTSYS;                    
+               goto restart;
+       }
+       clear_bit(TTY_LDISC, &tty->flags);      
+       clear_bit(TTY_DONT_FLIP, &tty->flags);
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       
+       /*
+        *      From this point on we know nobody has an ldisc
+        *      usage reference, nor can they obtain one until
+        *      we say so later on.
+        */
+        
+       work = cancel_delayed_work(&tty->flip.work);
+       /*
+        * Wait for ->hangup_work and ->flip.work handlers to terminate
+        */
+        
+       flush_scheduled_work();
+       /* Shutdown the current discipline. */
+       if (tty->ldisc.close)
+               (tty->ldisc.close)(tty);
+
+       /* Now set up the new line discipline. */
+       tty_ldisc_assign(tty, ld);
+       tty_set_termios_ldisc(tty, ldisc);
+       if (tty->ldisc.open)
+               retval = (tty->ldisc.open)(tty);
+       if (retval < 0) {
+               tty_ldisc_put(ldisc);
+               /* There is an outstanding reference here so this is safe */
+               tty_ldisc_assign(tty, tty_ldisc_get(o_ldisc.num));
+               tty_set_termios_ldisc(tty, tty->ldisc.num);
+               if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
+                       tty_ldisc_put(o_ldisc.num);
+                       /* This driver is always present */
+                       tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+                       tty_set_termios_ldisc(tty, N_TTY);
+                       if (tty->ldisc.open) {
+                               int r = tty->ldisc.open(tty);
+
+                               if (r < 0)
+                                       panic("Couldn't open N_TTY ldisc for "
+                                             "%s --- error %d.",
+                                             tty_name(tty, buf), r);
+                       }
+               }
+       }
+       /* At this point we hold a reference to the new ldisc and a
+          reference to the old ldisc. If we ended up flipping back
+          to the existing ldisc we have two references to it */
+       
+       if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc)
+               tty->driver->set_ldisc(tty);
+               
+       tty_ldisc_put(o_ldisc.num);
+       
+       /*
+        *      Allow ldisc referencing to occur as soon as the driver
+        *      ldisc callback completes.
+        */
+        
+       tty_ldisc_enable(tty);
+       
+       /* Restart it in case no characters kick it off. Safe if
+          already running */
+       if(work)
+               schedule_delayed_work(&tty->flip.work, 1);
+       return retval;
+}
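+
+/*
+ * Illustrative note only: this path is normally reached from the TIOCSETD
+ * ioctl, e.g. from user space:
+ *
+ *	int ldisc = N_PPP;
+ *	ioctl(fd, TIOCSETD, &ldisc);
+ */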
+
+/*
+ * This routine returns a tty driver structure, given a device number
+ */
+struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+       struct tty_driver *p;
+
+       list_for_each_entry(p, &tty_drivers, tty_drivers) {
+               dev_t base = MKDEV(p->major, p->minor_start);
+               if (device < base || device >= base + p->num)
+                       continue;
+               *index = device - base;
+               return p;
+       }
+       return NULL;
+}
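+
+/*
+ * Illustrative example only: on a conventionally configured system the
+ * first serial port registers as (4, 64), so get_tty_driver(MKDEV(4, 64),
+ * &index) returns the serial driver with index set to 0 (i.e. ttyS0).
+ */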
+
+/*
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU.  If the signal is blocked or
+ * ignored, go ahead and perform the operation.  (POSIX 7.2)
+ */
+int tty_check_change(struct tty_struct * tty)
+{
+       if (current->signal->tty != tty)
+               return 0;
+       if (tty->pgrp <= 0) {
+               printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
+               return 0;
+       }
+       if (process_group(current) == tty->pgrp)
+               return 0;
+       if (is_ignored(SIGTTOU))
+               return 0;
+       if (is_orphaned_pgrp(process_group(current)))
+               return -EIO;
+       (void) kill_pg(process_group(current), SIGTTOU, 1);
+       return -ERESTARTSYS;
+}
+
+EXPORT_SYMBOL(tty_check_change);
+
+static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
+                               size_t count, loff_t *ppos)
+{
+       return 0;
+}
+
+static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
+                                size_t count, loff_t *ppos)
+{
+       return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
+{
+       return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+}
+
+static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
+                            unsigned int cmd, unsigned long arg)
+{
+       return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static struct file_operations tty_fops = {
+       .llseek         = no_llseek,
+       .read           = tty_read,
+       .write          = tty_write,
+       .poll           = tty_poll,
+       .ioctl          = tty_ioctl,
+       .open           = tty_open,
+       .release        = tty_release,
+       .fasync         = tty_fasync,
+};
+
+#ifdef CONFIG_UNIX98_PTYS
+static struct file_operations ptmx_fops = {
+       .llseek         = no_llseek,
+       .read           = tty_read,
+       .write          = tty_write,
+       .poll           = tty_poll,
+       .ioctl          = tty_ioctl,
+       .open           = ptmx_open,
+       .release        = tty_release,
+       .fasync         = tty_fasync,
+};
+#endif
+
+static struct file_operations console_fops = {
+       .llseek         = no_llseek,
+       .read           = tty_read,
+       .write          = redirected_tty_write,
+       .poll           = tty_poll,
+       .ioctl          = tty_ioctl,
+       .open           = tty_open,
+       .release        = tty_release,
+       .fasync         = tty_fasync,
+};
+
+static struct file_operations hung_up_tty_fops = {
+       .llseek         = no_llseek,
+       .read           = hung_up_tty_read,
+       .write          = hung_up_tty_write,
+       .poll           = hung_up_tty_poll,
+       .ioctl          = hung_up_tty_ioctl,
+       .release        = tty_release,
+};
+
+static spinlock_t redirect_lock = SPIN_LOCK_UNLOCKED;
+static struct file *redirect;
+
+/**
+ *     tty_wakeup      -       request more data
+ *     @tty: terminal
+ *
+ *     Internal and external helper for wakeups of tty. This function
+ *     informs the line discipline if present that the driver is ready
+ *     to receive more output data.
+ */
+void tty_wakeup(struct tty_struct *tty)
+{
+       struct tty_ldisc *ld;
+       
+       if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+               ld = tty_ldisc_ref(tty);
+               if(ld) {
+                       if(ld->write_wakeup)
+                               ld->write_wakeup(tty);
+                       tty_ldisc_deref(ld);
+               }
+       }
+       wake_up_interruptible(&tty->write_wait);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
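+
+/*
+ * Illustrative note only: a driver typically calls tty_wakeup(tty) from its
+ * transmit path once it has drained enough of its buffer to accept more
+ * data; besides the optional ldisc write_wakeup() callback above, any
+ * writers sleeping in tty->write_wait are woken directly.
+ */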
+
+/**
+ *     tty_ldisc_flush -       flush line discipline queue
+ *     @tty: tty
+ *
+ *     Flush the line discipline queue (if any) for this tty. If there
+ *     is no line discipline active this is a no-op.
+ */
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+       struct tty_ldisc *ld = tty_ldisc_ref(tty);
+       if(ld) {
+               if(ld->flush_buffer)
+                       ld->flush_buffer(tty);
+               tty_ldisc_deref(ld);
+       }
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+       
+/*
+ * This can be called by the "eventd" kernel thread.  That is process synchronous,
+ * but doesn't hold any locks, so we need to make sure we have the appropriate
+ * locks for what we're doing..
+ */
+void do_tty_hangup(void *data)
+{
+       struct tty_struct *tty = (struct tty_struct *) data;
+       struct file * cons_filp = NULL;
+       struct file *filp, *f = NULL;
+       struct task_struct *p;
+       struct tty_ldisc *ld;
+       int    closecount = 0, n;
+
+       if (!tty)
+               return;
+
+       /* inuse_filps is protected by the single kernel lock */
+       lock_kernel();
+
+       spin_lock(&redirect_lock);
+       if (redirect && redirect->private_data == tty) {
+               f = redirect;
+               redirect = NULL;
+       }
+       spin_unlock(&redirect_lock);
+       
+       check_tty_count(tty, "do_tty_hangup");
+       file_list_lock();
+       /* This breaks for file handles being sent over AF_UNIX sockets ? */
+       list_for_each_entry(filp, &tty->tty_files, f_list) {
+               if (filp->f_op->write == redirected_tty_write)
+                       cons_filp = filp;
+               if (filp->f_op->write != tty_write)
+                       continue;
+               closecount++;
+               tty_fasync(-1, filp, 0);        /* can't block */
+               filp->f_op = &hung_up_tty_fops;
+       }
+       file_list_unlock();
+       
+       /* FIXME! What are the locking issues here? This may be overdoing things..
+        * this question is especially important now that we've removed the irqlock. */
+
+       ld = tty_ldisc_ref(tty);
+       if(ld != NULL)  /* We may have no line discipline at this point */
+       {
+               if (ld->flush_buffer)
+                       ld->flush_buffer(tty);
+               if (tty->driver->flush_buffer)
+                       tty->driver->flush_buffer(tty);
+               if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
+                   ld->write_wakeup)
+                       ld->write_wakeup(tty);
+               if (ld->hangup)
+                       ld->hangup(tty);
+       }
+
+       /* FIXME: Once we trust the LDISC code better we can wait here for
+          ldisc completion and fix the driver call race */
+          
+       wake_up_interruptible(&tty->write_wait);
+       wake_up_interruptible(&tty->read_wait);
+
+       /*
+        * Shutdown the current line discipline, and reset it to
+        * N_TTY.
+        */
+       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+       {
+               down(&tty->termios_sem);
+               *tty->termios = tty->driver->init_termios;
+               up(&tty->termios_sem);
+       }
+       
+       /* Defer ldisc switch */
+       /* tty_deferred_ldisc_switch(N_TTY);
+       
+         This should get done automatically when the port closes and
+         tty_release is called */
+       
+       read_lock(&tasklist_lock);
+       if (tty->session > 0) {
+               do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+                       if (p->signal->tty == tty)
+                               p->signal->tty = NULL;
+                       if (!p->signal->leader)
+                               continue;
+                       send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+                       send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+                       if (tty->pgrp > 0)
+                               p->signal->tty_old_pgrp = tty->pgrp;
+               } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+       }
+       read_unlock(&tasklist_lock);
+
+       tty->flags = 0;
+       tty->session = 0;
+       tty->pgrp = -1;
+       tty->ctrl_status = 0;
+       /*
+        *      If one of the devices matches a console pointer, we
+        *      cannot just call hangup() because that will cause
+        *      tty->count and state->count to go out of sync.
+        *      So we just call close() the right number of times.
+        */
+       if (cons_filp) {
+               if (tty->driver->close)
+                       for (n = 0; n < closecount; n++)
+                               tty->driver->close(tty, cons_filp);
+       } else if (tty->driver->hangup)
+               (tty->driver->hangup)(tty);
+               
+       /* We don't want to have driver/ldisc interactions beyond
+          the ones we did here. The driver layer expects no
+          calls after ->hangup() from the ldisc side. However we
+          can't yet guarantee all that */
+
+       set_bit(TTY_HUPPED, &tty->flags);
+       if (ld) {
+               tty_ldisc_enable(tty);
+               tty_ldisc_deref(ld);
+       }
+       unlock_kernel();
+       if (f)
+               fput(f);
+}
+
+void tty_hangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+       char    buf[64];
+       
+       printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
+#endif
+       schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+void tty_vhangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+       char    buf[64];
+
+       printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
+#endif
+       do_tty_hangup((void *) tty);
+}
+EXPORT_SYMBOL(tty_vhangup);
+
+int tty_hung_up_p(struct file * filp)
+{
+       return (filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+/*
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
+ *
+ * It performs the following functions:
+ *     (1)  Sends a SIGHUP and SIGCONT to the foreground process group
+ *     (2)  Clears the tty from being controlling the session
+ *     (3)  Clears the controlling tty for all processes in the
+ *             session group.
+ *
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ */
+void disassociate_ctty(int on_exit)
+{
+       struct tty_struct *tty;
+       struct task_struct *p;
+       int tty_pgrp = -1;
+
+       lock_kernel();
+
+       tty = current->signal->tty;
+       if (tty) {
+               tty_pgrp = tty->pgrp;
+               if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
+                       tty_vhangup(tty);
+       } else {
+               if (current->signal->tty_old_pgrp) {
+                       kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit);
+                       kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit);
+               }
+               unlock_kernel();        
+               return;
+       }
+       if (tty_pgrp > 0) {
+               kill_pg(tty_pgrp, SIGHUP, on_exit);
+               if (!on_exit)
+                       kill_pg(tty_pgrp, SIGCONT, on_exit);
+       }
+
+       current->signal->tty_old_pgrp = 0;
+       tty->session = 0;
+       tty->pgrp = -1;
+
+       read_lock(&tasklist_lock);
+       do_each_task_pid(current->signal->session, PIDTYPE_SID, p) {
+               p->signal->tty = NULL;
+       } while_each_task_pid(current->signal->session, PIDTYPE_SID, p);
+       read_unlock(&tasklist_lock);
+       unlock_kernel();
+}
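+
+/*
+ * Illustrative note only: the on_exit == 0 case corresponds to a session
+ * leader detaching voluntarily, e.g.
+ *
+ *	ioctl(ttyfd, TIOCNOTTY, 0);
+ *
+ * where ttyfd refers to the controlling terminal.
+ */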
+
+void stop_tty(struct tty_struct *tty)
+{
+       if (tty->stopped)
+               return;
+       tty->stopped = 1;
+       if (tty->link && tty->link->packet) {
+               tty->ctrl_status &= ~TIOCPKT_START;
+               tty->ctrl_status |= TIOCPKT_STOP;
+               wake_up_interruptible(&tty->link->read_wait);
+       }
+       if (tty->driver->stop)
+               (tty->driver->stop)(tty);
+}
+
+EXPORT_SYMBOL(stop_tty);
+
+void start_tty(struct tty_struct *tty)
+{
+       if (!tty->stopped || tty->flow_stopped)
+               return;
+       tty->stopped = 0;
+       if (tty->link && tty->link->packet) {
+               tty->ctrl_status &= ~TIOCPKT_STOP;
+               tty->ctrl_status |= TIOCPKT_START;
+               wake_up_interruptible(&tty->link->read_wait);
+       }
+       if (tty->driver->start)
+               (tty->driver->start)(tty);
+
+       /* If we have a running line discipline it may need kicking */
+       tty_wakeup(tty);
+       wake_up_interruptible(&tty->write_wait);
+}
+
+EXPORT_SYMBOL(start_tty);
+
+static ssize_t tty_read(struct file * file, char __user * buf, size_t count, 
+                       loff_t *ppos)
+{
+       int i;
+       struct tty_struct * tty;
+       struct inode *inode;
+       struct tty_ldisc *ld;
+
+       tty = (struct tty_struct *)file->private_data;
+       inode = file->f_dentry->d_inode;
+       if (tty_paranoia_check(tty, inode, "tty_read"))
+               return -EIO;
+       if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+               return -EIO;
+
+       /* We want to wait for the line discipline to sort out in this
+          situation */
+       ld = tty_ldisc_ref_wait(tty);
+       lock_kernel();
+       if (ld->read)
+               i = (ld->read)(tty,file,buf,count);
+       else
+               i = -EIO;
+       tty_ldisc_deref(ld);
+       unlock_kernel();
+       if (i > 0)
+               inode->i_atime = CURRENT_TIME;
+       return i;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+       ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char __user *, size_t),
+       struct tty_struct *tty,
+       struct file *file,
+       const unsigned char __user *buf,
+       size_t count)
+{
+       ssize_t ret = 0, written = 0;
+       
+       if (down_interruptible(&tty->atomic_write)) {
+               return -ERESTARTSYS;
+       }
+       if ( test_bit(TTY_NO_WRITE_SPLIT, &tty->flags) ) {
+               lock_kernel();
+               written = write(tty, file, buf, count);
+               unlock_kernel();
+       } else {
+               for (;;) {
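+                       /* Chunk size is max(2*PAGE_SIZE, 16K): 16KiB with
+                          4KiB pages, larger only on bigger-page systems;
+                          this bounds how much is written per pass under
+                          the big kernel lock before cond_resched(). */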
+                       unsigned long size = max((unsigned long)PAGE_SIZE*2, 16384UL);
+                       if (size > count)
+                               size = count;
+                       lock_kernel();
+                       ret = write(tty, file, buf, size);
+                       unlock_kernel();
+                       if (ret <= 0)
+                               break;
+                       written += ret;
+                       buf += ret;
+                       count -= ret;
+                       if (!count)
+                               break;
+                       ret = -ERESTARTSYS;
+                       if (signal_pending(current))
+                               break;
+                       cond_resched();
+               }
+       }
+       if (written) {
+               file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
+               ret = written;
+       }
+       up(&tty->atomic_write);
+       return ret;
+}
+
+
+static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
+                        loff_t *ppos)
+{
+       struct tty_struct * tty;
+       struct inode *inode = file->f_dentry->d_inode;
+       ssize_t ret;
+       struct tty_ldisc *ld;
+       
+       tty = (struct tty_struct *)file->private_data;
+       if (tty_paranoia_check(tty, inode, "tty_write"))
+               return -EIO;
+       if (!tty || !tty->driver->write || (test_bit(TTY_IO_ERROR, &tty->flags)))
+               return -EIO;
+
+       ld = tty_ldisc_ref_wait(tty);           
+       if (!ld->write)
+               ret = -EIO;
+       else
+               ret = do_tty_write(ld->write, tty, file,
+                           (const unsigned char __user *)buf, count);
+       tty_ldisc_deref(ld);
+       return ret;
+}
+
+ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t count,
+                        loff_t *ppos)
+{
+       struct file *p = NULL;
+
+       spin_lock(&redirect_lock);
+       if (redirect) {
+               get_file(redirect);
+               p = redirect;
+       }
+       spin_unlock(&redirect_lock);
+
+       if (p) {
+               ssize_t res;
+               res = vfs_write(p, buf, count, &p->f_pos);
+               fput(p);
+               return res;
+       }
+
+       return tty_write(file, buf, count, ppos);
+}
+
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+       int i = index + driver->name_base;
+       /* ->name is initialized to "ttyp", but "tty" is expected */
+       sprintf(p, "%s%c%x",
+                       driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+                       ptychar[i >> 4 & 0xf], i & 0xf);
+}
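+
+/*
+ * Illustrative example only: with name_base 0, index 0 maps to "ttyp0" on
+ * the slave side and index 16 to "ttyq0", reproducing the BSD pty naming
+ * scheme.
+ */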
+
+static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+       sprintf(p, "%s%d", driver->name, index + driver->name_base);
+}
+
+/*
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open.  The new code protects the open with a semaphore, so it's
+ * really quite straightforward.  The semaphore locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ */
+static int init_dev(struct tty_driver *driver, int idx,
+       struct tty_struct **ret_tty)
+{
+       struct tty_struct *tty, *o_tty;
+       struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
+       struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
+       int retval=0;
+
+       /* 
+        * Check whether we need to acquire the tty semaphore to avoid
+        * race conditions.  For now, play it safe.
+        */
+       down(&tty_sem);
+
+       /* check whether we're reopening an existing tty */
+       if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+               tty = devpts_get_tty(idx);
+               if (tty && driver->subtype == PTY_TYPE_MASTER)
+                       tty = tty->link;
+       } else {
+               tty = driver->ttys[idx];
+       }
+       if (tty) goto fast_track;
+
+       /*
+        * First time open is complex, especially for PTY devices.
+        * This code guarantees that either everything succeeds and the
+        * TTY is ready for operation, or else the table slots are vacated
+        * and the allocated memory released.  (Except that the termios 
+        * and locked termios may be retained.)
+        */
+
+       if (!try_module_get(driver->owner)) {
+               retval = -ENODEV;
+               goto end_init;
+       }
+
+       o_tty = NULL;
+       tp = o_tp = NULL;
+       ltp = o_ltp = NULL;
+
+       tty = alloc_tty_struct();
+       if(!tty)
+               goto fail_no_mem;
+       initialize_tty_struct(tty);
+       tty->driver = driver;
+       tty->index = idx;
+       tty_line_name(driver, idx, tty->name);
+
+       if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+               tp_loc = &tty->termios;
+               ltp_loc = &tty->termios_locked;
+       } else {
+               tp_loc = &driver->termios[idx];
+               ltp_loc = &driver->termios_locked[idx];
+       }
+
+       if (!*tp_loc) {
+               tp = (struct termios *) kmalloc(sizeof(struct termios),
+                                               GFP_KERNEL);
+               if (!tp)
+                       goto free_mem_out;
+               *tp = driver->init_termios;
+       }
+
+       if (!*ltp_loc) {
+               ltp = (struct termios *) kmalloc(sizeof(struct termios),
+                                                GFP_KERNEL);
+               if (!ltp)
+                       goto free_mem_out;
+               memset(ltp, 0, sizeof(struct termios));
+       }
+
+       if (driver->type == TTY_DRIVER_TYPE_PTY) {
+               o_tty = alloc_tty_struct();
+               if (!o_tty)
+                       goto free_mem_out;
+               initialize_tty_struct(o_tty);
+               o_tty->driver = driver->other;
+               o_tty->index = idx;
+               tty_line_name(driver->other, idx, o_tty->name);
+
+               if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+                       o_tp_loc = &o_tty->termios;
+                       o_ltp_loc = &o_tty->termios_locked;
+               } else {
+                       o_tp_loc = &driver->other->termios[idx];
+                       o_ltp_loc = &driver->other->termios_locked[idx];
+               }
+
+               if (!*o_tp_loc) {
+                       o_tp = (struct termios *)
+                               kmalloc(sizeof(struct termios), GFP_KERNEL);
+                       if (!o_tp)
+                               goto free_mem_out;
+                       *o_tp = driver->other->init_termios;
+               }
+
+               if (!*o_ltp_loc) {
+                       o_ltp = (struct termios *)
+                               kmalloc(sizeof(struct termios), GFP_KERNEL);
+                       if (!o_ltp)
+                               goto free_mem_out;
+                       memset(o_ltp, 0, sizeof(struct termios));
+               }
+
+               /*
+                * Everything allocated ... set up the o_tty structure.
+                */
+               if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) {
+                       driver->other->ttys[idx] = o_tty;
+               }
+               if (!*o_tp_loc)
+                       *o_tp_loc = o_tp;
+               if (!*o_ltp_loc)
+                       *o_ltp_loc = o_ltp;
+               o_tty->termios = *o_tp_loc;
+               o_tty->termios_locked = *o_ltp_loc;
+               driver->other->refcount++;
+               if (driver->subtype == PTY_TYPE_MASTER)
+                       o_tty->count++;
+
+               /* Establish the links in both directions */
+               tty->link   = o_tty;
+               o_tty->link = tty;
+       }
+
+       /* 
+        * All structures have been allocated, so now we install them.
+        * Failures after this point use release_mem to clean up, so 
+        * there's no need to null out the local pointers.
+        */
+       if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+               driver->ttys[idx] = tty;
+       }
+       
+       if (!*tp_loc)
+               *tp_loc = tp;
+       if (!*ltp_loc)
+               *ltp_loc = ltp;
+       tty->termios = *tp_loc;
+       tty->termios_locked = *ltp_loc;
+       driver->refcount++;
+       tty->count++;
+
+       /* 
+        * Structures all installed ... call the ldisc open routines.
+        * If we fail here just call release_mem to clean up.  No need
+        * to decrement the use counts, as release_mem doesn't care.
+        */
+
+       if (tty->ldisc.open) {
+               retval = (tty->ldisc.open)(tty);
+               if (retval)
+                       goto release_mem_out;
+       }
+       if (o_tty && o_tty->ldisc.open) {
+               retval = (o_tty->ldisc.open)(o_tty);
+               if (retval) {
+                       if (tty->ldisc.close)
+                               (tty->ldisc.close)(tty);
+                       goto release_mem_out;
+               }
+               tty_ldisc_enable(o_tty);
+       }
+       tty_ldisc_enable(tty);
+       goto success;
+
+       /*
+        * This fast open can be used if the tty is already open.
+        * No memory is allocated, and the only failures are from
+        * attempting to open a closing tty or attempting multiple
+        * opens on a pty master.
+        */
+fast_track:
+       if (test_bit(TTY_CLOSING, &tty->flags)) {
+               retval = -EIO;
+               goto end_init;
+       }
+       if (driver->type == TTY_DRIVER_TYPE_PTY &&
+           driver->subtype == PTY_TYPE_MASTER) {
+               /*
+                * special case for PTY masters: only one open permitted, 
+                * and the slave side open count is incremented as well.
+                */
+               if (tty->count) {
+                       retval = -EIO;
+                       goto end_init;
+               }
+               tty->link->count++;
+       }
+       tty->count++;
+       tty->driver = driver; /* N.B. why do this every time?? */
+
+       /* FIXME */
+       if(!test_bit(TTY_LDISC, &tty->flags))
+               printk(KERN_ERR "init_dev but no ldisc\n");
+success:
+       *ret_tty = tty;
+       
+       /* All paths come through here to release the semaphore */
+end_init:
+       up(&tty_sem);
+       return retval;
+
+       /* Release locally allocated memory ... nothing placed in slots */
+free_mem_out:
+       if (o_tp)
+               kfree(o_tp);
+       if (o_tty)
+               free_tty_struct(o_tty);
+       if (ltp)
+               kfree(ltp);
+       if (tp)
+               kfree(tp);
+       free_tty_struct(tty);
+
+fail_no_mem:
+       module_put(driver->owner);
+       retval = -ENOMEM;
+       goto end_init;
+
+       /* call the tty release_mem routine to clean out this slot */
+release_mem_out:
+       printk(KERN_INFO "init_dev: ldisc open failed, "
+                        "clearing slot %d\n", idx);
+       release_mem(tty, idx);
+       goto end_init;
+}
+
+/*
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots.
+ */
+static void release_mem(struct tty_struct *tty, int idx)
+{
+       struct tty_struct *o_tty;
+       struct termios *tp;
+       int devpts = tty->driver->flags & TTY_DRIVER_DEVPTS_MEM;
+
+       if ((o_tty = tty->link) != NULL) {
+               if (!devpts)
+                       o_tty->driver->ttys[idx] = NULL;
+               if (o_tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+                       tp = o_tty->termios;
+                       if (!devpts)
+                               o_tty->driver->termios[idx] = NULL;
+                       kfree(tp);
+
+                       tp = o_tty->termios_locked;
+                       if (!devpts)
+                               o_tty->driver->termios_locked[idx] = NULL;
+                       kfree(tp);
+               }
+               o_tty->magic = 0;
+               o_tty->driver->refcount--;
+               file_list_lock();
+               list_del_init(&o_tty->tty_files);
+               file_list_unlock();
+               free_tty_struct(o_tty);
+       }
+
+       if (!devpts)
+               tty->driver->ttys[idx] = NULL;
+       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+               tp = tty->termios;
+               if (!devpts)
+                       tty->driver->termios[idx] = NULL;
+               kfree(tp);
+
+               tp = tty->termios_locked;
+               if (!devpts)
+                       tty->driver->termios_locked[idx] = NULL;
+               kfree(tp);
+       }
+
+       tty->magic = 0;
+       tty->driver->refcount--;
+       file_list_lock();
+       list_del_init(&tty->tty_files);
+       file_list_unlock();
+       module_put(tty->driver->owner);
+       free_tty_struct(tty);
+}
+
+/*
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+static void release_dev(struct file * filp)
+{
+       struct tty_struct *tty, *o_tty;
+       int     pty_master, tty_closing, o_tty_closing, do_sleep;
+       int     devpts_master, devpts;
+       int     idx;
+       char    buf[64];
+       unsigned long flags;
+       
+       tty = (struct tty_struct *)filp->private_data;
+       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "release_dev"))
+               return;
+
+       check_tty_count(tty, "release_dev");
+
+       tty_fasync(-1, filp, 0);
+
+       idx = tty->index;
+       pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+                     tty->driver->subtype == PTY_TYPE_MASTER);
+       devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
+       devpts_master = pty_master && devpts;
+       o_tty = tty->link;
+
+#ifdef TTY_PARANOIA_CHECK
+       if (idx < 0 || idx >= tty->driver->num) {
+               printk(KERN_DEBUG "release_dev: bad idx when trying to "
+                                 "free (%s)\n", tty->name);
+               return;
+       }
+       if (!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+               if (tty != tty->driver->ttys[idx]) {
+                       printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
+                              "for (%s)\n", idx, tty->name);
+                       return;
+               }
+               if (tty->termios != tty->driver->termios[idx]) {
+                       printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
+                              "for (%s)\n",
+                              idx, tty->name);
+                       return;
+               }
+               if (tty->termios_locked != tty->driver->termios_locked[idx]) {
+                       printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
+                              "termios_locked for (%s)\n",
+                              idx, tty->name);
+                       return;
+               }
+       }
+#endif
+
+#ifdef TTY_DEBUG_HANGUP
+       printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
+              tty_name(tty, buf), tty->count);
+#endif
+
+#ifdef TTY_PARANOIA_CHECK
+       if (tty->driver->other &&
+            !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+               if (o_tty != tty->driver->other->ttys[idx]) {
+                       printk(KERN_DEBUG "release_dev: other->table[%d] "
+                                         "not o_tty for (%s)\n",
+                              idx, tty->name);
+                       return;
+               }
+               if (o_tty->termios != tty->driver->other->termios[idx]) {
+                       printk(KERN_DEBUG "release_dev: other->termios[%d] "
+                                         "not o_termios for (%s)\n",
+                              idx, tty->name);
+                       return;
+               }
+               if (o_tty->termios_locked != 
+                     tty->driver->other->termios_locked[idx]) {
+                       printk(KERN_DEBUG "release_dev: other->termios_locked["
+                                         "%d] not o_termios_locked for (%s)\n",
+                              idx, tty->name);
+                       return;
+               }
+               if (o_tty->link != tty) {
+                       printk(KERN_DEBUG "release_dev: bad pty pointers\n");
+                       return;
+               }
+       }
+#endif
+       if (tty->driver->close)
+               tty->driver->close(tty, filp);
+
+       /*
+        * Sanity check: if tty->count is going to zero, there shouldn't be
+        * any waiters on tty->read_wait or tty->write_wait.  We test the
+        * wait queues and kick everyone out _before_ actually starting to
+        * close.  This ensures that we won't block while releasing the tty
+        * structure.
+        *
+        * The test for the o_tty closing is necessary, since the master and
+        * slave sides may close in any order.  If the slave side closes out
+        * first, its count will be one, since the master side holds an open.
+        * Thus this test wouldn't be triggered at the time the slave closes,
+        * so we do it now.
+        *
+        * Note that it's possible for the tty to be opened again while we're
+        * flushing out waiters.  By recalculating the closing flags before
+        * each iteration we avoid any problems.
+        */
+       while (1) {
+               tty_closing = tty->count <= 1;
+               o_tty_closing = o_tty &&
+                       (o_tty->count <= (pty_master ? 1 : 0));
+               do_sleep = 0;
+
+               if (tty_closing) {
+                       if (waitqueue_active(&tty->read_wait)) {
+                               wake_up(&tty->read_wait);
+                               do_sleep++;
+                       }
+                       if (waitqueue_active(&tty->write_wait)) {
+                               wake_up(&tty->write_wait);
+                               do_sleep++;
+                       }
+               }
+               if (o_tty_closing) {
+                       if (waitqueue_active(&o_tty->read_wait)) {
+                               wake_up(&o_tty->read_wait);
+                               do_sleep++;
+                       }
+                       if (waitqueue_active(&o_tty->write_wait)) {
+                               wake_up(&o_tty->write_wait);
+                               do_sleep++;
+                       }
+               }
+               if (!do_sleep)
+                       break;
+
+               printk(KERN_WARNING "release_dev: %s: read/write wait queue "
+                                   "active!\n", tty_name(tty, buf));
+               schedule();
+       }       
+
+       /*
+        * The closing flags are now consistent with the open counts on 
+        * both sides, and we've completed the last operation that could 
+        * block, so it's safe to proceed with closing.
+        */
+       if (pty_master) {
+               if (--o_tty->count < 0) {
+                       printk(KERN_WARNING "release_dev: bad pty slave count "
+                                           "(%d) for %s\n",
+                              o_tty->count, tty_name(o_tty, buf));
+                       o_tty->count = 0;
+               }
+       }
+       if (--tty->count < 0) {
+               printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
+                      tty->count, tty_name(tty, buf));
+               tty->count = 0;
+       }
+
+       /*
+        * We've decremented tty->count, so we need to remove this file
+        * descriptor off the tty->tty_files list; this serves two
+        * purposes:
+        *  - check_tty_count sees the correct number of file descriptors
+        *    associated with this tty.
+        *  - do_tty_hangup no longer sees this file descriptor as
+        *    something that needs to be handled for hangups.
+        */
+       file_kill(filp);
+       filp->private_data = NULL;
+
+       /*
+        * Perform some housekeeping before deciding whether to return.
+        *
+        * Set the TTY_CLOSING flag if this was the last open.  In the
+        * case of a pty we may have to wait around for the other side
+        * to close, and TTY_CLOSING makes sure we can't be reopened.
+        */
+       if(tty_closing)
+               set_bit(TTY_CLOSING, &tty->flags);
+       if(o_tty_closing)
+               set_bit(TTY_CLOSING, &o_tty->flags);
+
+       /*
+        * If _either_ side is closing, make sure there aren't any
+        * processes that still think tty or o_tty is their controlling
+        * tty.
+        */
+       if (tty_closing || o_tty_closing) {
+               struct task_struct *p;
+
+               read_lock(&tasklist_lock);
+               do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+                       p->signal->tty = NULL;
+               } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+               if (o_tty)
+                       do_each_task_pid(o_tty->session, PIDTYPE_SID, p) {
+                               p->signal->tty = NULL;
+                       } while_each_task_pid(o_tty->session, PIDTYPE_SID, p);
+               read_unlock(&tasklist_lock);
+       }
+
+       /* check whether both sides are closing ... */
+       if (!tty_closing || (o_tty && !o_tty_closing))
+               return;
+       
+#ifdef TTY_DEBUG_HANGUP
+       printk(KERN_DEBUG "freeing tty structure...");
+#endif
+       /*
+        * Prevent flush_to_ldisc() from rescheduling the work for later.  Then
+        * kill any delayed work. As this is the final close it does not
+        * race with the set_ldisc code path.
+        */
+       clear_bit(TTY_LDISC, &tty->flags);
+       clear_bit(TTY_DONT_FLIP, &tty->flags);
+       cancel_delayed_work(&tty->flip.work);
+
+       /*
+        * Wait for ->hangup_work and ->flip.work handlers to terminate
+        */
+        
+       flush_scheduled_work();
+       
+       /*
+        * Wait for any short term users (we know they are just driver
+        * side waiters as the file is closing so user count on the file
+        * side is zero).
+        */
+       spin_lock_irqsave(&tty_ldisc_lock, flags);
+       while(tty->ldisc.refcount)
+       {
+               spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+               wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
+               spin_lock_irqsave(&tty_ldisc_lock, flags);
+       }
+       spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       /*
+        * Shutdown the current line discipline, and reset it to N_TTY.
+        * N.B. why reset ldisc when we're releasing the memory??
+        *
+        * FIXME: this MUST get fixed for the new reflocking
+        */
+       if (tty->ldisc.close)
+               (tty->ldisc.close)(tty);
+       tty_ldisc_put(tty->ldisc.num);
+       
+       /*
+        *      Switch the line discipline back
+        */
+       tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+       tty_set_termios_ldisc(tty,N_TTY); 
+       if (o_tty) {
+               /* FIXME: could o_tty be in setldisc here ? */
+               clear_bit(TTY_LDISC, &o_tty->flags);
+               if (o_tty->ldisc.close)
+                       (o_tty->ldisc.close)(o_tty);
+               tty_ldisc_put(o_tty->ldisc.num);
+               tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY));
+               tty_set_termios_ldisc(o_tty,N_TTY); 
+       }
+       /*
+        * The release_mem function takes care of the details of clearing
+        * the slots and preserving the termios structure.
+        */
+       release_mem(tty, idx);
+
+#ifdef CONFIG_UNIX98_PTYS
+       /* Make this pty number available for reallocation */
+       if (devpts) {
+               down(&allocated_ptys_lock);
+               idr_remove(&allocated_ptys, idx);
+               up(&allocated_ptys_lock);
+       }
+#endif
+
+}
+
+/*
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
+ *
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now.  - Ted 1/27/92)
+ *
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ */
+static int tty_open(struct inode * inode, struct file * filp)
+{
+       struct tty_struct *tty;
+       int noctty, retval;
+       struct tty_driver *driver;
+       int index;
+       dev_t device = inode->i_rdev;
+       unsigned short saved_flags = filp->f_flags;
+
+       nonseekable_open(inode, filp);
+       
+retry_open:
+       noctty = filp->f_flags & O_NOCTTY;
+       index  = -1;
+       retval = 0;
+
+       if (device == MKDEV(TTYAUX_MAJOR,0)) {
+               if (!current->signal->tty)
+                       return -ENXIO;
+               driver = current->signal->tty->driver;
+               index = current->signal->tty->index;
+               filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+               /* noctty = 1; */
+               goto got_driver;
+       }
+#ifdef CONFIG_VT
+       if (console_use_vt && device == MKDEV(TTY_MAJOR,0)) {
+               extern int fg_console;
+               extern struct tty_driver *console_driver;
+               driver = console_driver;
+               index = fg_console;
+               noctty = 1;
+               goto got_driver;
+       }
+#endif
+       if (device == MKDEV(TTYAUX_MAJOR,1)) {
+               driver = console_device(&index);
+               if (driver) {
+                       /* Don't let /dev/console block */
+                       filp->f_flags |= O_NONBLOCK;
+                       noctty = 1;
+                       goto got_driver;
+               }
+               return -ENODEV;
+       }
+
+       driver = get_tty_driver(device, &index);
+       if (!driver)
+               return -ENODEV;
+got_driver:
+       retval = init_dev(driver, index, &tty);
+       if (retval)
+               return retval;
+
+       filp->private_data = tty;
+       file_move(filp, &tty->tty_files);
+       check_tty_count(tty, "tty_open");
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+           tty->driver->subtype == PTY_TYPE_MASTER)
+               noctty = 1;
+#ifdef TTY_DEBUG_HANGUP
+       printk(KERN_DEBUG "opening %s...", tty->name);
+#endif
+       if (!retval) {
+               if (tty->driver->open)
+                       retval = tty->driver->open(tty, filp);
+               else
+                       retval = -ENODEV;
+       }
+       filp->f_flags = saved_flags;
+
+       if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+               retval = -EBUSY;
+
+       if (retval) {
+#ifdef TTY_DEBUG_HANGUP
+               printk(KERN_DEBUG "error %d in opening %s...", retval,
+                      tty->name);
+#endif
+               release_dev(filp);
+               if (retval != -ERESTARTSYS)
+                       return retval;
+               if (signal_pending(current))
+                       return retval;
+               schedule();
+               /*
+                * Need to reset f_op in case a hangup happened.
+                */
+               if (filp->f_op == &hung_up_tty_fops)
+                       filp->f_op = &tty_fops;
+               goto retry_open;
+       }
+       if (!noctty &&
+           current->signal->leader &&
+           !current->signal->tty &&
+           tty->session == 0) {
+               task_lock(current);
+               current->signal->tty = tty;
+               task_unlock(current);
+               current->signal->tty_old_pgrp = 0;
+               tty->session = current->signal->session;
+               tty->pgrp = process_group(current);
+       }
+       return 0;
+}
+
+#ifdef CONFIG_UNIX98_PTYS
+static int ptmx_open(struct inode * inode, struct file * filp)
+{
+       struct tty_struct *tty;
+       int retval;
+       int index;
+       int idr_ret;
+
+       nonseekable_open(inode, filp);
+
+       /* find a device that is not in use. */
+       down(&allocated_ptys_lock);
+       if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+               up(&allocated_ptys_lock);
+               return -ENOMEM;
+       }
+       idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+       if (idr_ret < 0) {
+               up(&allocated_ptys_lock);
+               if (idr_ret == -EAGAIN)
+                       return -ENOMEM;
+               return -EIO;
+       }
+       if (index >= pty_limit) {
+               idr_remove(&allocated_ptys, index);
+               up(&allocated_ptys_lock);
+               return -EIO;
+       }
+       up(&allocated_ptys_lock);
+
+       retval = init_dev(ptm_driver, index, &tty);
+       if (retval)
+               goto out;
+
+       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+       filp->private_data = tty;
+       file_move(filp, &tty->tty_files);
+
+       retval = -ENOMEM;
+       if (devpts_pty_new(tty->link))
+               goto out1;
+
+       check_tty_count(tty, "tty_open");
+       retval = ptm_driver->open(tty, filp);
+       if (!retval)
+               return 0;
+out1:
+       release_dev(filp);
+out:
+       down(&allocated_ptys_lock);
+       idr_remove(&allocated_ptys, index);
+       up(&allocated_ptys_lock);
+       return retval;
+}
+#endif
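
For context only (not part of this patch): a minimal user-space sketch of how the ptmx_open() path above is normally exercised. It assumes the standard libc pty helpers (grantpt, unlockpt, ptsname) and that devpts is mounted so the slave shows up under /dev/pts.

#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

int main(void)
{
    /* open("/dev/ptmx") ends up in ptmx_open() above */
    int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);
    if (master < 0) {
        perror("open /dev/ptmx");
        return 1;
    }
    /* unlockpt() clears the TTY_PTY_LOCK bit that ptmx_open() sets on the
     * slave; grantpt() fixes up the slave's ownership and mode. */
    if (grantpt(master) < 0 || unlockpt(master) < 0) {
        perror("grantpt/unlockpt");
        return 1;
    }
    printf("slave pty: %s\n", ptsname(master));   /* e.g. /dev/pts/0 */
    return 0;
}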
+
+static int tty_release(struct inode * inode, struct file * filp)
+{
+       lock_kernel();
+       release_dev(filp);
+       unlock_kernel();
+       return 0;
+}
+
+/* No kernel lock held - fine */
+static unsigned int tty_poll(struct file * filp, poll_table * wait)
+{
+       struct tty_struct * tty;
+       struct tty_ldisc *ld;
+       int ret = 0;
+
+       tty = (struct tty_struct *)filp->private_data;
+       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_poll"))
+               return 0;
+               
+       ld = tty_ldisc_ref_wait(tty);
+       if (ld->poll)
+               ret = (ld->poll)(tty, filp, wait);
+       tty_ldisc_deref(ld);
+       return ret;
+}
+
+static int tty_fasync(int fd, struct file * filp, int on)
+{
+       struct tty_struct * tty;
+       int retval;
+
+       tty = (struct tty_struct *)filp->private_data;
+       if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_fasync"))
+               return 0;
+       
+       retval = fasync_helper(fd, filp, on, &tty->fasync);
+       if (retval <= 0)
+               return retval;
+
+       if (on) {
+               if (!waitqueue_active(&tty->read_wait))
+                       tty->minimum_to_wake = 1;
+               retval = f_setown(filp, (-tty->pgrp) ? : current->pid, 0);
+               if (retval)
+                       return retval;
+       } else {
+               if (!tty->fasync && !waitqueue_active(&tty->read_wait))
+                       tty->minimum_to_wake = N_TTY_BUF_SIZE;
+       }
+       return 0;
+}
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+       char ch, mbz = 0;
+       struct tty_ldisc *ld;
+       
+       if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (get_user(ch, p))
+               return -EFAULT;
+       ld = tty_ldisc_ref_wait(tty);
+       ld->receive_buf(tty, &ch, &mbz, 1);
+       tty_ldisc_deref(ld);
+       return 0;
+}
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
+{
+       if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+               return -EFAULT;
+       return 0;
+}
+
+static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
+       struct winsize __user * arg)
+{
+       struct winsize tmp_ws;
+
+       if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+               return -EFAULT;
+       if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
+               return 0;
+#ifdef CONFIG_VT
+       if (console_use_vt && tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
+               unsigned int currcons = tty->index;
+               int rc;
+
+               acquire_console_sem();
+               rc = vc_resize(currcons, tmp_ws.ws_col, tmp_ws.ws_row);
+               release_console_sem();
+               if (rc)
+                       return -ENXIO;
+       }
+#endif
+       if (tty->pgrp > 0)
+               kill_pg(tty->pgrp, SIGWINCH, 1);
+       if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
+               kill_pg(real_tty->pgrp, SIGWINCH, 1);
+       tty->winsize = tmp_ws;
+       real_tty->winsize = tmp_ws;
+       return 0;
+}
+
+static int tioccons(struct file *file)
+{
+       if (file->f_op->write == redirected_tty_write) {
+               struct file *f;
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               spin_lock(&redirect_lock);
+               f = redirect;
+               redirect = NULL;
+               spin_unlock(&redirect_lock);
+               if (f)
+                       fput(f);
+               return 0;
+       }
+       spin_lock(&redirect_lock);
+       if (redirect) {
+               spin_unlock(&redirect_lock);
+               return -EBUSY;
+       }
+       get_file(file);
+       redirect = file;
+       spin_unlock(&redirect_lock);
+       return 0;
+}
+
+
+static int fionbio(struct file *file, int __user *p)
+{
+       int nonblock;
+
+       if (get_user(nonblock, p))
+               return -EFAULT;
+
+       if (nonblock)
+               file->f_flags |= O_NONBLOCK;
+       else
+               file->f_flags &= ~O_NONBLOCK;
+       return 0;
+}
+
+static int tiocsctty(struct tty_struct *tty, int arg)
+{
+       task_t *p;
+
+       if (current->signal->leader &&
+           (current->signal->session == tty->session))
+               return 0;
+       /*
+        * The process must be a session leader and
+        * not have a controlling tty already.
+        */
+       if (!current->signal->leader || current->signal->tty)
+               return -EPERM;
+       if (tty->session > 0) {
+               /*
+                * This tty is already the controlling
+                * tty for another session group!
+                */
+               if ((arg == 1) && capable(CAP_SYS_ADMIN)) {
+                       /*
+                        * Steal it away
+                        */
+
+                       read_lock(&tasklist_lock);
+                       do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+                               p->signal->tty = NULL;
+                       } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+                       read_unlock(&tasklist_lock);
+               } else
+                       return -EPERM;
+       }
+       task_lock(current);
+       current->signal->tty = tty;
+       task_unlock(current);
+       current->signal->tty_old_pgrp = 0;
+       tty->session = current->signal->session;
+       tty->pgrp = process_group(current);
+       return 0;
+}
+
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+       /*
+        * (tty == real_tty) is a cheap way of
+        * testing if the tty is NOT a master pty.
+        */
+       if (tty == real_tty && current->signal->tty != real_tty)
+               return -ENOTTY;
+       return put_user(real_tty->pgrp, p);
+}
+
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+       pid_t pgrp;
+       int retval = tty_check_change(real_tty);
+
+       if (retval == -EIO)
+               return -ENOTTY;
+       if (retval)
+               return retval;
+       if (!current->signal->tty ||
+           (current->signal->tty != real_tty) ||
+           (real_tty->session != current->signal->session))
+               return -ENOTTY;
+       if (get_user(pgrp, p))
+               return -EFAULT;
+       if (pgrp < 0)
+               return -EINVAL;
+       if (session_of_pgrp(pgrp) != current->signal->session)
+               return -EPERM;
+       real_tty->pgrp = pgrp;
+       return 0;
+}
+
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+       /*
+        * (tty == real_tty) is a cheap way of
+        * testing if the tty is NOT a master pty.
+       */
+       if (tty == real_tty && current->signal->tty != real_tty)
+               return -ENOTTY;
+       if (real_tty->session <= 0)
+               return -ENOTTY;
+       return put_user(real_tty->session, p);
+}
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+       int ldisc;
+
+       if (get_user(ldisc, p))
+               return -EFAULT;
+       return tty_set_ldisc(tty, ldisc);
+}
+
+static int send_break(struct tty_struct *tty, int duration)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+
+       tty->driver->break_ctl(tty, -1);
+       if (!signal_pending(current))
+               schedule_timeout(duration);
+       tty->driver->break_ctl(tty, 0);
+       if (signal_pending(current))
+               return -EINTR;
+       return 0;
+}
+
+static int
+tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
+{
+       int retval = -EINVAL;
+
+       if (tty->driver->tiocmget) {
+               retval = tty->driver->tiocmget(tty, file);
+
+               if (retval >= 0)
+                       retval = put_user(retval, p);
+       }
+       return retval;
+}
+
+static int
+tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
+            unsigned __user *p)
+{
+       int retval = -EINVAL;
+
+       if (tty->driver->tiocmset) {
+               unsigned int set, clear, val;
+
+               retval = get_user(val, p);
+               if (retval)
+                       return retval;
+
+               set = clear = 0;
+               switch (cmd) {
+               case TIOCMBIS:
+                       set = val;
+                       break;
+               case TIOCMBIC:
+                       clear = val;
+                       break;
+               case TIOCMSET:
+                       set = val;
+                       clear = ~val;
+                       break;
+               }
+
+               set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+               clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+
+               retval = tty->driver->tiocmset(tty, file, set, clear);
+       }
+       return retval;
+}
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+int tty_ioctl(struct inode * inode, struct file * file,
+             unsigned int cmd, unsigned long arg)
+{
+       struct tty_struct *tty, *real_tty;
+       void __user *p = (void __user *)arg;
+       int retval;
+       struct tty_ldisc *ld;
+       
+       tty = (struct tty_struct *)file->private_data;
+       if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+               return -EINVAL;
+
+       real_tty = tty;
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+           tty->driver->subtype == PTY_TYPE_MASTER)
+               real_tty = tty->link;
+
+       /*
+        * Break handling by driver
+        */
+       if (!tty->driver->break_ctl) {
+               switch(cmd) {
+               case TIOCSBRK:
+               case TIOCCBRK:
+                       if (tty->driver->ioctl)
+                               return tty->driver->ioctl(tty, file, cmd, arg);
+                       return -EINVAL;
+                       
+               /* These two ioctl's always return success; even if */
+               /* the driver doesn't support them. */
+               case TCSBRK:
+               case TCSBRKP:
+                       if (!tty->driver->ioctl)
+                               return 0;
+                       retval = tty->driver->ioctl(tty, file, cmd, arg);
+                       if (retval == -ENOIOCTLCMD)
+                               retval = 0;
+                       return retval;
+               }
+       }
+
+       /*
+        * Factor out some common prep work
+        */
+       switch (cmd) {
+       case TIOCSETD:
+       case TIOCSBRK:
+       case TIOCCBRK:
+       case TCSBRK:
+       case TCSBRKP:                   
+               retval = tty_check_change(tty);
+               if (retval)
+                       return retval;
+               if (cmd != TIOCCBRK) {
+                       tty_wait_until_sent(tty, 0);
+                       if (signal_pending(current))
+                               return -EINTR;
+               }
+               break;
+       }
+
+       switch (cmd) {
+               case TIOCSTI:
+                       return tiocsti(tty, p);
+               case TIOCGWINSZ:
+                       return tiocgwinsz(tty, p);
+               case TIOCSWINSZ:
+                       return tiocswinsz(tty, real_tty, p);
+               case TIOCCONS:
+                       return real_tty!=tty ? -EINVAL : tioccons(file);
+               case FIONBIO:
+                       return fionbio(file, p);
+               case TIOCEXCL:
+                       set_bit(TTY_EXCLUSIVE, &tty->flags);
+                       return 0;
+               case TIOCNXCL:
+                       clear_bit(TTY_EXCLUSIVE, &tty->flags);
+                       return 0;
+               case TIOCNOTTY:
+                       if (current->signal->tty != tty)
+                               return -ENOTTY;
+                       if (current->signal->leader)
+                               disassociate_ctty(0);
+                       task_lock(current);
+                       current->signal->tty = NULL;
+                       task_unlock(current);
+                       return 0;
+               case TIOCSCTTY:
+                       return tiocsctty(tty, arg);
+               case TIOCGPGRP:
+                       return tiocgpgrp(tty, real_tty, p);
+               case TIOCSPGRP:
+                       return tiocspgrp(tty, real_tty, p);
+               case TIOCGSID:
+                       return tiocgsid(tty, real_tty, p);
+               case TIOCGETD:
+                       /* FIXME: check this is ok */
+                       return put_user(tty->ldisc.num, (int __user *)p);
+               case TIOCSETD:
+                       return tiocsetd(tty, p);
+#ifdef CONFIG_VT
+               case TIOCLINUX:
+                       return tioclinux(tty, arg);
+#endif
+               /*
+                * Break handling
+                */
+               case TIOCSBRK:  /* Turn break on, unconditionally */
+                       tty->driver->break_ctl(tty, -1);
+                       return 0;
+                       
+               case TIOCCBRK:  /* Turn break off, unconditionally */
+                       tty->driver->break_ctl(tty, 0);
+                       return 0;
+               case TCSBRK:   /* SVID version: non-zero arg --> no break */
+                       /*
+                        * XXX is the above comment correct, or the
+                        * code below correct?  Is this ioctl used at
+                        * all by anyone?
+                        */
+                       if (!arg)
+                               return send_break(tty, HZ/4);
+                       return 0;
+               case TCSBRKP:   /* support for POSIX tcsendbreak() */   
+                       return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
+
+               case TIOCMGET:
+                       return tty_tiocmget(tty, file, p);
+
+               case TIOCMSET:
+               case TIOCMBIC:
+               case TIOCMBIS:
+                       return tty_tiocmset(tty, file, cmd, p);
+       }
+       if (tty->driver->ioctl) {
+               retval = (tty->driver->ioctl)(tty, file, cmd, arg);
+               if (retval != -ENOIOCTLCMD)
+                       return retval;
+       }
+       ld = tty_ldisc_ref_wait(tty);
+       retval = -EINVAL;
+       if (ld->ioctl) {
+               retval = ld->ioctl(tty, file, cmd, arg);
+               if (retval == -ENOIOCTLCMD)
+                       retval = -EINVAL;
+       }
+       tty_ldisc_deref(ld);
+       return retval;
+}
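
As an aside (not in the patch), a minimal user-space sketch of the window-size ioctls dispatched above; TIOCSWINSZ is what makes tiocswinsz() deliver SIGWINCH to the foreground process group.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
    struct winsize ws;

    /* TIOCGWINSZ -> tiocgwinsz(): copy tty->winsize out to user space */
    if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) < 0) {
        perror("TIOCGWINSZ");
        return 1;
    }
    printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);

    /* TIOCSWINSZ -> tiocswinsz(): store the new size, raise SIGWINCH */
    ws.ws_row = 24;
    ws.ws_col = 80;
    return ioctl(STDIN_FILENO, TIOCSWINSZ, &ws) < 0;
}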
+
+
+/*
+ * This implements the "Secure Attention Key" ---  the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key".  Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ * 
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal.  But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context.  This can
+ * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
+ */
+static void __do_SAK(void *arg)
+{
+#ifdef TTY_SOFT_SAK
+       tty_hangup(tty);
+#else
+       struct tty_struct *tty = arg;
+       struct task_struct *p;
+       int session;
+       int             i;
+       struct file     *filp;
+       struct tty_ldisc *disc;
+       
+       if (!tty)
+               return;
+       session  = tty->session;
+       
+       /* We don't want an ldisc switch during this */
+       disc = tty_ldisc_ref(tty);
+       if (disc && disc->flush_buffer)
+               disc->flush_buffer(tty);
+       tty_ldisc_deref(disc);
+
+       if (tty->driver->flush_buffer)
+               tty->driver->flush_buffer(tty);
+       
+       read_lock(&tasklist_lock);
+       do_each_task_pid(session, PIDTYPE_SID, p) {
+               if (p->signal->tty == tty || session > 0) {
+                       printk(KERN_NOTICE "SAK: killed process %d"
+                           " (%s): p->signal->session==tty->session\n",
+                           p->pid, p->comm);
+                       send_sig(SIGKILL, p, 1);
+                       continue;
+               }
+               task_lock(p);
+               if (p->files) {
+                       spin_lock(&p->files->file_lock);
+                       for (i=0; i < p->files->max_fds; i++) {
+                               filp = fcheck_files(p->files, i);
+                               if (!filp)
+                                       continue;
+                               if (filp->f_op->read == tty_read &&
+                                   filp->private_data == tty) {
+                                       printk(KERN_NOTICE "SAK: killed process %d"
+                                           " (%s): fd#%d opened to the tty\n",
+                                           p->pid, p->comm, i);
+                                       send_sig(SIGKILL, p, 1);
+                                       break;
+                               }
+                       }
+                       spin_unlock(&p->files->file_lock);
+               }
+               task_unlock(p);
+       } while_each_task_pid(session, PIDTYPE_SID, p);
+       read_unlock(&tasklist_lock);
+#endif
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+       if (!tty)
+               return;
+       PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+       schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+/*
+ * This routine is called out of the software interrupt to flush data
+ * from the flip buffer to the line discipline. 
+ */
+static void flush_to_ldisc(void *private_)
+{
+       struct tty_struct *tty = (struct tty_struct *) private_;
+       unsigned char   *cp;
+       char            *fp;
+       int             count;
+       unsigned long   flags;
+       struct tty_ldisc *disc;
+
+       disc = tty_ldisc_ref(tty);
+       if (disc == NULL)       /*  !TTY_LDISC */
+               return;
+
+       if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
+               /*
+                * Do it after the next timer tick:
+                */
+               schedule_delayed_work(&tty->flip.work, 1);
+               goto out;
+       }
+       spin_lock_irqsave(&tty->read_lock, flags);
+       if (tty->flip.buf_num) {
+               cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+               fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+               tty->flip.buf_num = 0;
+               tty->flip.char_buf_ptr = tty->flip.char_buf;
+               tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+       } else {
+               cp = tty->flip.char_buf;
+               fp = tty->flip.flag_buf;
+               tty->flip.buf_num = 1;
+               tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+               tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+       }
+       count = tty->flip.count;
+       tty->flip.count = 0;
+       spin_unlock_irqrestore(&tty->read_lock, flags);
+
+       disc->receive_buf(tty, cp, fp, count);
+out:
+       tty_ldisc_deref(disc);
+}
+
+/*
+ *     Call the ldisc flush directly from a driver. This function may
+ *     return an error and need retrying by the user.
+ */
+
+int tty_push_data(struct tty_struct *tty, unsigned char *cp, unsigned char *fp, int count)
+{
+       int ret = 0;
+       struct tty_ldisc *disc;
+       
+       disc = tty_ldisc_ref(tty);
+       if(test_bit(TTY_DONT_FLIP, &tty->flags))
+               ret = -EAGAIN;
+       else if(disc == NULL)
+               ret = -EIO;
+       else
+               disc->receive_buf(tty, cp, fp, count);
+       tty_ldisc_deref(disc);
+       return ret;
+       
+}
+
+/*
+ * Routine which returns the baud rate of the tty
+ *
+ * Note that the baud_table needs to be kept in sync with the
+ * include/asm/termbits.h file.
+ */
+static int baud_table[] = {
+       0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+       9600, 19200, 38400, 57600, 115200, 230400, 460800,
+#ifdef __sparc__
+       76800, 153600, 307200, 614400, 921600
+#else
+       500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
+       2500000, 3000000, 3500000, 4000000
+#endif
+};
+
+static int n_baud_table = ARRAY_SIZE(baud_table);
+
+/**
+ *     tty_termios_baud_rate
+ *     @termios: termios structure
+ *
+ *     Convert termios baud rate data into a speed. This should be called
+ *     with the termios lock held if this termios is a terminal termios
+ *     structure. May change the termios data.
+ */
+int tty_termios_baud_rate(struct termios *termios)
+{
+       unsigned int cbaud;
+       
+       cbaud = termios->c_cflag & CBAUD;
+
+       if (cbaud & CBAUDEX) {
+               cbaud &= ~CBAUDEX;
+
+               if (cbaud < 1 || cbaud + 15 > n_baud_table)
+                       termios->c_cflag &= ~CBAUDEX;
+               else
+                       cbaud += 15;
+       }
+       return baud_table[cbaud];
+}
+
+EXPORT_SYMBOL(tty_termios_baud_rate);
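
A worked example (not part of the source) of the CBAUDEX decoding above. The octal constants below are assumed from the i386 termbits definitions and are copied locally so the snippet stands alone.

#include <assert.h>

/* Octal values as defined (on i386) in termbits.h -- assumed here. */
#define EX_CBAUD    0010017
#define EX_CBAUDEX  0010000
#define EX_B9600    0000015   /* index 13 in baud_table */
#define EX_B57600   0010001   /* CBAUDEX | 1 -> index 16 */

static const int example_baud_table[] = {
    0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
    9600, 19200, 38400, 57600, 115200, 230400, 460800,
};

int main(void)
{
    unsigned int cbaud = EX_B57600 & EX_CBAUD;

    /* Same arithmetic as tty_termios_baud_rate(): strip CBAUDEX, add 15. */
    if (cbaud & EX_CBAUDEX)
        cbaud = (cbaud & ~EX_CBAUDEX) + 15;
    assert(example_baud_table[cbaud] == 57600);

    /* The non-extended path indexes the table directly. */
    assert(example_baud_table[EX_B9600 & EX_CBAUD] == 9600);
    return 0;
}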
+
+/**
+ *     tty_get_baud_rate       -       get tty bit rates
+ *     @tty: tty to query
+ *
+ *     Returns the baud rate as an integer for this terminal. The
+ *     termios lock must be held by the caller and the terminal bit
+ *     flags may be updated.
+ */
+int tty_get_baud_rate(struct tty_struct *tty)
+{
+       int baud = tty_termios_baud_rate(tty->termios);
+
+       if (baud == 38400 && tty->alt_speed) {
+               if (!tty->warned) {
+                       printk(KERN_WARNING "Use of setserial/setrocket to "
+                                           "set SPD_* flags is deprecated\n");
+                       tty->warned = 1;
+               }
+               baud = tty->alt_speed;
+       }
+       
+       return baud;
+}
+
+EXPORT_SYMBOL(tty_get_baud_rate);
+
+/**
+ *     tty_flip_buffer_push    -       terminal
+ *     @tty: tty to push
+ *
+ *     Queue a push of the terminal flip buffers to the line discipline. This
+ *     function must not be called from IRQ context if tty->low_latency is set.
+ *
+ *     If the queue is busy flipping, the work will be held off and
+ *     retried later.
 *     If the queue is busy flipping, the work will be held off and
 *     retried later.
+ */
+
+void tty_flip_buffer_push(struct tty_struct *tty)
+{
+       if (tty->low_latency)
+               flush_to_ldisc((void *) tty);
+       else
+               schedule_delayed_work(&tty->flip.work, 1);
+}
+
+EXPORT_SYMBOL(tty_flip_buffer_push);
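
For illustration only (the driver and function names here are hypothetical): roughly how a low-level serial driver feeds received bytes into the flip buffer that flush_to_ldisc() drains. In-tree drivers of this era typically use the tty_insert_flip_char() helper from <linux/tty_flip.h>, which performs the same bookkeeping.

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical receive-interrupt helper for an imaginary UART driver. */
static void example_uart_rx_char(struct tty_struct *tty, unsigned char ch)
{
    if (tty->flip.count >= TTY_FLIPBUF_SIZE)
        return;                               /* flip buffer full: drop byte */

    *tty->flip.char_buf_ptr++ = ch;           /* received data byte */
    *tty->flip.flag_buf_ptr++ = TTY_NORMAL;   /* no framing/parity error */
    tty->flip.count++;

    /* Queue the push; with tty->low_latency the ldisc is fed immediately. */
    tty_flip_buffer_push(tty);
}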
+
+/*
+ * This subroutine initializes a tty structure.
+ */
+static void initialize_tty_struct(struct tty_struct *tty)
+{
+       memset(tty, 0, sizeof(struct tty_struct));
+       tty->magic = TTY_MAGIC;
+       tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+       tty->pgrp = -1;
+       tty->flip.char_buf_ptr = tty->flip.char_buf;
+       tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+       INIT_WORK(&tty->flip.work, flush_to_ldisc, tty);
+       init_MUTEX(&tty->flip.pty_sem);
+       init_MUTEX(&tty->termios_sem);
+       init_waitqueue_head(&tty->write_wait);
+       init_waitqueue_head(&tty->read_wait);
+       INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+       sema_init(&tty->atomic_read, 1);
+       sema_init(&tty->atomic_write, 1);
+       spin_lock_init(&tty->read_lock);
+       INIT_LIST_HEAD(&tty->tty_files);
+       INIT_WORK(&tty->SAK_work, NULL, NULL);
+}
+
+/*
+ * The default put_char routine if the driver did not define one.
+ */
+static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
+{
+       tty->driver->write(tty, 0, &ch, 1);
+}
+
+static struct class_simple *tty_class;
+
+/**
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ *     This field is optional, if there is no known struct device for this
+ *     tty device it can be set to NULL safely.
+ *
+ * This call is required to be made to register an individual tty device if
+ * the tty driver's flags have the TTY_DRIVER_NO_DEVFS bit set.  If that
+ * bit is not set, this function should not be called.
+ */
+void tty_register_device(struct tty_driver *driver, unsigned index,
+                        struct device *device)
+{
+       char name[64];
+       dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+       if (index >= driver->num) {
+               printk(KERN_ERR "Attempt to register invalid tty line number "
+                      " (%d).\n", index);
+               return;
+       }
+
+       devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
+                       "%s%d", driver->devfs_name, index + driver->name_base);
+
+       if (driver->type == TTY_DRIVER_TYPE_PTY)
+               pty_line_name(driver, index, name);
+       else
+               tty_line_name(driver, index, name);
+       class_simple_device_add(tty_class, dev, device, name);
+}
+
+/**
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ *
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is removed.
+ */
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+       devfs_remove("%s%d", driver->devfs_name, index + driver->name_base);
+       class_simple_device_remove(MKDEV(driver->major, driver->minor_start) + index);
+}
+
+EXPORT_SYMBOL(tty_register_device);
+EXPORT_SYMBOL(tty_unregister_device);
+
+struct tty_driver *alloc_tty_driver(int lines)
+{
+       struct tty_driver *driver;
+
+       driver = kmalloc(sizeof(struct tty_driver), GFP_KERNEL);
+       if (driver) {
+               memset(driver, 0, sizeof(struct tty_driver));
+               driver->magic = TTY_DRIVER_MAGIC;
+               driver->num = lines;
+               /* later we'll move allocation of tables here */
+       }
+       return driver;
+}
+
+void put_tty_driver(struct tty_driver *driver)
+{
+       kfree(driver);
+}
+
+void tty_set_operations(struct tty_driver *driver, struct tty_operations *op)
+{
+       driver->open = op->open;
+       driver->close = op->close;
+       driver->write = op->write;
+       driver->put_char = op->put_char;
+       driver->flush_chars = op->flush_chars;
+       driver->write_room = op->write_room;
+       driver->chars_in_buffer = op->chars_in_buffer;
+       driver->ioctl = op->ioctl;
+       driver->set_termios = op->set_termios;
+       driver->throttle = op->throttle;
+       driver->unthrottle = op->unthrottle;
+       driver->stop = op->stop;
+       driver->start = op->start;
+       driver->hangup = op->hangup;
+       driver->break_ctl = op->break_ctl;
+       driver->flush_buffer = op->flush_buffer;
+       driver->set_ldisc = op->set_ldisc;
+       driver->wait_until_sent = op->wait_until_sent;
+       driver->send_xchar = op->send_xchar;
+       driver->read_proc = op->read_proc;
+       driver->write_proc = op->write_proc;
+       driver->tiocmget = op->tiocmget;
+       driver->tiocmset = op->tiocmset;
+}
+
+
+EXPORT_SYMBOL(alloc_tty_driver);
+EXPORT_SYMBOL(put_tty_driver);
+EXPORT_SYMBOL(tty_set_operations);
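
A minimal registration sketch (everything named example_* is hypothetical, not part of this patch) showing the sequence these helpers support: alloc_tty_driver(), fill in the fixed fields, tty_set_operations(), tty_register_driver(). Note that the 2.6.9-era write() callback still takes a from_user argument, as the tty_default_put_char() call above shows.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static int example_open(struct tty_struct *tty, struct file *filp)
{
    return 0;
}

static void example_close(struct tty_struct *tty, struct file *filp)
{
}

static int example_write(struct tty_struct *tty, int from_user,
                         const unsigned char *buf, int count)
{
    return count;                 /* pretend everything was consumed */
}

static int example_write_room(struct tty_struct *tty)
{
    return 4096;
}

static struct tty_operations example_ops = {
    .open       = example_open,
    .close      = example_close,
    .write      = example_write,
    .write_room = example_write_room,
};

static struct tty_driver *example_driver;

static int __init example_tty_init(void)
{
    example_driver = alloc_tty_driver(4);            /* four lines */
    if (!example_driver)
        return -ENOMEM;

    example_driver->owner        = THIS_MODULE;
    example_driver->driver_name  = "example_serial";
    example_driver->name         = "ttyEX";
    example_driver->major        = 0;                /* 0 => dynamic major */
    example_driver->type         = TTY_DRIVER_TYPE_SERIAL;
    example_driver->subtype      = SERIAL_TYPE_NORMAL;
    example_driver->init_termios = tty_std_termios;
    example_driver->flags        = TTY_DRIVER_REAL_RAW;
    tty_set_operations(example_driver, &example_ops);

    if (tty_register_driver(example_driver)) {
        put_tty_driver(example_driver);
        return -ENODEV;
    }
    return 0;
}
module_init(example_tty_init);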
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+       int error;
+        int i;
+       dev_t dev;
+       void **p = NULL;
+
+       if (driver->flags & TTY_DRIVER_INSTALLED)
+               return 0;
+
+       if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+               p = kmalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL);
+               if (!p)
+                       return -ENOMEM;
+               memset(p, 0, driver->num * 3 * sizeof(void *));
+       }
+
+       if (!driver->major) {
+               error = alloc_chrdev_region(&dev, driver->minor_start, driver->num,
+                                               (char*)driver->name);
+               if (!error) {
+                       driver->major = MAJOR(dev);
+                       driver->minor_start = MINOR(dev);
+               }
+       } else {
+               dev = MKDEV(driver->major, driver->minor_start);
+               error = register_chrdev_region(dev, driver->num,
+                                               (char*)driver->name);
+       }
+       if (error < 0) {
+               kfree(p);
+               return error;
+       }
+
+       if (p) {
+               driver->ttys = (struct tty_struct **)p;
+               driver->termios = (struct termios **)(p + driver->num);
+               driver->termios_locked = (struct termios **)(p + driver->num * 2);
+       } else {
+               driver->ttys = NULL;
+               driver->termios = NULL;
+               driver->termios_locked = NULL;
+       }
+
+       cdev_init(&driver->cdev, &tty_fops);
+       driver->cdev.owner = driver->owner;
+       error = cdev_add(&driver->cdev, dev, driver->num);
+       if (error) {
+               cdev_del(&driver->cdev);
+               unregister_chrdev_region(dev, driver->num);
+               driver->ttys = NULL;
+               driver->termios = driver->termios_locked = NULL;
+               kfree(p);
+               return error;
+       }
+
+       if (!driver->put_char)
+               driver->put_char = tty_default_put_char;
+       
+       list_add(&driver->tty_drivers, &tty_drivers);
+       
+       if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
+               for(i = 0; i < driver->num; i++)
+                   tty_register_device(driver, i, NULL);
+       }
+       proc_tty_register_driver(driver);
+       return 0;
+}
+
+EXPORT_SYMBOL(tty_register_driver);
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+       int i;
+       struct termios *tp;
+       void *p;
+
+       if (driver->refcount)
+               return -EBUSY;
+
+       unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+                               driver->num);
+
+       list_del(&driver->tty_drivers);
+
+       /*
+        * Free the termios and termios_locked structures because
+        * we don't want to get memory leaks when modular tty
+        * drivers are removed from the kernel.
+        */
+       for (i = 0; i < driver->num; i++) {
+               tp = driver->termios[i];
+               if (tp) {
+                       driver->termios[i] = NULL;
+                       kfree(tp);
+               }
+               tp = driver->termios_locked[i];
+               if (tp) {
+                       driver->termios_locked[i] = NULL;
+                       kfree(tp);
+               }
+               if (!(driver->flags & TTY_DRIVER_NO_DEVFS))
+                       tty_unregister_device(driver, i);
+       }
+       p = driver->ttys;
+       proc_tty_unregister_driver(driver);
+       driver->ttys = NULL;
+       driver->termios = driver->termios_locked = NULL;
+       kfree(p);
+       cdev_del(&driver->cdev);
+       return 0;
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+
+/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+       initcall_t *call;
+
+       /* Setup the default TTY line discipline. */
+       (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
+
+       /*
+        * set up the console device so that later boot sequences can 
+        * inform about problems etc..
+        */
+#ifdef CONFIG_EARLY_PRINTK
+       disable_early_printk();
+#endif
+#ifdef CONFIG_SERIAL_68360
+       /* This is not a console initcall. I know not what it's doing here.
+          So I haven't moved it. dwmw2 */
+        rs_360_init();
+#endif
+       call = &__con_initcall_start;
+       while (call < &__con_initcall_end) {
+               (*call)();
+               call++;
+       }
+}
+
+#ifdef CONFIG_VT
+extern int vty_init(void);
+#endif
+
+static int __init tty_class_init(void)
+{
+       tty_class = class_simple_create(THIS_MODULE, "tty");
+       if (IS_ERR(tty_class))
+               return PTR_ERR(tty_class);
+       return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+
+static struct cdev tty_cdev, console_cdev;
+#ifdef CONFIG_UNIX98_PTYS
+static struct cdev ptmx_cdev;
+#endif
+#ifdef CONFIG_VT
+static struct cdev vc0_cdev;
+#endif
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+static int __init tty_init(void)
+{
+       cdev_init(&tty_cdev, &tty_fops);
+       if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+               panic("Couldn't register /dev/tty driver\n");
+       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 0), S_IFCHR|S_IRUGO|S_IWUGO, "tty");
+       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+       cdev_init(&console_cdev, &console_fops);
+       if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+               panic("Couldn't register /dev/console driver\n");
+       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 1), S_IFCHR|S_IRUSR|S_IWUSR, "console");
+       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
+
+#ifdef CONFIG_UNIX98_PTYS
+       cdev_init(&ptmx_cdev, &ptmx_fops);
+       if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
+               panic("Couldn't register /dev/ptmx driver\n");
+       devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 2), S_IFCHR|S_IRUGO|S_IWUGO, "ptmx");
+       class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+#endif
+
+#ifdef CONFIG_VT
+       if (console_use_vt) {
+               cdev_init(&vc0_cdev, &console_fops);
+               if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+                   register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
+                                          "/dev/vc/0") < 0)
+                       panic("Couldn't register /dev/tty0 driver\n");
+               devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
+                             "vc/0");
+               class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
+                                       "tty0");
+
+               vty_init();
+       }
+#endif
+       return 0;
+}
+module_init(tty_init);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/Makefile
new file mode 100644 (file)
index 0000000..e181171
--- /dev/null
@@ -0,0 +1,12 @@
+
+
+obj-y  += console/
+obj-y  += evtchn/
+obj-y  += privcmd/
+obj-y   += balloon/
+
+obj-$(CONFIG_XEN_BLKDEV_BACKEND)       += blkback/
+obj-$(CONFIG_XEN_NETDEV_BACKEND)       += netback/
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += blkfront/
+obj-$(CONFIG_XEN_NETDEV_FRONTEND)      += netfront/
+
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/balloon/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/balloon/Makefile
new file mode 100644 (file)
index 0000000..0e3a348
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y += balloon.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c b/linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c
new file mode 100644 (file)
index 0000000..b9ebda1
--- /dev/null
@@ -0,0 +1,712 @@
+/******************************************************************************
+ * balloon.c
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <asm-xen/xen_proc.h>
+#else
+#include <asm/xen_proc.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/ctrl_if.h>
+#else
+#include <asm/hypervisor.h>
+#include <asm/ctrl_if.h>
+#endif
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
+#define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
+#define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
+typedef struct user_balloon_op {
+    unsigned int  op;
+    unsigned long size;
+} user_balloon_op_t;
+/* END OF USER DEFINE */
+
+static struct proc_dir_entry *balloon_pde;
+
+unsigned long credit;
+static unsigned long current_pages, most_seen_pages;
+
+/*
+ * Dead entry written into balloon-owned entries in the PMT.
+ * It is deliberately different to INVALID_P2M_ENTRY.
+ */
+#define DEAD 0xdead1234
+
+static inline pte_t *get_ptep(unsigned long addr)
+{
+    pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
+    pgd = pgd_offset_k(addr);
+
+    if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
+
+    pmd = pmd_offset(pgd, addr);
+    if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    ptep = pte_offset_kernel(pmd, addr);
+#else
+    ptep = pte_offset(pmd, addr);
+#endif
+
+    return ptep;
+}
+
+/* Main function for relinquishing memory. */
+static unsigned long inflate_balloon(unsigned long num_pages)
+{
+    unsigned long *parray;
+    unsigned long *currp;
+    unsigned long curraddr;
+    unsigned long ret = 0;
+    unsigned long i, j;
+
+    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
+    if ( parray == NULL )
+    {
+        printk(KERN_ERR "inflate_balloon: Unable to vmalloc parray\n");
+        return -EFAULT;
+    }
+
+    currp = parray;
+
+    for ( i = 0; i < num_pages; i++, currp++ )
+    {
+        struct page *page = alloc_page(GFP_HIGHUSER);
+        unsigned long pfn = page - mem_map;
+
+        /* If allocation fails then free all reserved pages. */
+        if ( page == NULL )
+        {
+            printk(KERN_ERR "Unable to inflate balloon by %ld, only"
+                   " %ld pages free.", num_pages, i);
+            currp = parray;
+            for ( j = 0; j < i; j++, currp++ )
+                __free_page((struct page *) (mem_map + *currp));
+            ret = -EFAULT;
+            goto cleanup;
+        }
+
+        *currp = pfn;
+    }
+
+
+    for ( i = 0, currp = parray; i < num_pages; i++, currp++ )
+    {
+        unsigned long mfn = phys_to_machine_mapping[*currp];
+        curraddr = (unsigned long)page_address(mem_map + *currp);
+        /* Blow away page contents for security, and also p.t. ref if any. */
+        if ( curraddr != 0 )
+        {
+            scrub_pages(curraddr, 1);
+            queue_l1_entry_update(get_ptep(curraddr), 0);
+        }
+#ifdef CONFIG_XEN_SCRUB_PAGES
+        else
+        {
+            void *p = kmap(&mem_map[*currp]);
+            scrub_pages(p, 1);
+            kunmap(&mem_map[*currp]);
+        }
+#endif
+        phys_to_machine_mapping[*currp] = DEAD;
+        *currp = mfn;
+    }
+
+    /* Flush updates through and flush the TLB. */
+    xen_tlb_flush();
+
+    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
+                                parray, num_pages, 0);
+    if ( unlikely(ret != num_pages) )
+    {
+        printk(KERN_ERR "Unable to inflate balloon, error %lx\n", ret);
+        goto cleanup;
+    }
+
+    credit += num_pages;
+    ret = num_pages;
+
+ cleanup:
+    vfree(parray);
+
+    return ret;
+}
+
+/*
+ * Install new mem pages obtained by deflate_balloon. The function walks
+ * the phys->machine mapping table looking for DEAD entries and populates
+ * them.
+ */
+static unsigned long process_returned_pages(unsigned long * parray, 
+                                       unsigned long num)
+{
+    /* Currently this function is rather simplistic, as it is assumed
+     * that the domain reclaims only the number of pages previously
+     * released. This is to change soon, and the code to extend page
+     * tables etc. will be incorporated here.
+     */
+     
+    unsigned long tot_pages = most_seen_pages;   
+    unsigned long * curr = parray;
+    unsigned long num_installed;
+    unsigned long i;
+
+    num_installed = 0;
+    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
+    {
+        if ( phys_to_machine_mapping[i] == DEAD )
+        {
+            phys_to_machine_mapping[i] = *curr;
+            queue_machphys_update(*curr, i);
+            if (i<max_low_pfn)
+              queue_l1_entry_update(
+                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
+                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+
+            __free_page(mem_map + i);
+
+            curr++;
+            num_installed++;
+        }
+    }
+
+    return num_installed;
+}
+
+unsigned long deflate_balloon(unsigned long num_pages)
+{
+    unsigned long ret;
+    unsigned long * parray;
+
+    if ( num_pages > credit )
+    {
+        printk(KERN_ERR "deflate_balloon: %lu pages > %lu credit.\n",
+               num_pages, credit);
+        return -EAGAIN;
+    }
+
+    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
+    if ( parray == NULL )
+    {
+        printk(KERN_ERR "deflate_balloon: Unable to vmalloc parray\n");
+        return 0;
+    }
+
+    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
+                                parray, num_pages, 0);
+    if ( unlikely(ret != num_pages) )
+    {
+        printk(KERN_ERR "deflate_balloon: xen increase_reservation err %lx\n",
+               ret);
+        goto cleanup;
+    }
+
+    if ( (ret = process_returned_pages(parray, num_pages)) < num_pages )
+    {
+        printk(KERN_WARNING
+               "deflate_balloon: restored only %lx of %lx pages.\n",
+           ret, num_pages);
+        goto cleanup;
+    }
+
+    ret = num_pages;
+    credit -= num_pages;
+
+ cleanup:
+    vfree(parray);
+
+    return ret;
+}
+
+#define PAGE_TO_MB_SHIFT 8
+
+/*
+ * pagetable_extend() mimics pagetable_init() from arch/xen/mm/init.c.
+ * The loops go through all of low memory (ZONE_NORMAL); the old pages
+ * have _PAGE_PRESENT set and so get skipped.
+ * If low memory is not full, the new pages are used to fill it, going
+ * from cur_low_pfn to low_pfn.  High memory is not direct-mapped, so
+ * no extension is needed for new high memory.
+ */
+
+static void pagetable_extend (int cur_low_pfn, int newpages)
+{
+    unsigned long vaddr, end;
+    pgd_t *kpgd, *pgd, *pgd_base;
+    int i, j, k;
+    pmd_t *kpmd, *pmd;
+    pte_t *kpte, *pte, *pte_base;
+    int low_pfn = min(cur_low_pfn+newpages,(int)max_low_pfn);
+
+    /*
+     * This can be zero as well - no problem, in that case we exit
+     * the loops anyway due to the PTRS_PER_* conditions.
+     */
+    end = (unsigned long)__va(low_pfn*PAGE_SIZE);
+
+    pgd_base = init_mm.pgd;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    i = pgd_index(PAGE_OFFSET);
+#else
+    i = __pgd_offset(PAGE_OFFSET);
+#endif
+    pgd = pgd_base + i;
+
+    for (; i < PTRS_PER_PGD; pgd++, i++) {
+        vaddr = i*PGDIR_SIZE;
+        if (end && (vaddr >= end))
+            break;
+        pmd = (pmd_t *)pgd;
+        for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
+            vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
+            if (end && (vaddr >= end))
+                break;
+
+            /* Filled in for us already? */
+            if ( pmd_val(*pmd) & _PAGE_PRESENT )
+                continue;
+
+            pte_base = pte = (pte_t *) __get_free_page(GFP_KERNEL);
+
+            for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
+                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
+                if (end && (vaddr >= end))
+                    break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+                *pte = mk_pte(virt_to_page(vaddr), PAGE_KERNEL);
+#else
+               *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
+#endif
+            }
+            kpgd = pgd_offset_k((unsigned long)pte_base);
+            kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+            kpte = pte_offset_kernel(kpmd, (unsigned long)pte_base);
+#else
+           kpte = pte_offset(kpmd, (unsigned long)pte_base);
+#endif
+            queue_l1_entry_update(kpte,
+                                  (*(unsigned long *)kpte)&~_PAGE_RW);
+            set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
+            XEN_flush_page_update_queue();
+        }
+    }
+}
+
+/*
+ * claim_new_pages() asks xen to increase this domain's memory  reservation
+ * and return a list of the new pages of memory.  This new pages are
+ * added to the free list of the memory manager.
+ *
+ * Available RAM does not normally change while Linux runs.  To make this work,
+ * the linux mem= boottime command line param must say how big memory could
+ * possibly grow.  Then setup_arch() in arch/xen/kernel/setup.c
+ * sets max_pfn, max_low_pfn and the zones according to
+ * this max memory size.   The page tables themselves can only be
+ * extended after xen has assigned new pages to this domain.
+ */
+
+static unsigned long
+claim_new_pages(unsigned long num_pages)
+{
+    unsigned long new_page_cnt, pfn;
+    unsigned long * parray, *curr;
+
+    if (most_seen_pages+num_pages> max_pfn)
+        num_pages = max_pfn-most_seen_pages;
+    if (num_pages==0) return -EINVAL;
+
+    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
+    if ( parray == NULL )
+    {
+        printk(KERN_ERR "claim_new_pages: Unable to vmalloc parray\n");
+        return 0;
+    }
+
+    new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
+                                parray, num_pages, 0);
+    if ( new_page_cnt != num_pages )
+    {
+        printk(KERN_WARNING
+            "claim_new_pages: xen granted only %lu of %lu requested pages\n",
+            new_page_cnt, num_pages);
+
+        /* 
+         * Avoid a xen lockup when the user forgot to run setdomainmaxmem.
+         * Xen can usually dribble out a few pages and then hangs.
+         */
+        if ( new_page_cnt < 1000 )
+        {
+            printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
+            HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
+                                parray, new_page_cnt, 0);
+            return -EFAULT;
+        }
+    }
+    memcpy(phys_to_machine_mapping+most_seen_pages, parray,
+           new_page_cnt * sizeof(unsigned long));
+
+    pagetable_extend(most_seen_pages,new_page_cnt);
+
+    for ( pfn = most_seen_pages, curr = parray;
+          pfn < most_seen_pages+new_page_cnt;
+          pfn++, curr++ )
+    {
+        struct page *page = mem_map + pfn;
+
+#ifndef CONFIG_HIGHMEM
+        if ( pfn>=max_low_pfn )
+        {
+            printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+               pfn>>PAGE_TO_MB_SHIFT);
+            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+            break;
+        }
+#endif
+        queue_machphys_update(*curr, pfn);
+        if ( pfn < max_low_pfn )
+            queue_l1_entry_update(
+                get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
+                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+        
+        XEN_flush_page_update_queue();
+        
+        /* this next bit mimics arch/xen/mm/init.c:one_highpage_init() */
+        ClearPageReserved(page);
+        if ( pfn >= max_low_pfn )
+            set_bit(PG_highmem, &page->flags);
+        set_page_count(page, 1);
+        __free_page(page);
+    }
+
+    vfree(parray);
+
+    return new_page_cnt;
+}
+
+
+static int balloon_try_target(int target)
+{
+    int change, reclaim;
+
+    if ( target < current_pages )
+    {
+        int change = inflate_balloon(current_pages-target);
+        if ( change <= 0 )
+            return change;
+
+        current_pages -= change;
+        printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
+            change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+    }
+    else if ( target > current_pages )
+    {
+        reclaim = min((unsigned long)target,most_seen_pages) - current_pages;
+
+        if ( reclaim )
+        {
+            change = deflate_balloon( reclaim );
+            if ( change <= 0 )
+                return change;
+            current_pages += change;
+            printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
+                change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+        }
+
+        if ( most_seen_pages < target )
+        {
+            int growth = claim_new_pages(target-most_seen_pages);
+            if ( growth <= 0 )
+                return growth;
+            most_seen_pages += growth;
+            current_pages += growth;
+            printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
+                growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+        }
+    }
+
+    return 1;
+}
+
+
+static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    switch ( msg->subtype )
+    {
+    case CMSG_MEM_REQUEST_SET:
+        if ( msg->length != sizeof(mem_request_t) )
+            goto parse_error;
+        {
+            mem_request_t *req = (mem_request_t *)&msg->msg[0];
+            req->status = balloon_try_target(req->target);
+        }
+        break;        
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static int balloon_write(struct file *file, const char *buffer,
+                         size_t count, loff_t *offp)
+{
+    char memstring[64], *endchar;
+    int len, i;
+    unsigned long target;
+    unsigned long long targetbytes;
+
+    /* Only admin can play with the balloon :) */
+    if ( !capable(CAP_SYS_ADMIN) )
+        return -EPERM;
+
+    if ( count > sizeof(memstring) )
+        return -EFBIG;
+
+    len = strnlen_user(buffer, count);
+    if ( len == 0 ) return -EBADMSG;
+    if ( len == 1 ) return 1; /* input starts with a NUL char */
+    if ( strncpy_from_user(memstring, buffer, len) < 0 )
+        return -EFAULT;
+
+    endchar = memstring;
+    for ( i = 0; i < len; ++i, ++endchar )
+        if ( (memstring[i] < '0') || (memstring[i] > '9') )
+            break;
+    if ( i == 0 )
+        return -EBADMSG;
+
+    targetbytes = memparse(memstring,&endchar);
+    target = targetbytes >> PAGE_SHIFT;
+
+    i = balloon_try_target(target);
+
+    if ( i <= 0 ) return i;
+
+    *offp += len;
+    return len;
+}
+
+static int balloon_read(struct file *filp, char *buffer,
+                        size_t count, loff_t *offp)
+{
+    static char priv_buf[32];
+    char *priv_bufp = priv_buf;
+    int len;
+    len = sprintf(priv_buf,"%lu\n",current_pages<<PAGE_SHIFT);
+
+    len -= *offp;
+    priv_bufp += *offp;
+    if (len>count) len = count;
+    if (len<0) len = 0;
+
+    copy_to_user(buffer, priv_bufp, len);
+
+    *offp += len;
+    return len;
+}
+
+static struct file_operations balloon_fops = {
+    .read  = balloon_read,
+    .write = balloon_write
+};
+#else
+
+
+static int balloon_write(struct file *file, const char *buffer,
+                         u_long count, void *data)
+{
+    char memstring[64], *endchar;
+    int len, i;
+    unsigned long target;
+    unsigned long long targetbytes;
+
+    /* Only admin can play with the balloon :) */
+    if ( !capable(CAP_SYS_ADMIN) )
+        return -EPERM;
+
+    if ( count > sizeof(memstring) )
+        return -EFBIG;
+
+    len = strnlen_user(buffer, count);
+    if ( len == 0 ) return -EBADMSG;
+    if ( len == 1 ) return 1; /* input starts with a NUL char */
+    if ( strncpy_from_user(memstring, buffer, len) < 0 )
+        return -EFAULT;
+
+    endchar = memstring;
+    for ( i = 0; i < len; ++i, ++endchar )
+        if ( (memstring[i] < '0') || (memstring[i] > '9') )
+            break;
+    if ( i == 0 )
+        return -EBADMSG;
+
+    targetbytes = memparse(memstring,&endchar);
+    target = targetbytes >> PAGE_SHIFT;
+
+    if ( target < current_pages )
+    {
+        int change = inflate_balloon(current_pages-target);
+        if ( change <= 0 )
+            return change;
+
+        current_pages -= change;
+        printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
+            change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+    }
+    else if ( target > current_pages )
+    {
+        int change, reclaim = min(target,most_seen_pages) - current_pages;
+
+        if ( reclaim )
+        {
+            change = deflate_balloon( reclaim);
+            if ( change <= 0 )
+                return change;
+            current_pages += change;
+            printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
+                change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+        }
+
+        if ( most_seen_pages < target )
+        {
+            int growth = claim_new_pages(target-most_seen_pages);
+            if ( growth <= 0 )
+                return growth;
+            most_seen_pages += growth;
+            current_pages += growth;
+            printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
+                growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
+        }
+    }
+
+
+    return len;
+}
+
+static int balloon_read(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+  int len;
+  len = sprintf(page,"%lu\n",current_pages<<PAGE_SHIFT);
+  
+  if (len <= off+count) *eof = 1;
+  *start = page + off;
+  len -= off;
+  if (len>count) len = count;
+  if (len<0) len = 0;
+  return len;
+}
+
+
+
+#endif
+
+
+static int __init balloon_init(void)
+{
+    printk(KERN_ALERT "Starting Xen Balloon driver\n");
+
+    most_seen_pages = current_pages = min(xen_start_info.nr_pages,max_pfn);
+    if ( (balloon_pde = create_xen_proc_entry("memory_target", 0644)) == NULL )
+    {
+        printk(KERN_ALERT "Unable to create balloon driver proc entry!");
+        return -1;
+    }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    balloon_pde->owner     = THIS_MODULE;
+    balloon_pde->nlink     = 1;
+    balloon_pde->proc_fops = &balloon_fops;
+#else
+    balloon_pde->write_proc = balloon_write;
+    balloon_pde->read_proc  = balloon_read;
+#endif
+
+    (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* 
+     * Make a new phys map if mem= says Xen can give us memory to grow into.
+     */
+    if ( max_pfn > xen_start_info.nr_pages )
+    {
+        extern unsigned long *phys_to_machine_mapping;
+        unsigned long *newmap;
+        newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
+        if ( newmap == NULL )
+            return -ENOMEM;
+        memset(newmap, ~0, max_pfn * sizeof(unsigned long));
+        memcpy(newmap, phys_to_machine_mapping,
+               xen_start_info.nr_pages * sizeof(unsigned long));
+        phys_to_machine_mapping = newmap;
+    }
+
+    return 0;
+}
+
+static void __exit balloon_cleanup(void)
+{
+    if ( balloon_pde != NULL )
+    {
+        remove_xen_proc_entry("memory_target");
+        balloon_pde = NULL;
+    }
+}
+
+module_init(balloon_init);
+module_exit(balloon_cleanup);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/blkback/Makefile
new file mode 100644 (file)
index 0000000..a27fe65
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := blkback.o control.o interface.o vbd.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6.9-xen-sparse/drivers/xen/blkback/blkback.c
new file mode 100644 (file)
index 0000000..6d20102
--- /dev/null
@@ -0,0 +1,586 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/main.c
+ * 
+ * Back-end of the driver for virtual block devices. This portion of the
+ * driver exports a 'unified' block-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A 
+ * reference front-end implementation can be found in:
+ *  arch/xen/drivers/blkif/frontend
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+/*
+ * These are rather arbitrary. They are fairly large because adjacent requests
+ * pulled from a communication ring are quite likely to end up being part of
+ * the same scatter/gather request at the disc.
+ * 
+ * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
+ * This will increase the chances of being able to write whole tracks.
+ * 64 should be enough to keep us competitive with Linux.
+ */
+#define MAX_PENDING_REQS 64
+#define BATCH_PER_DOMAIN 16
+
+static unsigned long mmap_vstart;
+#define MMAP_PAGES_PER_REQUEST \
+    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
+#define MMAP_PAGES             \
+    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg)                        \
+    (mmap_vstart +                                   \
+     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+     ((_seg) * PAGE_SIZE))
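+
+/*
+ * Layout sketch: the mmap region is carved into MAX_PENDING_REQS fixed-size
+ * slots of MMAP_PAGES_PER_REQUEST pages each, so segment _seg of pending
+ * request _req always lands at the same virtual address.  For example,
+ * MMAP_VADDR(3, 2) == mmap_vstart + (3 * MMAP_PAGES_PER_REQUEST + 2) * PAGE_SIZE.
+ */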
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a 
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
+ * the pendcnt towards zero. When it hits zero, the specified domain has a 
+ * response queued for it, with the saved 'id' passed back.
+ */
+typedef struct {
+    blkif_t       *blkif;
+    unsigned long  id;
+    int            nr_pages;
+    atomic_t       pendcnt;
+    unsigned short operation;
+    int            status;
+} pending_req_t;
+
+/*
+ * We can't allocate pending_req's in order, since they may complete out of 
+ * order. We therefore maintain an allocation ring. This ring also indicates 
+ * when enough work has been passed down -- at that point the allocation ring 
+ * will be empty.
+ */
+static pending_req_t pending_reqs[MAX_PENDING_REQS];
+static unsigned char pending_ring[MAX_PENDING_REQS];
+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
+/* NB. We use a different index type to differentiate from shared blk rings. */
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
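+
+/*
+ * Worked example: pending_prod and pending_cons are free-running, and
+ * MASK_PEND_IDX() relies on MAX_PENDING_REQS being a power of two.  After
+ * blkif_init() we have pending_cons == 0 and pending_prod == MAX_PENDING_REQS,
+ * so NR_PENDING_REQS == 0.  Dispatching five requests and completing two gives
+ * pending_cons == 5 and pending_prod == MAX_PENDING_REQS + 2, hence
+ * NR_PENDING_REQS == 3 requests still in flight.
+ */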
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static kmem_cache_t *buffer_head_cachep;
+#endif
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do);
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
+static void make_response(blkif_t *blkif, unsigned long id, 
+                          unsigned short op, int st);
+
+static void fast_flush_area(int idx, int nr_pages)
+{
+    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+    int               i;
+
+    for ( i = 0; i < nr_pages; i++ )
+    {
+        mcl[i].op = __HYPERVISOR_update_va_mapping;
+        mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+        mcl[i].args[1] = 0;
+        mcl[i].args[2] = 0;
+    }
+
+    mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
+        BUG();
+}
+
+
+/******************************************************************
+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
+ */
+
+static struct list_head blkio_schedule_list;
+static spinlock_t blkio_schedule_list_lock;
+
+static int __on_blkdev_list(blkif_t *blkif)
+{
+    return blkif->blkdev_list.next != NULL;
+}
+
+static void remove_from_blkdev_list(blkif_t *blkif)
+{
+    unsigned long flags;
+    if ( !__on_blkdev_list(blkif) ) return;
+    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+    if ( __on_blkdev_list(blkif) )
+    {
+        list_del(&blkif->blkdev_list);
+        blkif->blkdev_list.next = NULL;
+        blkif_put(blkif);
+    }
+    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
+
+static void add_to_blkdev_list_tail(blkif_t *blkif)
+{
+    unsigned long flags;
+    if ( __on_blkdev_list(blkif) ) return;
+    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
+    {
+        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+        blkif_get(blkif);
+    }
+    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
+
+
+/******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
+
+static int blkio_schedule(void *arg)
+{
+    DECLARE_WAITQUEUE(wq, current);
+
+    blkif_t          *blkif;
+    struct list_head *ent;
+
+    daemonize(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+        "xenblkd"
+#endif
+        );
+
+    for ( ; ; )
+    {
+        /* Wait for work to do. */
+        add_wait_queue(&blkio_schedule_wait, &wq);
+        set_current_state(TASK_INTERRUPTIBLE);
+        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
+             list_empty(&blkio_schedule_list) )
+            schedule();
+        __set_current_state(TASK_RUNNING);
+        remove_wait_queue(&blkio_schedule_wait, &wq);
+
+        /* Queue up a batch of requests. */
+        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+                !list_empty(&blkio_schedule_list) )
+        {
+            ent = blkio_schedule_list.next;
+            blkif = list_entry(ent, blkif_t, blkdev_list);
+            blkif_get(blkif);
+            remove_from_blkdev_list(blkif);
+            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
+                add_to_blkdev_list_tail(blkif);
+            blkif_put(blkif);
+        }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+        /* Push the batch through to disc. */
+        run_task_queue(&tq_disk);
+#endif
+    }
+}
+
+static void maybe_trigger_blkio_schedule(void)
+{
+    /*
+     * Needed so that two processes, who together make the following predicate
+     * true, don't both read stale values and evaluate the predicate
+     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
+     */
+    smp_mb();
+
+    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+         !list_empty(&blkio_schedule_list) )
+        wake_up(&blkio_schedule_wait);
+}
+
+
+
+/******************************************************************
+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
+ */
+
+static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
+{
+    unsigned long flags;
+
+    /* An error fails the entire request. */
+    if ( !uptodate )
+    {
+        DPRINTK("Buffer not up-to-date at end of operation\n");
+        pending_req->status = BLKIF_RSP_ERROR;
+    }
+
+    if ( atomic_dec_and_test(&pending_req->pendcnt) )
+    {
+        int pending_idx = pending_req - pending_reqs;
+        fast_flush_area(pending_idx, pending_req->nr_pages);
+        make_response(pending_req->blkif, pending_req->id,
+                      pending_req->operation, pending_req->status);
+        blkif_put(pending_req->blkif);
+        spin_lock_irqsave(&pend_prod_lock, flags);
+        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+        spin_unlock_irqrestore(&pend_prod_lock, flags);
+        maybe_trigger_blkio_schedule();
+    }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static void end_block_io_op(struct buffer_head *bh, int uptodate)
+{
+    __end_block_io_op(bh->b_private, uptodate);
+    kmem_cache_free(buffer_head_cachep, bh);
+}
+#else
+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
+{
+    if ( done || error )
+        __end_block_io_op(bio->bi_private, (done && !error));
+    bio_put(bio);
+    return error;
+}
+#endif
+
+
+/******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+    blkif_t *blkif = dev_id;
+    add_to_blkdev_list_tail(blkif);
+    maybe_trigger_blkio_schedule();
+    return IRQ_HANDLED;
+}
+
+
+
+/******************************************************************
+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
+ */
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do)
+{
+    blkif_ring_t *blk_ring = blkif->blk_ring_base;
+    blkif_request_t *req;
+    BLKIF_RING_IDX i, rp;
+    int more_to_do = 0;
+
+    rp = blk_ring->req_prod;
+    rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+    /* Take items off the comms ring, taking care not to overflow. */
+    for ( i = blkif->blk_req_cons; 
+          (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
+          i++ )
+    {
+        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
+        {
+            more_to_do = 1;
+            break;
+        }
+        
+        req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
+        switch ( req->operation )
+        {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            dispatch_rw_block_io(blkif, req);
+            break;
+
+        case BLKIF_OP_PROBE:
+            dispatch_probe(blkif, req);
+            break;
+
+        default:
+            DPRINTK("error: unknown block io operation [%d]\n",
+                    blk_ring->ring[i].req.operation);
+            make_response(blkif, blk_ring->ring[i].req.id, 
+                          blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
+            break;
+        }
+    }
+
+    blkif->blk_req_cons = i;
+    return more_to_do;
+}
+
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
+{
+    int rsp = BLKIF_RSP_ERROR;
+    int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+    /* We expect one buffer only. */
+    if ( unlikely(req->nr_segments != 1) )
+        goto out;
+
+    /* Make sure the buffer is page-sized. */
+    if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
+         (blkif_last_sect(req->frame_and_sects[0]) != 7) )
+        goto out;
+
+    if ( HYPERVISOR_update_va_mapping_otherdomain(
+        MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
+        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+        0, blkif->domid) )
+        goto out;
+
+    rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0), 
+                    PAGE_SIZE / sizeof(vdisk_t));
+
+ out:
+    fast_flush_area(pending_idx, 1);
+    make_response(blkif, req->id, req->operation, rsp);
+}
+
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
+{
+    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
+    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+    short nr_sects;
+    unsigned long buffer, fas;
+    int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+    pending_req_t *pending_req;
+    unsigned long  remap_prot;
+    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+
+    /* We map virtual scatter/gather segments to physical segments. */
+    int new_segs, nr_psegs = 0;
+    phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+
+    /* Check that number of segments is sane. */
+    if ( unlikely(req->nr_segments == 0) || 
+         unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+    {
+        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+        goto bad_descriptor;
+    }
+
+    /*
+     * Check each address/size pair is sane, and convert into a
+     * physical device and block offset. Note that if the offset and size
+     * crosses a virtual extent boundary, we may end up with more
+     * physical scatter/gather segments than virtual segments.
+     */
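+    /*
+     * Example decoding of one frame_and_sects entry (see the loop below): the
+     * low bits carry the first/last sector within the page, so an entry with
+     * first_sect == 2 and last_sect == 5 describes 4 sectors starting at byte
+     * offset 2*512 == 1024 within the frame, i.e.
+     *     buffer   = (fas & PAGE_MASK) | (2 << 9);
+     *     nr_sects = 5 - 2 + 1;
+     */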
+    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
+    {
+        fas      = req->frame_and_sects[i];
+        buffer   = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+        nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+        if ( nr_sects <= 0 )
+            goto bad_descriptor;
+
+        phys_seg[nr_psegs].dev           = req->device;
+        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
+        phys_seg[nr_psegs].buffer        = buffer;
+        phys_seg[nr_psegs].nr_sects      = nr_sects;
+
+        /* Translate the request into the relevant 'physical device' */
+        new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
+        if ( new_segs < 0 )
+        { 
+            DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
+                    operation == READ ? "read" : "write", 
+                    req->sector_number + tot_sects, 
+                    req->sector_number + tot_sects + nr_sects, 
+                    req->device); 
+            goto bad_descriptor;
+        }
+  
+        nr_psegs += new_segs;
+        ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+    }
+
+    /* Nonsensical zero-sized request? */
+    if ( unlikely(nr_psegs == 0) )
+        goto bad_descriptor;
+
+    if ( operation == READ )
+        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
+    else
+        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
+
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
+        mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
+        mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+        mcl[i].args[2] = 0;
+        mcl[i].args[3] = blkif->domid;
+
+        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+            FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
+    }
+
+    if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
+        BUG();
+
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        if ( unlikely(mcl[i].args[5] != 0) )
+        {
+            DPRINTK("invalid buffer -- could not remap it\n");
+            fast_flush_area(pending_idx, nr_psegs);
+            goto bad_descriptor;
+        }
+    }
+
+    pending_req = &pending_reqs[pending_idx];
+    pending_req->blkif     = blkif;
+    pending_req->id        = req->id;
+    pending_req->operation = operation;
+    pending_req->status    = BLKIF_RSP_OKAY;
+    pending_req->nr_pages  = nr_psegs;
+    atomic_set(&pending_req->pendcnt, nr_psegs);
+    pending_cons++;
+
+    blkif_get(blkif);
+
+    /* Now we pass each segment down to the real blkdev layer. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        struct buffer_head *bh;
+
+        bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+        if ( unlikely(bh == NULL) )
+        {
+            __end_block_io_op(pending_req, 0);
+            continue;
+        }
+
+        memset(bh, 0, sizeof (struct buffer_head));
+
+        init_waitqueue_head(&bh->b_wait);
+        bh->b_size          = phys_seg[i].nr_sects << 9;
+        bh->b_dev           = phys_seg[i].dev;
+        bh->b_rdev          = phys_seg[i].dev;
+        bh->b_rsector       = (unsigned long)phys_seg[i].sector_number;
+        bh->b_data          = (char *)MMAP_VADDR(pending_idx, i) +
+            (phys_seg[i].buffer & ~PAGE_MASK);
+        bh->b_page          = virt_to_page(MMAP_VADDR(pending_idx, i));
+        bh->b_end_io        = end_block_io_op;
+        bh->b_private       = pending_req;
+
+        bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | 
+            (1 << BH_Req) | (1 << BH_Launder);
+        if ( operation == WRITE )
+            bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
+
+        atomic_set(&bh->b_count, 1);
+
+        /* Dispatch a single request. We'll flush it to disc later. */
+        generic_make_request(operation, bh);
+    }
+#else
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        struct bio *bio;
+        struct bio_vec *bv;
+
+        bio = bio_alloc(GFP_ATOMIC, 1);
+        if ( unlikely(bio == NULL) )
+        {
+            __end_block_io_op(pending_req, 0);
+            continue;
+        }
+
+        bio->bi_bdev    = phys_seg[i].bdev;
+        bio->bi_private = pending_req;
+        bio->bi_end_io  = end_block_io_op;
+        bio->bi_sector  = phys_seg[i].sector_number;
+        bio->bi_rw      = operation;
+
+        bv = bio_iovec_idx(bio, 0);
+        bv->bv_page   = virt_to_page(MMAP_VADDR(pending_idx, i));
+        bv->bv_len    = phys_seg[i].nr_sects << 9;
+        bv->bv_offset = phys_seg[i].buffer & ~PAGE_MASK;
+
+        bio->bi_size    = bv->bv_len;
+        bio->bi_vcnt++;
+
+        submit_bio(operation, bio);
+    }
+#endif
+
+    return;
+
+ bad_descriptor:
+    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+} 
+
+
+
+/******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
+static void make_response(blkif_t *blkif, unsigned long id, 
+                          unsigned short op, int st)
+{
+    blkif_response_t *resp;
+    unsigned long     flags;
+
+    /* Place on the response ring for the relevant domain. */ 
+    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+    resp = &blkif->blk_ring_base->
+        ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
+    resp->id        = id;
+    resp->operation = op;
+    resp->status    = st;
+    wmb(); /* Ensure other side can see the response fields. */
+    blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
+    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+    /* Kick the relevant domain. */
+    notify_via_evtchn(blkif->evtchn);
+}
+
+void blkif_deschedule(blkif_t *blkif)
+{
+    remove_from_blkdev_list(blkif);
+}
+
+static int __init blkif_init(void)
+{
+    int i;
+
+    if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
+         !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+        return 0;
+
+    blkif_interface_init();
+
+    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
+        BUG();
+
+    pending_cons = 0;
+    pending_prod = MAX_PENDING_REQS;
+    memset(pending_reqs, 0, sizeof(pending_reqs));
+    for ( i = 0; i < MAX_PENDING_REQS; i++ )
+        pending_ring[i] = i;
+    
+    spin_lock_init(&blkio_schedule_list_lock);
+    INIT_LIST_HEAD(&blkio_schedule_list);
+
+    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
+        BUG();
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    buffer_head_cachep = kmem_cache_create(
+        "buffer_head_cache", sizeof(struct buffer_head),
+        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+#endif
+
+    blkif_ctrlif_init();
+
+    return 0;
+}
+
+__initcall(blkif_init);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/common.h b/linux-2.6.9-xen-sparse/drivers/xen/blkback/common.h
new file mode 100644 (file)
index 0000000..0fa60cd
--- /dev/null
@@ -0,0 +1,120 @@
+
+#ifndef __BLKIF__BACKEND__COMMON_H__
+#define __BLKIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/hypervisor-ifs/io/blkif.h>
+
+#if 0
+#define ASSERT(_p) \
+    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+    __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+                           __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+typedef struct rb_root rb_root_t;
+typedef struct rb_node rb_node_t;
+#else
+struct block_device;
+#endif
+
+typedef struct blkif_st {
+    /* Unique identifier for this interface. */
+    domid_t          domid;
+    unsigned int     handle;
+    /* Physical parameters of the comms window. */
+    unsigned long    shmem_frame;
+    unsigned int     evtchn;
+    int              irq;
+    /* Comms information. */
+    blkif_ring_t    *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
+    BLKIF_RING_IDX     blk_req_cons;  /* Request consumer. */
+    BLKIF_RING_IDX     blk_resp_prod; /* Private version of resp. producer. */
+    /* VBDs attached to this interface. */
+    rb_root_t        vbd_rb;        /* Mapping from 16-bit vdevices to VBDs. */
+    spinlock_t       vbd_lock;      /* Protects VBD mapping. */
+    /* Private fields. */
+    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+    /*
+     * DISCONNECT response is deferred until pending requests are ack'ed.
+     * We therefore need to store the id from the original request.
+     */
+    u8               disconnect_rspid;
+    struct blkif_st *hash_next;
+    struct list_head blkdev_list;
+    spinlock_t       blk_ring_lock;
+    atomic_t         refcnt;
+
+    struct work_struct work;
+} blkif_t;
+
+void blkif_create(blkif_be_create_t *create);
+void blkif_destroy(blkif_be_destroy_t *destroy);
+void blkif_connect(blkif_be_connect_t *connect);
+int  blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
+void blkif_disconnect_complete(blkif_t *blkif);
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define blkif_put(_b)                             \
+    do {                                          \
+        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+            blkif_disconnect_complete(_b);        \
+    } while (0)
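+
+/*
+ * Reference-counting sketch: blkif_get()/blkif_put() bracket every outstanding
+ * user of the interface -- the CONNECT itself, membership of the block-io
+ * scheduler list, and each in-flight request.  Once blkif_disconnect() has
+ * dropped the connection's reference, the final blkif_put() (typically from
+ * the last completing request) calls blkif_disconnect_complete(), which sends
+ * the deferred DISCONNECT response.
+ */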
+
+/* An entry in a list of xen_extents. */
+typedef struct _blkif_extent_le { 
+    blkif_extent_t extent;               /* an individual extent */
+    struct _blkif_extent_le *next;       /* and a pointer to the next */ 
+    struct block_device *bdev;
+} blkif_extent_le_t; 
+
+typedef struct _vbd { 
+    blkif_vdev_t       vdevice;   /* what the domain refers to this vbd as */
+    unsigned char      readonly;  /* Non-zero -> read-only */
+    unsigned char      type;      /* VDISK_TYPE_xxx */
+    blkif_extent_le_t *extents;   /* list of xen_extents making up this vbd */
+    rb_node_t          rb;        /* for linking into R-B tree lookup struct */
+} vbd_t; 
+
+void vbd_create(blkif_be_vbd_create_t *create); 
+void vbd_grow(blkif_be_vbd_grow_t *grow); 
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
+void vbd_destroy(blkif_be_vbd_destroy_t *delete); 
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
+void destroy_all_vbds(blkif_t *blkif);
+
+/* Describes a [partial] disk extent (part of a block io request) */
+typedef struct {
+    unsigned short       dev;
+    unsigned short       nr_sects;
+    struct block_device *bdev;
+    unsigned long        buffer;
+    blkif_sector_t       sector_number;
+} phys_seg_t;
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation); 
+
+void blkif_interface_init(void);
+void blkif_ctrlif_init(void);
+
+void blkif_deschedule(blkif_t *blkif);
+
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __BLKIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/control.c b/linux-2.6.9-xen-sparse/drivers/xen/blkback/control.c
new file mode 100644 (file)
index 0000000..b55bae7
--- /dev/null
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/control.c
+ * 
+ * Routines for interfacing with the control plane.
+ * 
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    DPRINTK("Received blkif backend message, subtype=%d\n", msg->subtype);
+    
+    switch ( msg->subtype )
+    {
+    case CMSG_BLKIF_BE_CREATE:
+        if ( msg->length != sizeof(blkif_be_create_t) )
+            goto parse_error;
+        blkif_create((blkif_be_create_t *)&msg->msg[0]);
+        break;        
+    case CMSG_BLKIF_BE_DESTROY:
+        if ( msg->length != sizeof(blkif_be_destroy_t) )
+            goto parse_error;
+        blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
+        break;        
+    case CMSG_BLKIF_BE_CONNECT:
+        if ( msg->length != sizeof(blkif_be_connect_t) )
+            goto parse_error;
+        blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
+        break;        
+    case CMSG_BLKIF_BE_DISCONNECT:
+        if ( msg->length != sizeof(blkif_be_disconnect_t) )
+            goto parse_error;
+        if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
+            return; /* Sending the response is deferred until later. */
+        break;        
+    case CMSG_BLKIF_BE_VBD_CREATE:
+        if ( msg->length != sizeof(blkif_be_vbd_create_t) )
+            goto parse_error;
+        vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_DESTROY:
+        if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
+            goto parse_error;
+        vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_GROW:
+        if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
+            goto parse_error;
+        vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_SHRINK:
+        if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
+            goto parse_error;
+        vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
+        break;
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    DPRINTK("Parse error while reading message subtype %d, len %d\n",
+            msg->subtype, msg->length);
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+void blkif_ctrlif_init(void)
+{
+    ctrl_msg_t cmsg;
+    blkif_be_driver_status_t st;
+
+    (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx, 
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type      = CMSG_BLKIF_BE;
+    cmsg.subtype   = CMSG_BLKIF_BE_DRIVER_STATUS;
+    cmsg.length    = sizeof(blkif_be_driver_status_t);
+    st.status      = BLKIF_DRIVER_STATUS_UP;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/interface.c b/linux-2.6.9-xen-sparse/drivers/xen/blkback/interface.c
new file mode 100644 (file)
index 0000000..4196014
--- /dev/null
@@ -0,0 +1,246 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/interface.c
+ * 
+ * Block-device interface management.
+ * 
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+#define BLKIF_HASHSZ 1024
+#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
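+
+/*
+ * E.g. BLKIF_HASH(5, 0) == 5: interfaces are bucketed by XORing the domain id
+ * with the per-domain handle and masking to BLKIF_HASHSZ buckets, with
+ * collisions chained through blkif->hash_next.
+ */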
+
+static kmem_cache_t *blkif_cachep;
+static blkif_t      *blkif_hash[BLKIF_HASHSZ];
+
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
+{
+    blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( (blkif != NULL) && 
+            ((blkif->domid != domid) || (blkif->handle != handle)) )
+        blkif = blkif->hash_next;
+    return blkif;
+}
+
+static void __blkif_disconnect_complete(void *arg)
+{
+    blkif_t              *blkif = (blkif_t *)arg;
+    ctrl_msg_t            cmsg;
+    blkif_be_disconnect_t disc;
+
+    /*
+     * These can't be done in blkif_disconnect() because at that point there
+     * may be outstanding requests at the disc whose asynchronous responses
+     * must still be notified to the remote driver.
+     */
+    unbind_evtchn_from_irq(blkif->evtchn);
+    vfree(blkif->blk_ring_base);
+
+    /* Construct the deferred response message. */
+    cmsg.type         = CMSG_BLKIF_BE;
+    cmsg.subtype      = CMSG_BLKIF_BE_DISCONNECT;
+    cmsg.id           = blkif->disconnect_rspid;
+    cmsg.length       = sizeof(blkif_be_disconnect_t);
+    disc.domid        = blkif->domid;
+    disc.blkif_handle = blkif->handle;
+    disc.status       = BLKIF_BE_STATUS_OKAY;
+    memcpy(cmsg.msg, &disc, sizeof(disc));
+
+    /*
+     * Make sure message is constructed /before/ status change, because
+     * after the status change the 'blkif' structure could be deallocated at
+     * any time. Also make sure we send the response /after/ status change,
+     * as otherwise a subsequent CONNECT request could spuriously fail if
+     * another CPU doesn't see the status change yet.
+     */
+    mb();
+    if ( blkif->status != DISCONNECTING )
+        BUG();
+    blkif->status = DISCONNECTED;
+    mb();
+
+    /* Send the successful response. */
+    ctrl_if_send_response(&cmsg);
+}
+
+void blkif_disconnect_complete(blkif_t *blkif)
+{
+    INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
+    schedule_work(&blkif->work);
+}
+
+void blkif_create(blkif_be_create_t *create)
+{
+    domid_t       domid  = create->domid;
+    unsigned int  handle = create->blkif_handle;
+    blkif_t     **pblkif, *blkif;
+
+    if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
+    {
+        DPRINTK("Could not create blkif: out of memory\n");
+        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    memset(blkif, 0, sizeof(*blkif));
+    blkif->domid  = domid;
+    blkif->handle = handle;
+    blkif->status = DISCONNECTED;
+    spin_lock_init(&blkif->vbd_lock);
+    spin_lock_init(&blkif->blk_ring_lock);
+    atomic_set(&blkif->refcnt, 0);
+
+    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( *pblkif != NULL )
+    {
+        if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
+        {
+            DPRINTK("Could not create blkif: already exists\n");
+            create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
+            kmem_cache_free(blkif_cachep, blkif);
+            return;
+        }
+        pblkif = &(*pblkif)->hash_next;
+    }
+
+    blkif->hash_next = *pblkif;
+    *pblkif = blkif;
+
+    DPRINTK("Successfully created blkif\n");
+    create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_destroy(blkif_be_destroy_t *destroy)
+{
+    domid_t       domid  = destroy->domid;
+    unsigned int  handle = destroy->blkif_handle;
+    blkif_t     **pblkif, *blkif;
+
+    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( (blkif = *pblkif) != NULL )
+    {
+        if ( (blkif->domid == domid) && (blkif->handle == handle) )
+        {
+            if ( blkif->status != DISCONNECTED )
+                goto still_connected;
+            goto destroy;
+        }
+        pblkif = &blkif->hash_next;
+    }
+
+    destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+    return;
+
+ still_connected:
+    destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+    return;
+
+ destroy:
+    *pblkif = blkif->hash_next;
+    destroy_all_vbds(blkif);
+    kmem_cache_free(blkif_cachep, blkif);
+    destroy->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_connect(blkif_be_connect_t *connect)
+{
+    domid_t       domid  = connect->domid;
+    unsigned int  handle = connect->blkif_handle;
+    unsigned int  evtchn = connect->evtchn;
+    unsigned long shmem_frame = connect->shmem_frame;
+    struct vm_struct *vma;
+    pgprot_t      prot;
+    int           error;
+    blkif_t      *blkif;
+
+    blkif = blkif_find_by_handle(domid, handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n", 
+                connect->domid, connect->blkif_handle); 
+        connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+    {
+        connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+                                    prot, domid);
+    if ( error != 0 )
+    {
+        if ( error == -ENOMEM )
+            connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        else if ( error == -EFAULT )
+            connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
+        else
+            connect->status = BLKIF_BE_STATUS_ERROR;
+        vfree(vma->addr);
+        return;
+    }
+
+    if ( blkif->status != DISCONNECTED )
+    {
+        connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+        vfree(vma->addr);
+        return;
+    }
+
+    blkif->evtchn        = evtchn;
+    blkif->irq           = bind_evtchn_to_irq(evtchn);
+    blkif->shmem_frame   = shmem_frame;
+    blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
+    blkif->status        = CONNECTED;
+    blkif_get(blkif);
+
+    request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
+
+    connect->status = BLKIF_BE_STATUS_OKAY;
+}
+
+int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+    domid_t       domid  = disconnect->domid;
+    unsigned int  handle = disconnect->blkif_handle;
+    blkif_t      *blkif;
+
+    blkif = blkif_find_by_handle(domid, handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("blkif_disconnect attempted for non-existent blkif"
+                " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle); 
+        disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return 1; /* Caller will send response error message. */
+    }
+
+    if ( blkif->status == CONNECTED )
+    {
+        blkif->status = DISCONNECTING;
+        blkif->disconnect_rspid = rsp_id;
+        wmb(); /* Let other CPUs see the status change. */
+        free_irq(blkif->irq, blkif);
+        blkif_deschedule(blkif);
+        blkif_put(blkif);
+        return 0; /* Caller should not send response message. */
+    }
+
+    disconnect->status = BLKIF_BE_STATUS_OKAY;
+    return 1;
+}
+
+void __init blkif_interface_init(void)
+{
+    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
+                                     0, 0, NULL, NULL);
+    memset(blkif_hash, 0, sizeof(blkif_hash));
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkback/vbd.c b/linux-2.6.9-xen-sparse/drivers/xen/blkback/vbd.c
new file mode 100644 (file)
index 0000000..b530128
--- /dev/null
@@ -0,0 +1,576 @@
+/******************************************************************************
+ * blkback/vbd.c
+ * 
+ * Routines for managing virtual block devices (VBDs).
+ * 
+ * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups 
+ * in vbd_translate.  All other lookups are implicitly protected because the 
+ * only caller (the control message dispatch routine) serializes the calls.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static dev_t vbd_map_devnum(blkif_pdev_t);
+#endif
+
+void vbd_create(blkif_be_vbd_create_t *create) 
+{
+    vbd_t       *vbd; 
+    rb_node_t  **rb_p, *rb_parent = NULL;
+    blkif_t     *blkif;
+    blkif_vdev_t vdevice = create->vdevice;
+
+    blkif = blkif_find_by_handle(create->domid, create->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_create attempted for non-existent blkif (%u,%u)\n", 
+                create->domid, create->blkif_handle); 
+        create->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    rb_p = &blkif->vbd_rb.rb_node;
+    while ( *rb_p != NULL )
+    {
+        rb_parent = *rb_p;
+        vbd = rb_entry(rb_parent, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+        {
+            rb_p = &rb_parent->rb_left;
+        }
+        else if ( vdevice > vbd->vdevice )
+        {
+            rb_p = &rb_parent->rb_right;
+        }
+        else
+        {
+            DPRINTK("vbd_create attempted for already existing vbd\n");
+            create->status = BLKIF_BE_STATUS_VBD_EXISTS;
+            return;
+        }
+    }
+
+    if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
+    {
+        DPRINTK("vbd_create: out of memory\n");
+        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    vbd->vdevice  = vdevice; 
+    vbd->readonly = create->readonly;
+    vbd->type     = VDISK_TYPE_DISK | VDISK_FLAG_VIRT;
+    vbd->extents  = NULL; 
+
+    spin_lock(&blkif->vbd_lock);
+    rb_link_node(&vbd->rb, rb_parent, rb_p);
+    rb_insert_color(&vbd->rb, &blkif->vbd_rb);
+    spin_unlock(&blkif->vbd_lock);
+
+    DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
+            vdevice, create->domid);
+    create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+
+/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
+void vbd_grow(blkif_be_vbd_grow_t *grow) 
+{
+    blkif_t            *blkif;
+    blkif_extent_le_t **px, *x; 
+    vbd_t              *vbd = NULL;
+    rb_node_t          *rb;
+    blkif_vdev_t        vdevice = grow->vdevice;
+    unsigned long       sz;
+
+    blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n", 
+                grow->domid, grow->blkif_handle); 
+        grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            break;
+    }
+
+    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+    {
+        DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
+        grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+        return;
+    } 
+
+    if ( grow->extent.sector_start > 0 )
+    {
+        DPRINTK("vbd_grow: dev %08x start not zero.\n", grow->extent.device);
+        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        return;
+    }
+
+    if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t), 
+                               GFP_KERNEL)) == NULL) )
+    {
+        DPRINTK("vbd_grow: out of memory\n");
+        grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    x->extent.device        = grow->extent.device;
+    x->extent.sector_start  = grow->extent.sector_start;
+    x->extent.sector_length = grow->extent.sector_length;
+    x->next                 = (blkif_extent_le_t *)NULL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    x->bdev = open_by_devnum(vbd_map_devnum(x->extent.device),
+                             vbd->readonly ? FMODE_READ : FMODE_WRITE);
+    if ( IS_ERR(x->bdev) )
+    {
+        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        goto out;
+    }
+    /* XXXcl maybe bd_claim? */
+
+    if ( (x->bdev->bd_disk == NULL) )
+    {
+        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        blkdev_put(x->bdev);
+        goto out;
+    }
+
+    /* get size in sectors */   
+    if ( x->bdev->bd_part )
+        sz = x->bdev->bd_part->nr_sects;
+    else
+        sz = x->bdev->bd_disk->capacity;
+
+#else
+    if( !blk_size[MAJOR(x->extent.device)] )
+    {
+        DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        goto out;
+    }
+    
+    /* convert blocks (1KB) to sectors */
+    sz = blk_size[MAJOR(x->extent.device)][MINOR(x->extent.device)] * 2;    
+    
+    if ( sz == 0 )
+    {
+        DPRINTK("vbd_grow: device %08x zero size!\n", x->extent.device);
+        grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        goto out;
+    }
+#endif
+
+    /*
+     * NB. This test assumes sector_start == 0, which is always the case
+     * in Xen 1.3. In fact the whole grow/shrink interface could do with
+     * some simplification.
+     */
+    if ( x->extent.sector_length > sz )
+        x->extent.sector_length = sz;
+    
+    DPRINTK("vbd_grow: requested_len %llu actual_len %lu\n", 
+            x->extent.sector_length, sz);
+
+    for ( px = &vbd->extents; *px != NULL; px = &(*px)->next ) 
+        continue;
+    
+    *px = x; /* ATOMIC: no need for vbd_lock. */
+
+    DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
+            vdevice, grow->domid);
+    
+    grow->status = BLKIF_BE_STATUS_OKAY;
+    return;
+
+ out:
+    kfree(x);
+}
+
+
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
+{
+    blkif_t            *blkif;
+    blkif_extent_le_t **px, *x; 
+    vbd_t              *vbd = NULL;
+    rb_node_t          *rb;
+    blkif_vdev_t        vdevice = shrink->vdevice;
+
+    blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n", 
+                shrink->domid, shrink->blkif_handle); 
+        shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            break;
+    }
+
+    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+    {
+        shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+        return;
+    }
+
+    if ( unlikely(vbd->extents == NULL) )
+    {
+        shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        return;
+    }
+
+    /* Find the last extent. We now know that there is at least one. */
+    for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
+        continue;
+
+    x   = *px;
+    *px = x->next; /* ATOMIC: no need for vbd_lock. */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    blkdev_put(x->bdev);
+#endif
+    kfree(x);
+
+    shrink->status = BLKIF_BE_STATUS_OKAY;
+}
+
+
+void vbd_destroy(blkif_be_vbd_destroy_t *destroy) 
+{
+    blkif_t           *blkif;
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_extent_le_t *x, *t;
+    blkif_vdev_t       vdevice = destroy->vdevice;
+
+    blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_destroy attempted for non-existent blkif (%u,%u)\n", 
+                destroy->domid, destroy->blkif_handle); 
+        destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            goto found;
+    }
+
+    destroy->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+    return;
+
+ found:
+    spin_lock(&blkif->vbd_lock);
+    rb_erase(rb, &blkif->vbd_rb);
+    spin_unlock(&blkif->vbd_lock);
+
+    x = vbd->extents;
+    kfree(vbd);
+
+    while ( x != NULL )
+    {
+        t = x->next;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+        blkdev_put(x->bdev);
+#endif
+        kfree(x);
+        x = t;
+    }
+}
+
+
+void destroy_all_vbds(blkif_t *blkif)
+{
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_extent_le_t *x, *t;
+
+    spin_lock(&blkif->vbd_lock);
+
+    while ( (rb = blkif->vbd_rb.rb_node) != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+
+        rb_erase(rb, &blkif->vbd_rb);
+        x = vbd->extents;
+        kfree(vbd);
+        
+        while ( x != NULL )
+        {
+            t = x->next;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+            blkdev_put(x->bdev);
+#endif
+            kfree(x);
+            x = t;
+        }          
+    }
+
+    spin_unlock(&blkif->vbd_lock);
+}
+
+
+static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
+{
+    blkif_extent_le_t *x; 
+
+    vbd_info->device = vbd->vdevice; 
+    vbd_info->info   = vbd->type;
+    if ( vbd->readonly )
+        vbd_info->info |= VDISK_FLAG_RO; 
+    vbd_info->capacity = 0ULL;
+    for ( x = vbd->extents; x != NULL; x = x->next )
+        vbd_info->capacity += x->extent.sector_length; 
+        
+    return 0;
+}
+
+
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
+{
+    int        rc = 0, nr_vbds = 0;
+    rb_node_t *rb;
+
+    spin_lock(&blkif->vbd_lock);
+
+    if ( (rb = blkif->vbd_rb.rb_node) == NULL )
+        goto out;
+
+ new_subtree:
+    /* STEP 1. Find least node (it'll be left-most). */
+    while ( rb->rb_left != NULL )
+        rb = rb->rb_left;
+
+    for ( ; ; )
+    {
+        /* STEP 2. Dealt with left subtree. Now process current node. */
+        if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds], 
+                                    rb_entry(rb, vbd_t, rb))) != 0 )
+            goto out;
+        if ( ++nr_vbds == max_vbds )
+            goto out;
+
+        /* STEP 3. Process right subtree, if any. */
+        if ( rb->rb_right != NULL )
+        {
+            rb = rb->rb_right;
+            goto new_subtree;
+        }
+
+        /* STEP 4. Done both subtrees. Head back through ancestors. */
+        for ( ; ; ) 
+        {
+            /* We're done when we get back to the root node. */
+            if ( rb->rb_parent == NULL )
+                goto out;
+            /* If we are left of parent, then parent is next to process. */
+            if ( rb->rb_parent->rb_left == rb )
+                break;
+            /* If we are right of parent, then we climb to grandparent. */
+            rb = rb->rb_parent;
+        }
+
+        rb = rb->rb_parent;
+    }
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+    return (rc == 0) ? nr_vbds : rc;  
+}
+
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
+{
+    blkif_extent_le_t *x; 
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_sector_t     sec_off;
+    unsigned long      nr_secs;
+
+    /* Take the vbd_lock because another thread could be updating the tree. */
+    spin_lock(&blkif->vbd_lock);
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( pseg->dev < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( pseg->dev > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            goto found;
+    }
+
+    DPRINTK("vbd_translate; domain %u attempted to access "
+            "non-existent VBD.\n", blkif->domid);
+
+    spin_unlock(&blkif->vbd_lock);
+    return -ENODEV; 
+
+ found:
+
+    if ( (operation == WRITE) && vbd->readonly )
+    {
+        spin_unlock(&blkif->vbd_lock);
+        return -EACCES; 
+    }
+
+    /*
+     * Now iterate through the list of blkif_extents, working out which should 
+     * be used to perform the translation.
+     */
+    sec_off = pseg->sector_number; 
+    nr_secs = pseg->nr_sects;
+    for ( x = vbd->extents; x != NULL; x = x->next )
+    { 
+        if ( sec_off < x->extent.sector_length )
+        {
+            pseg->dev  = x->extent.device;
+            pseg->bdev = x->bdev;
+            pseg->sector_number = x->extent.sector_start + sec_off;
+            if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
+                goto overrun;
+            spin_unlock(&blkif->vbd_lock);
+            return 1;
+        } 
+        sec_off -= x->extent.sector_length; 
+    }
+
+    DPRINTK("vbd_translate: end of vbd.\n");
+    spin_unlock(&blkif->vbd_lock);
+    return -EACCES; 
+
+    /*
+     * Here we deal with overrun onto the following extent. We don't deal with 
+     * overrun of more than one boundary since each request is restricted to 
+     * 2^9 512-byte sectors, so it should be trivial for control software to 
+     * ensure that extents are large enough to prevent excessive overrun.
+     */
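+    /*
+     * Worked example: if the first extent is 1000 sectors long and the guest
+     * issues an 8-sector request at VBD offset 996, pseg[0] keeps sectors
+     * 996-999 of extent 0 (4 sectors) and pseg[1] is filled in below with
+     * sectors 0-3 of the following extent.
+     */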
+ overrun:
+
+    /* Adjust length of first chunk to run to end of first extent. */
+    pseg[0].nr_sects = x->extent.sector_length - sec_off;
+
+    /* Set second chunk buffer and length to start where first chunk ended. */
+    pseg[1].buffer   = pseg[0].buffer + (pseg[0].nr_sects << 9);
+    pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
+
+    /* Now move to the next extent. Check it exists and is long enough! */
+    if ( unlikely((x = x->next) == NULL) || 
+         unlikely(x->extent.sector_length < pseg[1].nr_sects) )
+    {
+        DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
+        spin_unlock(&blkif->vbd_lock);
+        return -EACCES;
+    }
+
+    /* Store the real device and start sector for the second chunk. */
+    pseg[1].dev           = x->extent.device;
+    pseg[1].bdev          = x->bdev;
+    pseg[1].sector_number = x->extent.sector_start;
+    
+    spin_unlock(&blkif->vbd_lock);
+    return 2;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+#define MAJOR_XEN(dev) ((dev)>>8)
+#define MINOR_XEN(dev) ((dev) & 0xff)
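+
+/*
+ * The 16-bit Xen device cookie packs major:minor in the Linux style, e.g.
+ * cookie 0x0301 decodes to major 3, minor 1 (IDE0, /dev/hda1) and is handed
+ * to MKDEV() below.
+ */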
+
+#ifndef FANCY_REMAPPING
+static dev_t vbd_map_devnum(blkif_pdev_t cookie)
+{
+    int major = MAJOR_XEN(cookie);
+    int minor = MINOR_XEN(cookie);
+
+    return MKDEV(major, minor);
+}
+#else
+#define XEN_IDE0_MAJOR IDE0_MAJOR
+#define XEN_IDE1_MAJOR IDE1_MAJOR
+#define XEN_IDE2_MAJOR IDE2_MAJOR
+#define XEN_IDE3_MAJOR IDE3_MAJOR
+#define XEN_IDE4_MAJOR IDE4_MAJOR
+#define XEN_IDE5_MAJOR IDE5_MAJOR
+#define XEN_IDE6_MAJOR IDE6_MAJOR
+#define XEN_IDE7_MAJOR IDE7_MAJOR
+#define XEN_IDE8_MAJOR IDE8_MAJOR
+#define XEN_IDE9_MAJOR IDE9_MAJOR
+#define XEN_SCSI_DISK0_MAJOR SCSI_DISK0_MAJOR
+#define XEN_SCSI_DISK1_MAJOR SCSI_DISK1_MAJOR
+#define XEN_SCSI_DISK2_MAJOR SCSI_DISK2_MAJOR
+#define XEN_SCSI_DISK3_MAJOR SCSI_DISK3_MAJOR
+#define XEN_SCSI_DISK4_MAJOR SCSI_DISK4_MAJOR
+#define XEN_SCSI_DISK5_MAJOR SCSI_DISK5_MAJOR
+#define XEN_SCSI_DISK6_MAJOR SCSI_DISK6_MAJOR
+#define XEN_SCSI_DISK7_MAJOR SCSI_DISK7_MAJOR
+#define XEN_SCSI_CDROM_MAJOR SCSI_CDROM_MAJOR
+
+static dev_t vbd_map_devnum(blkif_pdev_t cookie)
+{
+    int new_major;
+    int major = MAJOR_XEN(cookie);
+    int minor = MINOR_XEN(cookie);
+
+    switch (major) {
+    case XEN_IDE0_MAJOR: new_major = IDE0_MAJOR; break;
+    case XEN_IDE1_MAJOR: new_major = IDE1_MAJOR; break;
+    case XEN_IDE2_MAJOR: new_major = IDE2_MAJOR; break;
+    case XEN_IDE3_MAJOR: new_major = IDE3_MAJOR; break;
+    case XEN_IDE4_MAJOR: new_major = IDE4_MAJOR; break;
+    case XEN_IDE5_MAJOR: new_major = IDE5_MAJOR; break;
+    case XEN_IDE6_MAJOR: new_major = IDE6_MAJOR; break;
+    case XEN_IDE7_MAJOR: new_major = IDE7_MAJOR; break;
+    case XEN_IDE8_MAJOR: new_major = IDE8_MAJOR; break;
+    case XEN_IDE9_MAJOR: new_major = IDE9_MAJOR; break;
+    case XEN_SCSI_DISK0_MAJOR: new_major = SCSI_DISK0_MAJOR; break;
+    case XEN_SCSI_DISK1_MAJOR ... XEN_SCSI_DISK7_MAJOR:
+        new_major = SCSI_DISK1_MAJOR + major - XEN_SCSI_DISK1_MAJOR;
+        break;
+    case XEN_SCSI_CDROM_MAJOR: new_major = SCSI_CDROM_MAJOR; break;
+    default: new_major = 0; break;
+    }
+
+    return MKDEV(new_major, minor);
+}
+#endif
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION_CODE(2,6,0) */
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/Kconfig b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/Kconfig
new file mode 100644 (file)
index 0000000..edde837
--- /dev/null
@@ -0,0 +1,6 @@
+
+config XENBLOCK
+       tristate "Block device driver"
+       depends on ARCH_XEN
+       help
+         Block device driver for Xen
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/Makefile
new file mode 100644 (file)
index 0000000..5d1707d
--- /dev/null
@@ -0,0 +1,3 @@
+
+obj-y  := blkfront.o vbd.o
+
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c
new file mode 100644 (file)
index 0000000..b98e7c3
--- /dev/null
@@ -0,0 +1,1424 @@
+/******************************************************************************
+ * blkfront.c
+ * 
+ * XenLinux virtual block-device driver.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include "block.h"
+#else
+#include "common.h"
+#include <linux/blk.h>
+#include <linux/tqueue.h>
+#endif
+
+#include <linux/cdrom.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <asm-xen/ctrl_if.h>
+
+typedef unsigned char byte; /* from linux/ide.h */
+
+/* Control whether runtime update of vbds is enabled. */
+#define ENABLE_VBD_UPDATE 1
+
+#if ENABLE_VBD_UPDATE
+static void vbd_update(void);
+#else
+static void vbd_update(void){}
+#endif
+
+#define BLKIF_STATE_CLOSED       0
+#define BLKIF_STATE_DISCONNECTED 1
+#define BLKIF_STATE_CONNECTED    2
+
+static char *blkif_state_name[] = {
+    [BLKIF_STATE_CLOSED]       = "closed",
+    [BLKIF_STATE_DISCONNECTED] = "disconnected",
+    [BLKIF_STATE_CONNECTED]    = "connected",
+};
+
+static char * blkif_status_name[] = {
+    [BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
+    [BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+    [BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
+    [BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
+};
+
+#if 1
+#define dprintf(fmt, args...) \
+printk(KERN_ALERT "[XEN:%s:%s:%d] " fmt, __FUNCTION__, __FILE__, __LINE__, ##args)
+#endif
+
+#define WPRINTK(fmt, args...) printk(KERN_WARNING "[XEN] " fmt, ##args)
+
+static int blkif_handle = 0;
+static unsigned int blkif_state = BLKIF_STATE_CLOSED;
+static unsigned int blkif_evtchn = 0;
+static unsigned int blkif_irq = 0;
+
+static int blkif_control_rsp_valid;
+static blkif_response_t blkif_control_rsp;
+
+static blkif_ring_t *blk_ring = NULL;
+static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
+static BLKIF_RING_IDX req_prod;  /* Private request producer.         */
+
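+/*
+ * rec_ring[] keeps a shadow copy of every in-flight request so that it can
+ * be reissued after a backend reconnect. Unused entries are chained into a
+ * free list through their 'id' field; rec_ring_free indexes the list head,
+ * while an in-use entry's 'id' holds the guest-side handle for the request.
+ */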
+unsigned long rec_ring_free;
+blkif_request_t rec_ring[BLKIF_RING_SIZE];
+
+static int recovery = 0;           /* "Recovery in progress" flag.  Protected
+                                    * by the blkif_io_lock */
+
+/* We plug the I/O ring if the driver is suspended or if the ring is full. */
+#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
+                         (blkif_state != BLKIF_STATE_CONNECTED))
+
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+                                        blkif_request_t *req);
+
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+                                        blkif_request_t *req);
+
+static inline void flush_requests(void);
+
+static void kick_pending_request_queues(void);
+
+int __init xlblk_init(void);
+
+void blkif_completion( blkif_request_t *req );
+
+static inline int GET_ID_FROM_FREELIST( void )
+{
+    unsigned long free = rec_ring_free;
+
+    if ( free > BLKIF_RING_SIZE )
+        BUG();
+
+    rec_ring_free = rec_ring[free].id;
+
+    rec_ring[free].id = 0x0fffffee; /* debug */
+
+    return free;
+}
+
+static inline void ADD_ID_TO_FREELIST( unsigned long id )
+{
+    rec_ring[id].id = rec_ring_free;
+    rec_ring_free = id;
+}
+
+
+/**************************  KERNEL VERSION 2.6  **************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+#define DISABLE_SCATTERGATHER() 
+
+__initcall(xlblk_init);
+
+#if ENABLE_VBD_UPDATE
+static void vbd_update(void)
+{
+    dprintf(">\n");
+    dprintf("<\n");
+}
+#endif /* ENABLE_VBD_UPDATE */
+
+static void kick_pending_request_queues(void)
+{
+
+    if ( (xlbd_blk_queue != NULL) &&
+         test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
+    {
+        blk_start_queue(xlbd_blk_queue);
+        /* XXXcl call to request_fn should not be needed but
+         * we get stuck without...  needs investigating
+         */
+        xlbd_blk_queue->request_fn(xlbd_blk_queue);
+    }
+
+}
+
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+    struct gendisk *gd = inode->i_bdev->bd_disk;
+    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+    /* Update of usage count is protected by per-device semaphore. */
+    di->mi->usage++;
+    
+    return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+    struct gendisk *gd = inode->i_bdev->bd_disk;
+    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+    /*
+     * When usage drops to zero it may allow more VBD updates to occur.
+     * Update of usage count is protected by a per-device semaphore.
+     */
+    if (--di->mi->usage == 0) {
+        vbd_update();
+    }
+
+    return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+                unsigned command, unsigned long argument)
+{
+    /*  struct gendisk *gd = inode->i_bdev->bd_disk; */
+
+    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+                  command, (long)argument, inode->i_rdev); 
+  
+    switch (command) {
+
+    case HDIO_GETGEO:
+        /* return ENOSYS to use defaults */
+        return -ENOSYS;
+
+    default:
+        printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+               command);
+        return -ENOSYS;
+    }
+
+    return 0;
+}
+
+#if 0
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+    DPRINTK("blkif_check\n");
+    return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+    struct block_device *bd;
+    struct gendisk *gd;
+    xen_block_t *disk;
+    unsigned long capacity;
+    int i, rc = 0;
+    
+    if ( (bd = bdget(dev)) == NULL )
+        return -EINVAL;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(dev)) == NULL) ||
+         ((disk = xldev_to_xldisk(dev)) == NULL) ||
+         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( disk->usage > 1 )
+    {
+        rc = -EBUSY;
+        goto out;
+    }
+
+    /* Only reread partition table if VBDs aren't mapped to partitions. */
+    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+    {
+        for ( i = gd->max_p - 1; i >= 0; i-- )
+        {
+            invalidate_device(dev+i, 1);
+            gd->part[MINOR(dev+i)].start_sect = 0;
+            gd->part[MINOR(dev+i)].nr_sects   = 0;
+            gd->sizes[MINOR(dev+i)]           = 0;
+        }
+
+        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+#endif
+
+/*
+ * blkif_queue_request
+ *
+ * request block io 
+ * 
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ *   virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+    struct xlbd_disk_info *di =
+        (struct xlbd_disk_info *)req->rq_disk->private_data;
+    unsigned long buffer_ma;
+    blkif_request_t *ring_req;
+    struct bio *bio;
+    struct bio_vec *bvec;
+    int idx, s;
+    unsigned long id;
+    unsigned int fsect, lsect;
+
+    if (unlikely(blkif_state != BLKIF_STATE_CONNECTED))
+        return 1;
+
+    /* Fill out a communications ring structure. */
+    ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+    id = GET_ID_FROM_FREELIST();
+    rec_ring[id].id = (unsigned long) req;
+
+    ring_req->id = id;
+    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
+        BLKIF_OP_READ;
+    ring_req->sector_number = (blkif_sector_t)req->sector;
+    ring_req->device = di->xd_device;
+
+    s = 0;
+    ring_req->nr_segments = 0;
+    rq_for_each_bio(bio, req) {
+        bio_for_each_segment(bvec, bio, idx) {
+            buffer_ma = page_to_phys(bvec->bv_page);
+            if (unlikely((buffer_ma & ((1<<9)-1)) != 0))
+                BUG();
+
+            fsect = bvec->bv_offset >> 9;
+            lsect = fsect + (bvec->bv_len >> 9) - 1;
+            if (unlikely(lsect > 7))
+                BUG();
+
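+            /* Pack the segment: page-aligned machine address, with the
+             * first sector in bits 3-5 and the last sector in bits 0-2. */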
+            ring_req->frame_and_sects[ring_req->nr_segments++] =
+                buffer_ma | (fsect << 3) | lsect;
+            s += bvec->bv_len >> 9;
+        }
+    }
+
+    req_prod++;
+
+    /* Keep a private copy so we can reissue requests when recovering. */
+    translate_req_to_pfn( &rec_ring[id], ring_req);
+
+    return 0;
+}
+
+
+/*
+ * do_blkif_request
+ *  read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+    struct request *req;
+    int queued;
+
+    DPRINTK("Entered do_blkif_request\n"); 
+
+    queued = 0;
+
+    while ((req = elv_next_request(rq)) != NULL) {
+        if (!blk_fs_request(req)) {
+            end_request(req, 0);
+            continue;
+        }
+
+        if ( BLKIF_RING_FULL )
+        {
+            blk_stop_queue(rq);
+            break;
+        }
+        DPRINTK("do_blkif_request %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
+                req, req->cmd, req->sector, req->current_nr_sectors,
+                req->nr_sectors, req->buffer,
+                rq_data_dir(req) ? "write" : "read");
+        blkdev_dequeue_request(req);
+        if (blkif_queue_request(req)) {
+            blk_stop_queue(rq);
+            break;
+        }
+        queued++;
+    }
+
+    if (queued != 0)
+        flush_requests();
+}
+
+
+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+    struct request *req;
+    blkif_response_t *bret;
+    BLKIF_RING_IDX i, rp;
+    unsigned long flags; 
+
+    spin_lock_irqsave(&blkif_io_lock, flags);     
+
+    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 
+         unlikely(recovery) )
+    {
+        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        return IRQ_HANDLED;
+    }
+
+    rp = blk_ring->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+    for ( i = resp_cons; i != rp; i++ )
+    {
+        unsigned long id;
+        bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+
+        id = bret->id;
+        req = (struct request *)rec_ring[id].id;
+
+        blkif_completion( &rec_ring[id] );
+
+        ADD_ID_TO_FREELIST(id); /* overwrites req */
+
+        switch ( bret->operation )
+        {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+                DPRINTK("Bad return from blkdev data request: %x\n",
+                        bret->status);
+           
+            if ( unlikely(end_that_request_first
+                          (req, 
+                           (bret->status == BLKIF_RSP_OKAY),
+                           req->hard_nr_sectors)) )
+                BUG();
+            end_that_request_last(req);
+
+            break;
+        case BLKIF_OP_PROBE:
+            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+            blkif_control_rsp_valid = 1;
+            break;
+        default:
+            BUG();
+        }
+    }
+    
+    resp_cons = i;
+
+    kick_pending_request_queues();
+
+    spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+    return IRQ_HANDLED;
+}
+
+#else
+/**************************  KERNEL VERSION 2.4  **************************/
+
+static kdev_t        sg_dev;
+static int           sg_operation = -1;
+static unsigned long sg_next_sect;
+
+/*
+ * Request queues with outstanding work, but ring is currently full.
+ * We need no special lock here, as we always access this with the
+ * blkif_io_lock held. We only need a small maximum list.
+ */
+#define MAX_PENDING 8
+static request_queue_t *pending_queues[MAX_PENDING];
+static int nr_pending;
+
+
+#define DISABLE_SCATTERGATHER() (sg_operation = -1)
+
+#define blkif_io_lock io_request_lock
+
+/*============================================================================*/
+#if ENABLE_VBD_UPDATE
+
+/*
+ * blkif_update_int/update_vbds_task - handle VBD update events.
+ *  Schedule a task for keventd to run, which will update the VBDs and perform 
+ *  the corresponding updates to our view of VBD state.
+ */
+static void update_vbds_task(void *unused)
+{ 
+    xlvbd_update_vbds();
+}
+
+static void vbd_update(void)
+{
+    static struct tq_struct update_tq;
+    dprintf(">\n");
+    update_tq.routine = update_vbds_task;
+    schedule_task(&update_tq);
+    dprintf("<\n");
+}
+
+#endif /* ENABLE_VBD_UPDATE */
+/*============================================================================*/
+
+
+static void kick_pending_request_queues(void)
+{
+    /* We kick pending request queues if the ring is reasonably empty. */
+    if ( (nr_pending != 0) && 
+         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
+    {
+        /* Attempt to drain the queue, but bail if the ring becomes full. */
+        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
+            do_blkif_request(pending_queues[--nr_pending]);
+    }
+}
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+    short xldev = inode->i_rdev; 
+    struct gendisk *gd = get_gendisk(xldev);
+    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+    short minor = MINOR(xldev); 
+
+    if ( gd->part[minor].nr_sects == 0 )
+    { 
+        /*
+         * Device either doesn't exist, or has zero capacity; we use a few
+         * cheesy heuristics to return the relevant error code
+         */
+        if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
+             ((minor & (gd->max_p - 1)) != 0) )
+        { 
+            /*
+             * We have a real device but no such partition, or we only have a
+             * partition number, so guess that this is the problem.
+             */
+            return -ENXIO;     /* no such device or address */
+        }
+        else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
+        {
+            /* This is a removable device => assume that media is missing. */ 
+            return -ENOMEDIUM; /* media not present (this is a guess) */
+        } 
+        else
+        { 
+            /* Just go for the general 'no such device' error. */
+            return -ENODEV;    /* no such device */
+        }
+    }
+    
+    /* Update of usage count is protected by per-device semaphore. */
+    disk->usage++;
+
+    return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+
+    /*
+     * When usage drops to zero it may allow more VBD updates to occur.
+     * Update of usage count is protected by a per-device semaphore.
+     */
+    if ( --disk->usage == 0 ) {
+        vbd_update();
+    }
+
+    return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+                unsigned command, unsigned long argument)
+{
+    kdev_t dev = inode->i_rdev;
+    struct hd_geometry *geo = (struct hd_geometry *)argument;
+    struct gendisk *gd;     
+    struct hd_struct *part; 
+    int i;
+    unsigned short cylinders;
+    byte heads, sectors;
+
+    /* NB. No need to check permissions. That is done for us. */
+    
+    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+                  command, (long) argument, dev); 
+  
+    gd = get_gendisk(dev);
+    part = &gd->part[MINOR(dev)]; 
+
+    switch ( command )
+    {
+    case BLKGETSIZE:
+        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
+        return put_user(part->nr_sects, (unsigned long *) argument);
+
+    case BLKGETSIZE64:
+        DPRINTK_IOCTL("   BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
+                      (u64)part->nr_sects * 512);
+        return put_user((u64)part->nr_sects * 512, (u64 *) argument);
+
+    case BLKRRPART:                               /* re-read partition table */
+        DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART);
+        return blkif_revalidate(dev);
+
+    case BLKSSZGET:
+        return hardsect_size[MAJOR(dev)][MINOR(dev)]; 
+
+    case BLKBSZGET:                                        /* get block size */
+        DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
+        break;
+
+    case BLKBSZSET:                                        /* set block size */
+        DPRINTK_IOCTL("   BLKBSZSET: %x\n", BLKBSZSET);
+        break;
+
+    case BLKRASET:                                         /* set read-ahead */
+        DPRINTK_IOCTL("   BLKRASET: %x\n", BLKRASET);
+        break;
+
+    case BLKRAGET:                                         /* get read-ahead */
+        DPRINTK_IOCTL("   BLKRAGET: %x\n", BLKRAGET);
+        break;
+
+    case HDIO_GETGEO:
+        DPRINTK_IOCTL("   HDIO_GETGEO: %x\n", HDIO_GETGEO);
+        if (!argument) return -EINVAL;
+
+        /* We don't have real geometry info, but let's at least return
+          values consistent with the size of the device */
+
+        heads = 0xff;
+        sectors = 0x3f; 
+        cylinders = part->nr_sects / (heads * sectors);
+
+        if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
+        if (put_user(heads,  (byte *)&geo->heads)) return -EFAULT;
+        if (put_user(sectors,  (byte *)&geo->sectors)) return -EFAULT;
+        if (put_user(cylinders, (unsigned short *)&geo->cylinders)) return -EFAULT;
+
+        return 0;
+
+    case HDIO_GETGEO_BIG: 
+        DPRINTK_IOCTL("   HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
+        if (!argument) return -EINVAL;
+
+        /* We don't have real geometry info, but let's at least return
+          values consistent with the size of the device */
+
+        heads = 0xff;
+        sectors = 0x3f; 
+        cylinders = part->nr_sects / (heads * sectors);
+
+        if (put_user(0x00,  (unsigned long *) &geo->start))  return -EFAULT;
+        if (put_user(heads,  (byte *)&geo->heads))   return -EFAULT;
+        if (put_user(sectors,  (byte *)&geo->sectors)) return -EFAULT;
+        if (put_user(cylinders, (unsigned int *) &geo->cylinders)) return -EFAULT;
+
+        return 0;
+
+    case CDROMMULTISESSION:
+        DPRINTK("FIXME: support multisession CDs later\n");
+        for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+            if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+        return 0;
+
+    case SCSI_IOCTL_GET_BUS_NUMBER:
+        DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
+        return -ENOSYS;
+
+    default:
+        printk(KERN_ALERT "ioctl %08x not supported by XL blkif\n", command);
+        return -ENOSYS;
+    }
+    
+    return 0;
+}
+
+
+
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+    DPRINTK("blkif_check\n");
+    return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+    struct block_device *bd;
+    struct gendisk *gd;
+    xl_disk_t *disk;
+    unsigned long capacity;
+    int i, rc = 0;
+    
+    if ( (bd = bdget(dev)) == NULL )
+        return -EINVAL;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(dev)) == NULL) ||
+         ((disk = xldev_to_xldisk(dev)) == NULL) ||
+         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( disk->usage > 1 )
+    {
+        rc = -EBUSY;
+        goto out;
+    }
+
+    /* Only reread partition table if VBDs aren't mapped to partitions. */
+    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+    {
+        for ( i = gd->max_p - 1; i >= 0; i-- )
+        {
+            invalidate_device(dev+i, 1);
+            gd->part[MINOR(dev+i)].start_sect = 0;
+            gd->part[MINOR(dev+i)].nr_sects   = 0;
+            gd->sizes[MINOR(dev+i)]           = 0;
+        }
+
+        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+
+
+
+/*
+ * blkif_queue_request
+ *
+ * request block io 
+ * 
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ *   virtual address in the guest os.
+ */
+static int blkif_queue_request(unsigned long   id,
+                               int             operation,
+                               char *          buffer,
+                               unsigned long   sector_number,
+                               unsigned short  nr_sectors,
+                               kdev_t          device)
+{
+    unsigned long       buffer_ma = phys_to_machine(virt_to_phys(buffer)); 
+    unsigned long       xid;
+    struct gendisk     *gd;
+    blkif_request_t    *req;
+    struct buffer_head *bh;
+    unsigned int        fsect, lsect;
+
+    fsect = (buffer_ma & ~PAGE_MASK) >> 9;
+    lsect = fsect + nr_sectors - 1;
+
+    /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
+    if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
+        BUG();
+    if ( lsect > 7 )
+        BUG();
+
+    buffer_ma &= PAGE_MASK;
+
+    if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+        return 1;
+
+    switch ( operation )
+    {
+
+    case BLKIF_OP_READ:
+    case BLKIF_OP_WRITE:
+        gd = get_gendisk(device); 
+
+        /*
+         * Update the sector_number we'll pass down as appropriate; note that
+         * we could sanity-check that the resulting sector lies within this
+         * partition, but the backend driver does that check anyhow.
+         */
+        sector_number += gd->part[MINOR(device)].start_sect;
+
+        /*
+         * If this unit doesn't consist of virtual partitions then we clear 
+         * the partn bits from the device number.
+         */
+        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 
+               GENHD_FL_VIRT_PARTNS) )
+            device &= ~(gd->max_p - 1);
+
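+        /*
+         * If this buffer head continues the request most recently placed on
+         * the ring (same operation, same device, next sector) then append it
+         * as an extra segment instead of consuming a new ring slot.
+         */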
+        if ( (sg_operation == operation) &&
+             (sg_dev == device) &&
+             (sg_next_sect == sector_number) )
+        {
+
+            req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
+            bh = (struct buffer_head *)id;
+
+            bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
+            rec_ring[req->id].id = id;
+
+            req->frame_and_sects[req->nr_segments] = 
+                buffer_ma | (fsect<<3) | lsect;
+            if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
+                sg_next_sect += nr_sectors;
+            else
+                DISABLE_SCATTERGATHER();
+
+            /* Update the copy of the request in the recovery ring. */
+            translate_req_to_pfn(&rec_ring[req->id], req );
+
+            return 0;
+        }
+        else if ( BLKIF_RING_FULL )
+        {
+            return 1;
+        }
+        else
+        {
+            sg_operation = operation;
+            sg_dev       = device;
+            sg_next_sect = sector_number + nr_sectors;
+        }
+        break;
+
+    default:
+        panic("unknown op %d\n", operation);
+    }
+
+    /* Fill out a communications ring structure. */
+    req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+
+    xid = GET_ID_FROM_FREELIST();
+    rec_ring[xid].id = id;
+
+    req->id            = xid;
+    req->operation     = operation;
+    req->sector_number = (blkif_sector_t)sector_number;
+    req->device        = device; 
+    req->nr_segments   = 1;
+    req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+
+    req_prod++;
+
+    /* Keep a private copy so we can reissue requests when recovering. */    
+    translate_req_to_pfn(&rec_ring[xid], req );
+
+    return 0;
+}
+
+
+/*
+ * do_blkif_request
+ *  read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+    struct request *req;
+    struct buffer_head *bh, *next_bh;
+    int rw, nsect, full, queued = 0;
+
+    DPRINTK("Entered do_blkif_request\n"); 
+
+    while ( !rq->plugged && !list_empty(&rq->queue_head))
+    {
+        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
+            goto out;
+  
+        DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
+                req, req->cmd, req->sector,
+                req->current_nr_sectors, req->nr_sectors, req->bh);
+
+        rw = req->cmd;
+        if ( rw == READA )
+            rw = READ;
+        if ( unlikely((rw != READ) && (rw != WRITE)) )
+            panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
+
+        req->errors = 0;
+
+        bh = req->bh;
+        while ( bh != NULL )
+        {
+            next_bh = bh->b_reqnext;
+            bh->b_reqnext = NULL;
+
+            full = blkif_queue_request(
+                (unsigned long)bh,
+                (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE, 
+                bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
+
+            if ( full )
+            { 
+                bh->b_reqnext = next_bh;
+                pending_queues[nr_pending++] = rq;
+                if ( unlikely(nr_pending >= MAX_PENDING) )
+                    BUG();
+                goto out; 
+            }
+
+            queued++;
+
+            /* Dequeue the buffer head from the request. */
+            nsect = bh->b_size >> 9;
+            bh = req->bh = next_bh;
+            
+            if ( bh != NULL )
+            {
+                /* There's another buffer head to do. Update the request. */
+                req->hard_sector += nsect;
+                req->hard_nr_sectors -= nsect;
+                req->sector = req->hard_sector;
+                req->nr_sectors = req->hard_nr_sectors;
+                req->current_nr_sectors = bh->b_size >> 9;
+                req->buffer = bh->b_data;
+            }
+            else
+            {
+                /* That was the last buffer head. Finalise the request. */
+                if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
+                    BUG();
+                blkdev_dequeue_request(req);
+                end_that_request_last(req);
+            }
+        }
+    }
+
+ out:
+    if ( queued != 0 )
+        flush_requests();
+}
+
+
+static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+    BLKIF_RING_IDX i, rp; 
+    unsigned long flags; 
+    struct buffer_head *bh, *next_bh;
+    
+    spin_lock_irqsave(&io_request_lock, flags);     
+
+    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
+    {
+        spin_unlock_irqrestore(&io_request_lock, flags);
+        return;
+    }
+
+    rp = blk_ring->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+    for ( i = resp_cons; i != rp; i++ )
+    {
+        unsigned long id;
+        blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+
+        id = bret->id;
+        bh = (struct buffer_head *)rec_ring[id].id;
+
+        blkif_completion( &rec_ring[id] );
+
+        ADD_ID_TO_FREELIST(id);
+
+        switch ( bret->operation )
+        {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+                DPRINTK("Bad return from blkdev data request: %lx\n",
+                        bret->status);
+            for ( ; bh != NULL; bh = next_bh )
+            {
+                next_bh = bh->b_reqnext;
+                bh->b_reqnext = NULL;
+                bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
+            }
+
+            break;
+        case BLKIF_OP_PROBE:
+            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+            blkif_control_rsp_valid = 1;
+            break;
+        default:
+            BUG();
+        }
+    }
+    
+    resp_cons = i;
+
+    kick_pending_request_queues();
+
+    spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+#endif
+
+/*****************************  COMMON CODE  *******************************/
+
+
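+/*
+ * Shadow copies in rec_ring[] are stored in pseudo-physical form
+ * (translate_req_to_pfn); translate_req_to_mfn rebuilds the machine
+ * addresses when a shadowed request is reissued during recovery.
+ */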
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
+{
+    int i;
+
+    xreq->operation     = req->operation;
+    xreq->nr_segments   = req->nr_segments;
+    xreq->device        = req->device;
+    /* preserve id */
+    xreq->sector_number = req->sector_number;
+
+    for ( i = 0; i < req->nr_segments; i++ ){
+        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+    }
+}
+
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
+{
+    int i;
+
+    xreq->operation     = req->operation;
+    xreq->nr_segments   = req->nr_segments;
+    xreq->device        = req->device;
+    xreq->id            = req->id;   /* copy id (unlike above) */
+    xreq->sector_number = req->sector_number;
+
+    for ( i = 0; i < req->nr_segments; i++ ){
+        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+    }
+}
+
+
+
+static inline void flush_requests(void)
+{
+    DISABLE_SCATTERGATHER();
+    wmb(); /* Ensure that the frontend can see the requests. */
+    blk_ring->req_prod = req_prod;
+    notify_via_evtchn(blkif_evtchn);
+}
+
+
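+/*
+ * Synchronously submit a control request (e.g. BLKIF_OP_PROBE): wait for a
+ * free ring slot, queue the request, then poll until blkif_int() copies the
+ * matching response into blkif_control_rsp.
+ */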
+void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
+{
+    unsigned long flags, id;
+
+ retry:
+    while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    spin_lock_irqsave(&blkif_io_lock, flags);
+    if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+    {
+        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        goto retry;
+    }
+
+    DISABLE_SCATTERGATHER();
+    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;    
+
+    id = GET_ID_FROM_FREELIST();
+    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
+    rec_ring[id].id = (unsigned long) req;
+
+    translate_req_to_pfn( &rec_ring[id], req );
+
+    req_prod++;
+    flush_requests();
+
+    spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+    while ( !blkif_control_rsp_valid )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
+    blkif_control_rsp_valid = 0;
+}
+
+
+/* Send a driver status notification to the domain controller. */
+static void send_driver_status(int ok){
+    ctrl_msg_t cmsg = {
+        .type    = CMSG_BLKIF_FE,
+        .subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
+        .length  = sizeof(blkif_fe_driver_status_t),
+    };
+    blkif_fe_driver_status_t *msg = (void*)cmsg.msg;
+    
+    msg->status = (ok ? BLKIF_DRIVER_STATUS_UP : BLKIF_DRIVER_STATUS_DOWN);
+
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+/* Tell the controller to bring up the interface. */
+static void blkif_send_interface_connect(void){
+    ctrl_msg_t cmsg = {
+        .type    = CMSG_BLKIF_FE,
+        .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
+        .length  = sizeof(blkif_fe_interface_connect_t),
+    };
+    blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+    
+    msg->handle      = 0;
+    msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
+    
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+static void blkif_free(void)
+{
+
+    printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
+            
+    /* Prevent new requests being issued until we fix things up. */
+    spin_lock_irq(&blkif_io_lock);
+    recovery = 1;
+    blkif_state = BLKIF_STATE_DISCONNECTED;
+    spin_unlock_irq(&blkif_io_lock);
+
+    /* Free resources associated with old device channel. */
+    if(blk_ring){
+        free_page((unsigned long)blk_ring);
+        blk_ring = 0;
+    }
+    free_irq(blkif_irq, NULL);
+    blkif_irq = 0;
+    
+    unbind_evtchn_from_irq(blkif_evtchn);
+    blkif_evtchn = 0;
+}
+
+static void blkif_close(void){
+}
+
+/* Move from CLOSED to DISCONNECTED state. */
+static void blkif_disconnect(void)
+{
+    if(blk_ring) free_page((unsigned long)blk_ring);
+    blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+    blkif_state  = BLKIF_STATE_DISCONNECTED;
+    blkif_send_interface_connect();
+}
+
+static void blkif_reset(void)
+{
+    printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
+    blkif_free();
+    blkif_disconnect();
+}
+
+static void blkif_recover(void)
+{
+
+    int i;
+
+    /* Hmm, requests might be re-ordered when we re-issue them.
+     * This will need to be fixed once we have barriers */
+
+    /* Stage 1 : Find active and move to safety. */
+    for ( i = 0; i < BLKIF_RING_SIZE; i++ ) {
+        if ( rec_ring[i].id >= PAGE_OFFSET ) {
+            translate_req_to_mfn(
+                &blk_ring->ring[req_prod].req, &rec_ring[i]);
+            req_prod++;
+        }
+    }
+
+    printk(KERN_ALERT "blkfront: recovered %d descriptors\n", req_prod);
+           
+    /* Stage 2 : Set up shadow list. */
+    for ( i = 0; i < req_prod; i++ ) {
+        rec_ring[i].id = blk_ring->ring[i].req.id;             
+        blk_ring->ring[i].req.id = i;
+        translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
+    }
+
+    /* Stage 3 : Set up free list. */
+    for ( ; i < BLKIF_RING_SIZE; i++ ){
+        rec_ring[i].id = i+1;
+    }
+    rec_ring_free = req_prod;
+    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
+
+    /* blk_ring->req_prod will be set when we flush_requests().*/
+    wmb();
+
+    /* Switch off recovery mode, using a memory barrier to ensure that
+     * it's seen before we flush requests - we don't want to miss any
+     * interrupts. */
+    recovery = 0;
+    wmb();
+
+    /* Kicks things back into life. */
+    flush_requests();
+
+    /* Now safe to let other people use the interface. */
+    blkif_state = BLKIF_STATE_CONNECTED;
+}
+
+static void blkif_connect(blkif_fe_interface_status_t *status)
+{
+    int err = 0;
+
+    blkif_evtchn = status->evtchn;
+    blkif_irq    = bind_evtchn_to_irq(blkif_evtchn);
+
+    err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
+    if(err){
+        printk(KERN_ALERT "[XEN] blkfront request_irq failed (err=%d)\n", err);
+        return;
+    }
+
+    if ( recovery ) {
+        blkif_recover();
+    } else {
+        /* Transition to connected in case we need to do 
+         *  a partition probe on a whole disk. */
+        blkif_state = BLKIF_STATE_CONNECTED;
+        
+        /* Probe for discs attached to the interface. */
+        xlvbd_init();
+    }
+    
+    /* Kick pending requests. */
+    spin_lock_irq(&blkif_io_lock);
+    kick_pending_request_queues();
+    spin_unlock_irq(&blkif_io_lock);
+}
+
+static void unexpected(blkif_fe_interface_status_t *status)
+{
+    WPRINTK(" Unexpected blkif status %s in state %s\n", 
+           blkif_status_name[status->status],
+           blkif_state_name[blkif_state]);
+}
+
+static void blkif_status(blkif_fe_interface_status_t *status)
+{
+    if (status->handle != blkif_handle) {
+        WPRINTK(" Invalid blkif: handle=%u\n", status->handle);
+        return;
+    }
+
+    switch (status->status) {
+
+    case BLKIF_INTERFACE_STATUS_CLOSED:
+        switch(blkif_state){
+        case BLKIF_STATE_CLOSED:
+            unexpected(status);
+            break;
+        case BLKIF_STATE_DISCONNECTED:
+        case BLKIF_STATE_CONNECTED:
+            unexpected(status);
+            blkif_close();
+            break;
+        }
+        break;
+
+    case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+        switch(blkif_state){
+        case BLKIF_STATE_CLOSED:
+            blkif_disconnect();
+            break;
+        case BLKIF_STATE_DISCONNECTED:
+        case BLKIF_STATE_CONNECTED:
+            unexpected(status);
+            blkif_reset();
+            break;
+        }
+        break;
+
+    case BLKIF_INTERFACE_STATUS_CONNECTED:
+        switch(blkif_state){
+        case BLKIF_STATE_CLOSED:
+            unexpected(status);
+            blkif_disconnect();
+            blkif_connect(status);
+            break;
+        case BLKIF_STATE_DISCONNECTED:
+            blkif_connect(status);
+            break;
+        case BLKIF_STATE_CONNECTED:
+            unexpected(status);
+            blkif_connect(status);
+            break;
+        }
+        break;
+
+    case BLKIF_INTERFACE_STATUS_CHANGED:
+        switch(blkif_state){
+        case BLKIF_STATE_CLOSED:
+        case BLKIF_STATE_DISCONNECTED:
+            unexpected(status);
+            break;
+        case BLKIF_STATE_CONNECTED:
+            vbd_update();
+            break;
+        }
+        break;
+
+    default:
+        WPRINTK(" Invalid blkif status: %d\n", status->status);
+        break;
+    }
+}
+
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    switch ( msg->subtype )
+    {
+    case CMSG_BLKIF_FE_INTERFACE_STATUS:
+        if ( msg->length != sizeof(blkif_fe_interface_status_t) )
+            goto parse_error;
+        blkif_status((blkif_fe_interface_status_t *)
+                     &msg->msg[0]);
+        break;        
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+int wait_for_blkif(void){
+    int err = 0;
+    int i;
+    send_driver_status(1);
+
+    /*
+     * We should read 'nr_interfaces' from response message and wait
+     * for notifications before proceeding. For now we assume that we
+     * will be notified of exactly one interface.
+     */
+    for ( i=0; (blkif_state != BLKIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    if (blkif_state != BLKIF_STATE_CONNECTED){
+        printk(KERN_INFO "[XEN] Timeout connecting block device driver!\n");
+        err = -ENOSYS;
+    }
+    return err;
+}
+
+int __init xlblk_init(void)
+{
+    int i;
+    
+    if ( (xen_start_info.flags & SIF_INITDOMAIN) 
+         || (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+        return 0;
+
+    printk(KERN_INFO "[XEN] Initialising virtual block device driver\n");
+
+    rec_ring_free = 0;
+    for (i=0; i<BLKIF_RING_SIZE; i++)
+    {
+       rec_ring[i].id = i+1;
+    }
+    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
+
+    (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    wait_for_blkif();
+
+    return 0;
+}
+
+void blkdev_suspend(void)
+{
+}
+
+void blkdev_resume(void)
+{
+    send_driver_status(1);
+}
+
+/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
+
+void blkif_completion(blkif_request_t *req)
+{
+    int i;
+
+    switch ( req->operation )
+    {
+    case BLKIF_OP_READ:
+        for ( i = 0; i < req->nr_segments; i++ )
+        {
+            unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+            unsigned long mfn = phys_to_machine_mapping[pfn];
+            xen_machphys_update(mfn, pfn);
+        }
+        break;
+    }
+    
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/block.h b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/block.h
new file mode 100644 (file)
index 0000000..76255e8
--- /dev/null
@@ -0,0 +1,112 @@
+/******************************************************************************
+ * block.h
+ * 
+ * Shared definitions between all levels of XenLinux Virtual block devices.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_DRIVERS_BLOCK_H__
+#define __XEN_DRIVERS_BLOCK_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm-xen/hypervisor-ifs/io/blkif.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+struct xlbd_type_info {
+       int partn_shift;
+       int devs_per_major;
+       int hardsect_size;
+       int max_sectors;
+       char *name;
+};
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct xlbd_major_info {
+       int major;
+       int usage;
+       int xd_device;
+       struct xlbd_type_info *type;
+};
+
+struct xlbd_disk_info {
+       int xd_device;
+       struct xlbd_major_info *mi;
+};
+
+typedef struct xen_block {
+       int usage;
+} xen_block_t;
+
+extern struct request_queue *xlbd_blk_queue;
+extern spinlock_t blkif_io_lock;
+
+extern int blkif_open(struct inode *inode, struct file *filep);
+extern int blkif_release(struct inode *inode, struct file *filep);
+extern int blkif_ioctl(struct inode *inode, struct file *filep,
+                           unsigned command, unsigned long argument);
+extern int blkif_check(dev_t dev);
+extern int blkif_revalidate(dev_t dev);
+extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+extern void do_blkif_request (request_queue_t *rq); 
+
+extern void xlvbd_update_vbds(void);
+
+/* Virtual block-device subsystem. */
+extern int  xlvbd_init(void);
+extern void xlvbd_cleanup(void); 
+
+#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/vbd.c b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/vbd.c
new file mode 100644 (file)
index 0000000..d2cfb7c
--- /dev/null
@@ -0,0 +1,553 @@
+/******************************************************************************
+ * vbd.c
+ * 
+ * XenLinux virtual block-device driver (xvd).
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "block.h"
+#include <linux/blkdev.h>
+
+/*
+ * For convenience we distinguish between ide, scsi and 'other' (i.e.
+ * potentially combinations of the two) in the naming scheme and in a few 
+ * other places (like default readahead, etc).
+ */
+
+#define NUM_IDE_MAJORS 10
+#define NUM_SCSI_MAJORS 9
+#define NUM_VBD_MAJORS 1
+
+static struct xlbd_type_info xlbd_ide_type = {
+       .partn_shift = 6,
+       // XXXcl todo blksize_size[major]  = 1024;
+       .hardsect_size = 512,
+       .max_sectors = 128,  /* 'hwif->rqsize' if we knew it */
+       // XXXcl todo read_ahead[major]    = 8; /* from drivers/ide/ide-probe.c */
+       .name = "hd",
+};
+
+static struct xlbd_type_info xlbd_scsi_type = {
+       .partn_shift = 4,
+       // XXXcl todo blksize_size[major]  = 1024; /* XXX 512; */
+       .hardsect_size = 512,
+       .max_sectors = 128*8, /* XXX 128; */
+       // XXXcl todo read_ahead[major]    = 0; /* XXX 8; -- guessing */
+       .name = "sd",
+};
+
+static struct xlbd_type_info xlbd_vbd_type = {
+       .partn_shift = 4,
+       // XXXcl todo blksize_size[major]  = 512;
+       .hardsect_size = 512,
+       .max_sectors = 128,
+       // XXXcl todo read_ahead[major]    = 8;
+       .name = "xvd",
+};
+
+/* XXXcl handle cciss after finding out why it's "hacked" in */
+
+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
+                                        NUM_VBD_MAJORS];
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static int nr_vbds;
+static vdisk_t *vbd_info;
+
+struct request_queue *xlbd_blk_queue = NULL;
+
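+/* A Xen virtual-device ID carries the major number above bit 8 and the
+ * minor number in the low 8 bits. */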
+#define MAJOR_XEN(dev) ((dev)>>8)
+#define MINOR_XEN(dev) ((dev) & 0xff)
+
+static struct block_device_operations xlvbd_block_fops = 
+{
+       .owner          = THIS_MODULE,
+       .open           = blkif_open,
+       .release        = blkif_release,
+       .ioctl          = blkif_ioctl,
+#if 0
+    check_media_change: blkif_check,
+    revalidate:         blkif_revalidate,
+#endif
+};
+
+spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
+
+static int xlvbd_get_vbd_info(vdisk_t *disk_info)
+{
+    vdisk_t         *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
+    blkif_request_t  req;
+    blkif_response_t rsp;
+    int              nr;
+
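+    /* A probe request hands the backend a whole page (sectors 0-7) to fill
+     * with vdisk_t records; the response status is the number of records. */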
+    memset(&req, 0, sizeof(req));
+    req.operation   = BLKIF_OP_PROBE;
+    req.nr_segments = 1;
+    req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+
+    blkif_control_send(&req, &rsp);
+
+    if ( rsp.status <= 0 )
+    {
+        printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
+        return -1;
+    }
+
+    if ( (nr = rsp.status) > MAX_VBDS )
+         nr = MAX_VBDS;
+    memcpy(disk_info, buf, nr * sizeof(vdisk_t));
+
+    free_page((unsigned long)buf);
+
+    return nr;
+}
+
+static struct xlbd_major_info *xlbd_get_major_info(int xd_device, int *minor)
+{
+       int mi_idx, new_major;
+       int xd_major = MAJOR_XEN(xd_device); 
+       int xd_minor = MINOR_XEN(xd_device);
+
+       *minor = xd_minor;
+
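+       /* major_info[] slots: 0-9 IDE, 10-17 SCSI disk, 18 SCSI CD-ROM,
+        * 19 'other' (xvd). */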
+       switch (xd_major) {
+       case IDE0_MAJOR: mi_idx = 0; new_major = IDE0_MAJOR; break;
+       case IDE1_MAJOR: mi_idx = 1; new_major = IDE1_MAJOR; break;
+       case IDE2_MAJOR: mi_idx = 2; new_major = IDE2_MAJOR; break;
+       case IDE3_MAJOR: mi_idx = 3; new_major = IDE3_MAJOR; break;
+       case IDE4_MAJOR: mi_idx = 4; new_major = IDE4_MAJOR; break;
+       case IDE5_MAJOR: mi_idx = 5; new_major = IDE5_MAJOR; break;
+       case IDE6_MAJOR: mi_idx = 6; new_major = IDE6_MAJOR; break;
+       case IDE7_MAJOR: mi_idx = 7; new_major = IDE7_MAJOR; break;
+       case IDE8_MAJOR: mi_idx = 8; new_major = IDE8_MAJOR; break;
+       case IDE9_MAJOR: mi_idx = 9; new_major = IDE9_MAJOR; break;
+       case SCSI_DISK0_MAJOR: mi_idx = 10; new_major = SCSI_DISK0_MAJOR; break;
+       case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
+               mi_idx = 11 + xd_major - SCSI_DISK1_MAJOR;
+               new_major = SCSI_DISK1_MAJOR + xd_major - SCSI_DISK1_MAJOR;
+               break;
+       case SCSI_CDROM_MAJOR: mi_idx = 18; new_major = SCSI_CDROM_MAJOR; break;
+       default: mi_idx = 19; new_major = 0;/* XXXcl notyet */ break;
+       }
+
+       if (major_info[mi_idx])
+               return major_info[mi_idx];
+
+       major_info[mi_idx] = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
+       if (major_info[mi_idx] == NULL)
+               return NULL;
+
+       memset(major_info[mi_idx], 0, sizeof(struct xlbd_major_info));
+
+       switch (mi_idx) {
+       case 0 ... (NUM_IDE_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_ide_type;
+               break;
+       case NUM_IDE_MAJORS ... (NUM_IDE_MAJORS + NUM_SCSI_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_scsi_type;
+               break;
+       case (NUM_IDE_MAJORS + NUM_SCSI_MAJORS) ...
+               (NUM_IDE_MAJORS + NUM_SCSI_MAJORS + NUM_VBD_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_vbd_type;
+               break;
+       }
+       major_info[mi_idx]->major = new_major;
+
+       if (register_blkdev(major_info[mi_idx]->major, major_info[mi_idx]->type->name)) {
+               printk(KERN_ALERT "XL VBD: can't get major %d with name %s\n",
+                   major_info[mi_idx]->major, major_info[mi_idx]->type->name);
+               goto out;
+       }
+
+       devfs_mk_dir(major_info[mi_idx]->type->name);
+
+       return major_info[mi_idx];
+
+ out:
+       kfree(major_info[mi_idx]);
+       major_info[mi_idx] = NULL;
+       return NULL;
+}
+
+static struct gendisk *xlvbd_get_gendisk(struct xlbd_major_info *mi,
+                                        int xd_minor, vdisk_t *xd)
+{
+       struct gendisk *gd;
+       struct xlbd_disk_info *di;
+       int device, partno;
+
+       device = MKDEV(mi->major, xd_minor);
+       gd = get_gendisk(device, &partno);
+       if (gd)
+               return gd;
+
+       di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
+       if (di == NULL)
+               return NULL;
+       di->mi = mi;
+       di->xd_device = xd->device;
+
+       /* Construct an appropriate gendisk structure. */
+       gd = alloc_disk(1);
+       if (gd == NULL)
+               goto out;
+
+       gd->major = mi->major;
+       gd->first_minor = xd_minor;
+       gd->fops = &xlvbd_block_fops;
+       gd->private_data = di;
+       sprintf(gd->disk_name, "%s%c%d", mi->type->name,
+           'a' + (xd_minor >> mi->type->partn_shift),
+           xd_minor & ((1 << mi->type->partn_shift) - 1));
+       /*  sprintf(gd->devfs_name, "%s%s/disc%d", mi->type->name, , ); XXXdevfs */
+
+       set_capacity(gd, xd->capacity);
+
+       if (xlbd_blk_queue == NULL) {
+               xlbd_blk_queue = blk_init_queue(do_blkif_request,
+                                               &blkif_io_lock);
+               if (xlbd_blk_queue == NULL)
+                       goto out;
+               elevator_init(xlbd_blk_queue, &elevator_noop);
+
+               /*
+                * Turn off barking 'headactive' mode. We dequeue
+                * buffer heads as soon as we pass them to back-end
+                * driver.
+                */
+               blk_queue_headactive(xlbd_blk_queue, 0); /* XXXcl: noop according to blkdev.h */
+
+               blk_queue_hardsect_size(xlbd_blk_queue,
+                                       mi->type->hardsect_size);
+               blk_queue_max_sectors(xlbd_blk_queue, mi->type->max_sectors); /* 'hwif->rqsize' if we knew it */
+
+               /* XXXcl: set mask to PAGE_SIZE for now; to improve, use one of:
+                  - blk_queue_merge_bvec to merge requests with adjacent ma's
+                  - the tags infrastructure
+                  - the dma infrastructure
+               */
+               blk_queue_segment_boundary(xlbd_blk_queue, PAGE_SIZE - 1);
+
+               blk_queue_max_phys_segments(xlbd_blk_queue,
+                    BLKIF_MAX_SEGMENTS_PER_REQUEST);
+               blk_queue_max_hw_segments(xlbd_blk_queue,
+                    BLKIF_MAX_SEGMENTS_PER_REQUEST); /* XXXcl not needed? */
+
+       }
+       gd->queue = xlbd_blk_queue;
+
+       add_disk(gd);
+
+       return gd;
+
+ out:
+       if (gd)
+               del_gendisk(gd);
+       kfree(di);
+       return NULL;
+}
+
+/*
+ * xlvbd_init_device - initialise a VBD device
+ * @disk:              a vdisk_t describing the VBD
+ *
+ * Takes a vdisk_t * that describes a VBD the domain has access to.
+ * Performs appropriate initialisation and registration of the device.
+ *
+ * Care needs to be taken when making re-entrant calls to ensure that
+ * corruption does not occur.  Also, devices that are in use should not have
+ * their details updated.  This is the caller's responsibility.
+ */
+static int xlvbd_init_device(vdisk_t *xd)
+{
+       struct block_device *bd;
+       struct gendisk *gd;
+       struct xlbd_major_info *mi;
+       int device;
+       int minor;
+
+       int err = -ENOMEM;
+
+       mi = xlbd_get_major_info(xd->device, &minor);
+       if (mi == NULL)
+               return -EPERM;
+
+       device = MKDEV(mi->major, minor);
+
+       if ((bd = bdget(device)) == NULL)
+               return -EPERM;
+
+       /*
+        * Update of partition info, and check of usage count, is protected
+        * by the per-block-device semaphore.
+        */
+       down(&bd->bd_sem);
+
+       gd = xlvbd_get_gendisk(mi, minor, xd);
+       if (gd == NULL) {
+               err = -EPERM;
+               goto out;
+       }
+
+       if (VDISK_READONLY(xd->info))
+               set_disk_ro(gd, 1); 
+
+       /* Some final fix-ups depending on the device type */
+       switch (VDISK_TYPE(xd->info)) { 
+       case VDISK_TYPE_CDROM:
+               gd->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD; 
+               /* FALLTHROUGH */
+       case VDISK_TYPE_FLOPPY: 
+       case VDISK_TYPE_TAPE:
+               gd->flags |= GENHD_FL_REMOVABLE; 
+               break; 
+
+       case VDISK_TYPE_DISK:
+               break; 
+
+       default:
+               printk(KERN_ALERT "XenLinux: unknown device type %d\n", 
+                   VDISK_TYPE(xd->info)); 
+               break; 
+       }
+
+       err = 0;
+ out:
+       up(&bd->bd_sem);
+       bdput(bd);    
+       return err;
+}
+
+#if 0
+/*
+ * xlvbd_remove_device - remove a device node if possible
+ * @device:       numeric device ID
+ *
+ * Updates the gendisk structure and invalidates devices.
+ *
+ * This is OK for now, but in future we should consider where this ought to
+ * deallocate gendisks / unregister devices.
+ */
+static int xlvbd_remove_device(int device)
+{
+    int i, rc = 0, minor = MINOR(device);
+    struct gendisk *gd;
+    struct block_device *bd;
+    xen_block_t *disk = NULL;
+
+    if ( (bd = bdget(device)) == NULL )
+        return -1;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(device)) == NULL) ||
+         ((disk = xldev_to_xldisk(device)) == NULL) )
+        BUG();
+
+    if ( disk->usage != 0 )
+    {
+        printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
+        rc = -1;
+        goto out;
+    }
+    if ( (minor & (gd->max_p-1)) != 0 )
+    {
+        /* 1: The VBD is mapped to a partition rather than a whole unit. */
+        invalidate_device(device, 1);
+       gd->part[minor].start_sect = 0;
+        gd->part[minor].nr_sects   = 0;
+        gd->sizes[minor]           = 0;
+
+        /* Clear the consists-of-virtual-partitions flag if possible. */
+        gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
+        for ( i = 1; i < gd->max_p; i++ )
+            if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
+                gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+
+        /*
+         * If all virtual partitions are now gone, and a 'whole unit' VBD is
+         * present, then we can try to grok the unit's real partition table.
+         */
+        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+             (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
+             !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
+        {
+            register_disk(gd,
+                          device&~(gd->max_p-1), 
+                          gd->max_p, 
+                          &xlvbd_block_fops,
+                          gd->part[minor&~(gd->max_p-1)].nr_sects);
+        }
+    }
+    else
+    {
+        /*
+         * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
+         * NB. The partition entries are only cleared if there are no VBDs
+         * mapped to individual partitions on this unit.
+         */
+        i = gd->max_p - 1; /* Default: clear subpartitions as well. */
+        if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+            i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
+        while ( i >= 0 )
+        {
+            invalidate_device(device+i, 1);
+            gd->part[minor+i].start_sect = 0;
+            gd->part[minor+i].nr_sects   = 0;
+            gd->sizes[minor+i]           = 0;
+            i--;
+        }
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+/*
+ * xlvbd_update_vbds - reprobes the VBD status and updates driver state
+ * accordingly. The VBDs need to be updated in this way when the domain is
+ * initialised and also each time we receive an XLBLK_UPDATE event.
+ */
+void xlvbd_update_vbds(void)
+{
+    int i, j, k, old_nr, new_nr;
+    vdisk_t *old_info, *new_info, *merged_info;
+
+    old_info = vbd_info;
+    old_nr   = nr_vbds;
+
+    new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+    if ( unlikely((new_nr = xlvbd_get_vbd_info(new_info)) < 0) )
+    {
+        kfree(new_info);
+        return;
+    }
+
+    /*
+     * The merged list is at most the size of the old list plus the new list.
+     * That bound is reached only when the two lists do not overlap at all and
+     * none of the old VBDs can yet be destroyed because they are still in use.
+     */
+    merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
+
+    /* @i tracks old list; @j tracks new list; @k tracks merged list. */
+    i = j = k = 0;
+
+    while ( (i < old_nr) && (j < new_nr) )
+    {
+        if ( old_info[i].device < new_info[j].device )
+        {
+            if ( xlvbd_remove_device(old_info[i].device) != 0 )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            i++;
+        }
+        else if ( old_info[i].device > new_info[j].device )
+        {
+            if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            j++;
+        }
+        else
+        {
+            if ( ((old_info[i].capacity == new_info[j].capacity) &&
+                  (old_info[i].info == new_info[j].info)) ||
+                 (xlvbd_remove_device(old_info[i].device) != 0) )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            else if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            i++; j++;
+        }
+    }
+
+    for ( ; i < old_nr; i++ )
+    {
+        if ( xlvbd_remove_device(old_info[i].device) != 0 )
+            memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+    }
+
+    for ( ; j < new_nr; j++ )
+    {
+        if ( xlvbd_init_device(&new_info[j]) == 0 )
+            memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+    }
+
+    vbd_info = merged_info;
+    nr_vbds  = k;
+
+    kfree(old_info);
+    kfree(new_info);
+}
+#endif
+
+/*
+ * Set up all the Linux device goop for the virtual block devices
+ * (VBDs) that we know about. Note that although from the backend
+ * driver's p.o.v. VBDs are addressed simply as an opaque 16-bit device
+ * number, the domain creation tools conventionally allocate these
+ * numbers to correspond to those used by 'real' Linux -- this is just
+ * for convenience as it means e.g. that the same /etc/fstab can be
+ * used when booting with or without Xen.
+ */
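+/*
+ * Illustrative example of the convention above: a VBD exported with device
+ * number 0x0301 maps to major 3, minor 1 -- i.e. /dev/hda1 inside the guest
+ * -- so an unmodified /etc/fstab keeps working.
+ */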
+int xlvbd_init(void)
+{
+       int i;
+
+       /*
+        * If compiled as a module, we don't support unloading yet. We
+        * therefore permanently increment the reference count to
+        * disallow it.
+        */
+       MOD_INC_USE_COUNT;
+
+       memset(major_info, 0, sizeof(major_info));
+
+       for (i = 0; i < sizeof(major_info) / sizeof(major_info[0]); i++) {
+       }
+
+       vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+       nr_vbds  = xlvbd_get_vbd_info(vbd_info);
+
+       if (nr_vbds < 0) {
+               kfree(vbd_info);
+               vbd_info = NULL;
+               nr_vbds  = 0;
+       } else {
+               for (i = 0; i < nr_vbds; i++)
+                       xlvbd_init_device(&vbd_info[i]);
+       }
+
+       return 0;
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/console/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/console/Makefile
new file mode 100644 (file)
index 0000000..e4ffdef
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := console.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/console/console.c b/linux-2.6.9-xen-sparse/drivers/xen/console/console.c
new file mode 100644 (file)
index 0000000..8309c25
--- /dev/null
@@ -0,0 +1,764 @@
+/******************************************************************************
+ * console.c
+ * 
+ * Virtual console driver.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser.
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+
+/*
+ * Modes:
+ *  'xencons=off'  [XC_OFF]:     Console is disabled.
+ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
+ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
+ *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
+ * 
+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
+ * warnings from standard distro startup scripts.
+ */
+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
+
+static int __init xencons_setup(char *str)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    if (str[0] == '=')
+       str++;
+#endif
+    if ( !strcmp(str, "tty") )
+        xc_mode = XC_TTY;
+    else if ( !strcmp(str, "ttyS") )
+        xc_mode = XC_SERIAL;
+    else if ( !strcmp(str, "off") )
+        xc_mode = XC_OFF;
+    return 1;
+}
+__setup("xencons", xencons_setup);
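+/*
+ * Example: booting the guest kernel with "xencons=ttyS" on its command line
+ * selects XC_SERIAL mode; xencons_init() below then registers the console
+ * tty as /dev/ttyS0 (major TTY_MAJOR, minor 64).
+ */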
+
+/* The kernel and user-land drivers share a common transmit buffer. */
+#define WBUF_SIZE     4096
+#define WBUF_MASK(_i) ((_i)&(WBUF_SIZE-1))
+static char wbuf[WBUF_SIZE];
+static unsigned int wc, wp; /* write_cons, write_prod */
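+/*
+ * NB. wc and wp are free-running indices: the amount of buffered data is
+ * always (wp - wc) and the free space is WBUF_SIZE - (wp - wc), even across
+ * wrap-around, because WBUF_SIZE is a power of two and WBUF_MASK() maps an
+ * index onto its buffer slot.
+ */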
+
+/* This lock protects accesses to the common transmit buffer. */
+static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
+
+/* Common transmit-kick routine. */
+static void __xencons_tx_flush(void);
+
+/* This task is used to defer sending console data until there is space. */
+static void xencons_tx_flush_task_routine(void *data);
+
+static DECLARE_TQUEUE(xencons_tx_flush_task, 
+                      xencons_tx_flush_task_routine,
+                      NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_driver *xencons_driver;
+#else
+static struct tty_driver xencons_driver;
+#endif
+
+
+/******************** Kernel console driver ********************************/
+
+static void kcons_write(
+    struct console *c, const char *s, unsigned int count)
+{
+    int           i;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    
+    for ( i = 0; i < count; i++ )
+    {
+        /* Reserve one slot of headroom: a '\n' is expanded to "\n\r" below. */
+        if ( (wp - wc) >= (WBUF_SIZE - 1) )
+            break;
+        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
+            wbuf[WBUF_MASK(wp++)] = '\r';
+    }
+
+    __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void kcons_write_dom0(
+    struct console *c, const char *s, unsigned int count)
+{
+    int rc;
+
+    while ( count > 0 )
+    {
+        if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write,
+                                         count, (char *)s)) > 0 )
+        {
+            count -= rc;
+            s += rc;
+        }
+       else
+           break;
+    }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_driver *kcons_device(struct console *c, int *index)
+{
+    *index = c->index;
+    return xencons_driver;
+}
+#else
+static kdev_t kcons_device(struct console *c)
+{
+    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
+}
+#endif
+
+static struct console kcons_info = {
+    device:  kcons_device,
+    flags:   CON_PRINTBUFFER,
+    index:   -1
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define __RETCODE 0
+static int __init xen_console_init(void)
+#else
+#define __RETCODE
+void xen_console_init(void)
+#endif
+{
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+    {
+        if ( xc_mode == XC_DEFAULT )
+            xc_mode = XC_SERIAL;
+        kcons_info.write = kcons_write_dom0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+       if ( xc_mode == XC_SERIAL )
+           kcons_info.flags |= CON_ENABLED;
+#endif
+    }
+    else
+    {
+        if ( xc_mode == XC_DEFAULT )
+            xc_mode = XC_TTY;
+        kcons_info.write = kcons_write;
+    }
+
+    if ( xc_mode == XC_OFF )
+        return __RETCODE;
+
+    if ( xc_mode == XC_SERIAL )
+        strcpy(kcons_info.name, "ttyS");
+    else
+        strcpy(kcons_info.name, "tty");
+
+    register_console(&kcons_info);
+    return __RETCODE;
+}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+console_initcall(xen_console_init);
+#endif
+
+/*** Useful function for console debugging -- goes straight to Xen. ***/
+asmlinkage int xprintk(const char *fmt, ...)
+{
+    va_list args;
+    int printk_len;
+    static char printk_buf[1024];
+    
+    /* Emit the output into the temporary buffer */
+    va_start(args, fmt);
+    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+    va_end(args);
+
+    /* Send the processed output directly to Xen. */
+    kcons_write_dom0(NULL, printk_buf, printk_len);
+
+    return 0;
+}
+
+/*** Forcibly flush console data before dying. ***/
+void xencons_force_flush(void)
+{
+    ctrl_msg_t msg;
+    int        sz;
+
+    /* Emergency console is synchronous, so there's nothing to flush. */
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+        return;
+
+    /*
+     * We use dangerous control-interface functions that require a quiescent
+     * system and no interrupts. Try to ensure this with a global cli().
+     */
+    cli();
+
+    /* Spin until console data is flushed through to the domain controller. */
+    while ( (wc != wp) && !ctrl_if_transmitter_empty() )
+    {
+        /* Interrupts are disabled -- we must manually reap responses. */
+        ctrl_if_discard_responses();
+
+        if ( (sz = wp - wc) == 0 )
+            continue;
+        if ( sz > sizeof(msg.msg) )
+            sz = sizeof(msg.msg);
+        if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+            sz = WBUF_SIZE - WBUF_MASK(wc);
+
+        msg.type    = CMSG_CONSOLE;
+        msg.subtype = CMSG_CONSOLE_DATA;
+        msg.length  = sz;
+        memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+            
+        if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+            wc += sz;
+    }
+}
+
+
+/******************** User-space console driver (/dev/console) ************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define DRV(_d)         (_d)
+#define TTY_INDEX(_tty) ((_tty)->index)
+#else
+static int xencons_refcount;
+static struct tty_struct *xencons_table[MAX_NR_CONSOLES];
+#define DRV(_d)         (&(_d))
+#define TTY_INDEX(_tty) (MINOR((_tty)->device) - xencons_driver.minor_start)
+#endif
+
+static struct termios *xencons_termios[MAX_NR_CONSOLES];
+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
+static struct tty_struct *xencons_tty;
+static int xencons_priv_irq;
+static char x_char;
+
+/* Non-privileged receive callback. */
+static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    int           i;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    if ( xencons_tty != NULL )
+    {
+        for ( i = 0; i < msg->length; i++ )
+            tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
+        tty_flip_buffer_push(xencons_tty);
+    }
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+/* Privileged and non-privileged transmit worker. */
+static void __xencons_tx_flush(void)
+{
+    int        sz, work_done = 0;
+    ctrl_msg_t msg;
+
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+    {
+        if ( x_char )
+        {
+            kcons_write_dom0(NULL, &x_char, 1);
+            x_char = 0;
+            work_done = 1;
+        }
+
+        while ( wc != wp )
+        {
+            sz = wp - wc;
+            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+                sz = WBUF_SIZE - WBUF_MASK(wc);
+            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
+            wc += sz;
+            work_done = 1;
+        }
+    }
+    else
+    {
+        while ( x_char )
+        {
+            msg.type    = CMSG_CONSOLE;
+            msg.subtype = CMSG_CONSOLE_DATA;
+            msg.length  = 1;
+            msg.msg[0]  = x_char;
+
+            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+                x_char = 0;
+            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
+                break;
+
+            work_done = 1;
+        }
+
+        while ( wc != wp )
+        {
+            sz = wp - wc;
+            if ( sz > sizeof(msg.msg) )
+                sz = sizeof(msg.msg);
+            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+                sz = WBUF_SIZE - WBUF_MASK(wc);
+
+            msg.type    = CMSG_CONSOLE;
+            msg.subtype = CMSG_CONSOLE_DATA;
+            msg.length  = sz;
+            memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+            
+            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+                wc += sz;
+            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
+                break;
+
+            work_done = 1;
+        }
+    }
+
+    if ( work_done && (xencons_tty != NULL) )
+    {
+        wake_up_interruptible(&xencons_tty->write_wait);
+        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+             (xencons_tty->ldisc.write_wakeup != NULL) )
+            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
+    }
+}
+
+/* Non-privileged transmit kicker. */
+static void xencons_tx_flush_task_routine(void *data)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&xencons_lock, flags);
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+/* Privileged receive callback and transmit kicker. */
+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
+                                          struct pt_regs *regs)
+{
+    static char   rbuf[16];
+    int           i, l;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+
+    if ( xencons_tty != NULL )
+    {
+        /* Receive work. */
+        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
+            for ( i = 0; i < l; i++ )
+                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
+        if ( xencons_tty->flip.count != 0 )
+            tty_flip_buffer_push(xencons_tty);
+    }
+
+    /* Transmit work. */
+    __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    return IRQ_HANDLED;
+}
+
+static int xencons_write_room(struct tty_struct *tty)
+{
+    return WBUF_SIZE - (wp - wc);
+}
+
+static int xencons_chars_in_buffer(struct tty_struct *tty)
+{
+    return wp - wc;
+}
+
+static void xencons_send_xchar(struct tty_struct *tty, char ch)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    x_char = ch;
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_throttle(struct tty_struct *tty)
+{
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    if ( I_IXOFF(tty) )
+        xencons_send_xchar(tty, STOP_CHAR(tty));
+}
+
+static void xencons_unthrottle(struct tty_struct *tty)
+{
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    if ( I_IXOFF(tty) )
+    {
+        if ( x_char != 0 )
+            x_char = 0;
+        else
+            xencons_send_xchar(tty, START_CHAR(tty));
+    }
+}
+
+static void xencons_flush_buffer(struct tty_struct *tty)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    wc = wp = 0;
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static inline int __xencons_put_char(int ch)
+{
+    char _ch = (char)ch;
+    if ( (wp - wc) == WBUF_SIZE )
+        return 0;
+    wbuf[WBUF_MASK(wp++)] = _ch;
+    return 1;
+}
+
+static int xencons_write(struct tty_struct *tty, int from_user,
+                       const u_char * buf, int count)
+{
+    int i;
+    unsigned long flags;
+
+    if ( from_user && verify_area(VERIFY_READ, buf, count) )
+        return -EINVAL;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return count;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+
+    for ( i = 0; i < count; i++ )
+    {
+        char ch;
+        if ( from_user )
+            __get_user(ch, buf + i);
+        else
+            ch = buf[i];
+        if ( !__xencons_put_char(ch) )
+            break;
+    }
+
+    if ( i != 0 )
+        __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    return i;
+}
+
+static void xencons_put_char(struct tty_struct *tty, u_char ch)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    (void)__xencons_put_char(ch);
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_flush_chars(struct tty_struct *tty)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);    
+}
+
+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+    unsigned long orig_jiffies = jiffies;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    while ( DRV(tty->driver)->chars_in_buffer(tty) )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+        if ( signal_pending(current) )
+            break;
+        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
+            break;
+    }
+    
+    set_current_state(TASK_RUNNING);
+}
+
+static int xencons_open(struct tty_struct *tty, struct file *filp)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return 0;
+
+    MOD_INC_USE_COUNT;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    tty->driver_data = NULL;
+    if ( xencons_tty == NULL )
+        xencons_tty = tty;
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);    
+
+    return 0;
+}
+
+static void xencons_close(struct tty_struct *tty, struct file *filp)
+{
+    unsigned long flags;
+
+    if ( TTY_INDEX(tty) != 0 )
+        return;
+
+    if ( tty->count == 1 )
+    {
+        tty->closing = 1;
+        tty_wait_until_sent(tty, 0);
+        if ( DRV(tty->driver)->flush_buffer != NULL )
+            DRV(tty->driver)->flush_buffer(tty);
+        if ( tty->ldisc.flush_buffer != NULL )
+            tty->ldisc.flush_buffer(tty);
+        tty->closing = 0;
+        spin_lock_irqsave(&xencons_lock, flags);
+        xencons_tty = NULL;
+        spin_unlock_irqrestore(&xencons_lock, flags);    
+    }
+
+    MOD_DEC_USE_COUNT;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_operations xencons_ops = {
+    .open = xencons_open,
+    .close = xencons_close,
+    .write = xencons_write,
+    .write_room = xencons_write_room,
+    .put_char = xencons_put_char,
+    .flush_chars = xencons_flush_chars,
+    .chars_in_buffer = xencons_chars_in_buffer,
+    .send_xchar = xencons_send_xchar,
+    .flush_buffer = xencons_flush_buffer,
+    .throttle = xencons_throttle,
+    .unthrottle = xencons_unthrottle,
+    .wait_until_sent = xencons_wait_until_sent,
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static const char *xennullcon_startup(void)
+{
+    return NULL;
+}
+
+static int xennullcon_dummy(void)
+{
+    return 0;
+}
+
+#define DUMMY  (void *)xennullcon_dummy
+
+/*
+ *  The console `switch' structure for the dummy console
+ *
+ *  Most of the operations are dummies.
+ */
+
+const struct consw xennull_con = {
+    .owner =           THIS_MODULE,
+    .con_startup =     xennullcon_startup,
+    .con_init =                DUMMY,
+    .con_deinit =      DUMMY,
+    .con_clear =       DUMMY,
+    .con_putc =                DUMMY,
+    .con_putcs =       DUMMY,
+    .con_cursor =      DUMMY,
+    .con_scroll =      DUMMY,
+    .con_bmove =       DUMMY,
+    .con_switch =      DUMMY,
+    .con_blank =       DUMMY,
+    .con_font_set =    DUMMY,
+    .con_font_get =    DUMMY,
+    .con_font_default =        DUMMY,
+    .con_font_copy =   DUMMY,
+    .con_set_palette = DUMMY,
+    .con_scrolldelta = DUMMY,
+};
+#endif
+#endif
+
+static int __init xencons_init(void)
+{
+    if ( xc_mode == XC_OFF )
+        return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
+                                      1 : MAX_NR_CONSOLES);
+    if ( xencons_driver == NULL )
+        return -ENOMEM;
+#else
+    memset(&xencons_driver, 0, sizeof(struct tty_driver));
+    xencons_driver.magic       = TTY_DRIVER_MAGIC;
+    xencons_driver.refcount    = &xencons_refcount;
+    xencons_driver.table       = xencons_table;
+    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
+#endif
+
+    DRV(xencons_driver)->major           = TTY_MAJOR;
+    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
+    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
+    DRV(xencons_driver)->init_termios    = tty_std_termios;
+    DRV(xencons_driver)->flags           = 
+        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
+    DRV(xencons_driver)->termios         = xencons_termios;
+    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
+
+    if ( xc_mode == XC_SERIAL )
+    {
+        DRV(xencons_driver)->name        = "ttyS";
+        DRV(xencons_driver)->minor_start = 64;
+       DRV(xencons_driver)->name_base   = 0;
+    }
+    else
+    {
+        DRV(xencons_driver)->name        = "tty";
+        DRV(xencons_driver)->minor_start = 1;
+       DRV(xencons_driver)->name_base   = 1;
+    }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    tty_set_operations(xencons_driver, &xencons_ops);
+#else
+    xencons_driver.open            = xencons_open;
+    xencons_driver.close           = xencons_close;
+    xencons_driver.write           = xencons_write;
+    xencons_driver.write_room      = xencons_write_room;
+    xencons_driver.put_char        = xencons_put_char;
+    xencons_driver.flush_chars     = xencons_flush_chars;
+    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
+    xencons_driver.send_xchar      = xencons_send_xchar;
+    xencons_driver.flush_buffer    = xencons_flush_buffer;
+    xencons_driver.throttle        = xencons_throttle;
+    xencons_driver.unthrottle      = xencons_unthrottle;
+    xencons_driver.wait_until_sent = xencons_wait_until_sent;
+#endif
+
+    if ( tty_register_driver(DRV(xencons_driver)) )
+        panic("Couldn't register Xen virtual console driver as %s\n",
+              DRV(xencons_driver)->name);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    tty_register_device(xencons_driver, 0, NULL);
+#endif
+
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+    {
+        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+        (void)request_irq(xencons_priv_irq,
+                          xencons_priv_interrupt, 0, "console", NULL);
+    }
+    else
+    {
+        (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
+    }
+
+    printk("Xen virtual console successfully installed as %s\n",
+           DRV(xencons_driver)->name);
+    
+    return 0;
+}
+
+static void __exit xencons_fini(void)
+{
+    int ret;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    tty_unregister_device(xencons_driver, 0);
+#endif
+
+    if ( (ret = tty_unregister_driver(DRV(xencons_driver))) != 0 )
+        printk(KERN_ERR "Unable to unregister Xen console driver: %d\n", ret);
+
+    if ( xen_start_info.flags & SIF_INITDOMAIN )
+    {
+        free_irq(xencons_priv_irq, NULL);
+        unbind_virq_from_irq(VIRQ_CONSOLE);
+    }
+    else
+    {
+        ctrl_if_unregister_receiver(CMSG_CONSOLE, xencons_rx);
+    }
+}
+
+module_init(xencons_init);
+module_exit(xencons_fini);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/evtchn/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/evtchn/Makefile
new file mode 100644 (file)
index 0000000..7b082a0
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := evtchn.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c b/linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c
new file mode 100644 (file)
index 0000000..60170e1
--- /dev/null
@@ -0,0 +1,402 @@
+/******************************************************************************
+ * evtchn.c
+ * 
+ * Xenolinux driver for receiving and demuxing event-channel signals.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/poll.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <asm-xen/evtchn.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#include <linux/devfs_fs_kernel.h>
+#define OLD_DEVFS
+#else
+#include <linux/gfp.h>
+#endif
+
+#ifdef OLD_DEVFS
+/* NB. This must be shared amongst drivers if more things go in /dev/xen */
+static devfs_handle_t xen_dev_dir;
+#endif
+
+/* Only one process may open /dev/xen/evtchn at any time. */
+static unsigned long evtchn_dev_inuse;
+
+/* Notification ring, accessed via /dev/xen/evtchn. */
+#define RING_SIZE     2048  /* 2048 16-bit entries */
+#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+static u16 *ring;
+static unsigned int ring_cons, ring_prod, ring_overflow;
+
+/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
+static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
+static struct fasync_struct *evtchn_async_queue;
+
+/* Which ports is user-space bound to? */
+static u32 bound_ports[32];
+
+static spinlock_t lock;
+
+void evtchn_device_upcall(int port)
+{
+    spin_lock(&lock);
+
+    mask_evtchn(port);
+    clear_evtchn(port);
+
+    if ( ring != NULL )
+    {
+        if ( (ring_prod - ring_cons) < RING_SIZE )
+        {
+            ring[RING_MASK(ring_prod)] = (u16)port;
+            if ( ring_cons == ring_prod++ )
+            {
+                wake_up_interruptible(&evtchn_wait);
+                kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
+            }
+        }
+        else
+        {
+            ring_overflow = 1;
+        }
+    }
+
+    spin_unlock(&lock);
+}
+
+static void __evtchn_reset_buffer_ring(void)
+{
+    /* Initialise the ring to empty. Clear errors. */
+    ring_cons = ring_prod = ring_overflow = 0;
+}
+
+static ssize_t evtchn_read(struct file *file, char *buf,
+                           size_t count, loff_t *ppos)
+{
+    int rc;
+    unsigned int c, p, bytes1 = 0, bytes2 = 0;
+    DECLARE_WAITQUEUE(wait, current);
+
+    add_wait_queue(&evtchn_wait, &wait);
+
+    count &= ~1; /* even number of bytes */
+
+    if ( count == 0 )
+    {
+        rc = 0;
+        goto out;
+    }
+
+    if ( count > PAGE_SIZE )
+        count = PAGE_SIZE;
+
+    for ( ; ; )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+
+        if ( (c = ring_cons) != (p = ring_prod) )
+            break;
+
+        if ( ring_overflow )
+        {
+            rc = -EFBIG;
+            goto out;
+        }
+
+        if ( file->f_flags & O_NONBLOCK )
+        {
+            rc = -EAGAIN;
+            goto out;
+        }
+
+        if ( signal_pending(current) )
+        {
+            rc = -ERESTARTSYS;
+            goto out;
+        }
+
+        schedule();
+    }
+
+    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+    if ( ((c ^ p) & RING_SIZE) != 0 )
+    {
+        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
+        bytes2 = RING_MASK(p) * sizeof(u16);
+    }
+    else
+    {
+        bytes1 = (p - c) * sizeof(u16);
+        bytes2 = 0;
+    }
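+    /*
+     * (The test above works because RING_SIZE is a power of two and the
+     * indices are free-running: the RING_SIZE bit of cons and prod differs
+     * exactly when the unread region crosses the end of the ring buffer.)
+     */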
+
+    /* Truncate chunks according to caller's maximum byte count. */
+    if ( bytes1 > count )
+    {
+        bytes1 = count;
+        bytes2 = 0;
+    }
+    else if ( (bytes1 + bytes2) > count )
+    {
+        bytes2 = count - bytes1;
+    }
+
+    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
+         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+    rc = bytes1 + bytes2;
+
+ out:
+    __set_current_state(TASK_RUNNING);
+    remove_wait_queue(&evtchn_wait, &wait);
+    return rc;
+}
+
+static ssize_t evtchn_write(struct file *file, const char *buf,
+                            size_t count, loff_t *ppos)
+{
+    int  rc, i;
+    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+
+    if ( kbuf == NULL )
+        return -ENOMEM;
+
+    count &= ~1; /* even number of bytes */
+
+    if ( count == 0 )
+    {
+        rc = 0;
+        goto out;
+    }
+
+    if ( count > PAGE_SIZE )
+        count = PAGE_SIZE;
+
+    if ( copy_from_user(kbuf, buf, count) != 0 )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    spin_lock_irq(&lock);
+    for ( i = 0; i < (count/2); i++ )
+        if ( test_bit(kbuf[i], (unsigned long *)&bound_ports[0]) )
+            unmask_evtchn(kbuf[i]);
+    spin_unlock_irq(&lock);
+
+    rc = count;
+
+ out:
+    free_page((unsigned long)kbuf);
+    return rc;
+}
+
+static int evtchn_ioctl(struct inode *inode, struct file *file,
+                        unsigned int cmd, unsigned long arg)
+{
+    int rc = 0;
+    
+    spin_lock_irq(&lock);
+    
+    switch ( cmd )
+    {
+    case EVTCHN_RESET:
+        __evtchn_reset_buffer_ring();
+        break;
+    case EVTCHN_BIND:
+        if ( !test_and_set_bit(arg, (unsigned long *)&bound_ports[0]) )
+            unmask_evtchn(arg);
+        else
+            rc = -EINVAL;
+        break;
+    case EVTCHN_UNBIND:
+        if ( test_and_clear_bit(arg, (unsigned long *)&bound_ports[0]) )
+            mask_evtchn(arg);
+        else
+            rc = -EINVAL;
+        break;
+    default:
+        rc = -ENOSYS;
+        break;
+    }
+
+    spin_unlock_irq(&lock);   
+
+    return rc;
+}
+
+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+{
+    unsigned int mask = POLLOUT | POLLWRNORM;
+    poll_wait(file, &evtchn_wait, wait);
+    if ( ring_cons != ring_prod )
+        mask |= POLLIN | POLLRDNORM;
+    if ( ring_overflow )
+        mask = POLLERR;
+    return mask;
+}
+
+static int evtchn_fasync(int fd, struct file *filp, int on)
+{
+    return fasync_helper(fd, filp, on, &evtchn_async_queue);
+}
+
+static int evtchn_open(struct inode *inode, struct file *filp)
+{
+    u16 *_ring;
+
+    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
+        return -EBUSY;
+
+    /* Allocate outside locked region so that we can use GFP_KERNEL. */
+    if ( (_ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+        return -ENOMEM;
+
+    spin_lock_irq(&lock);
+    ring = _ring;
+    __evtchn_reset_buffer_ring();
+    spin_unlock_irq(&lock);
+
+    MOD_INC_USE_COUNT;
+
+    return 0;
+}
+
+static int evtchn_release(struct inode *inode, struct file *filp)
+{
+    int i;
+
+    spin_lock_irq(&lock);
+    if ( ring != NULL )
+    {
+        free_page((unsigned long)ring);
+        ring = NULL;
+    }
+    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+        if ( test_and_clear_bit(i, (unsigned long *)&bound_ports[0]) )
+            mask_evtchn(i);
+    spin_unlock_irq(&lock);
+
+    evtchn_dev_inuse = 0;
+
+    MOD_DEC_USE_COUNT;
+
+    return 0;
+}
+
+static struct file_operations evtchn_fops = {
+    owner:    THIS_MODULE,
+    read:     evtchn_read,
+    write:    evtchn_write,
+    ioctl:    evtchn_ioctl,
+    poll:     evtchn_poll,
+    fasync:   evtchn_fasync,
+    open:     evtchn_open,
+    release:  evtchn_release
+};
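+
+/*
+ * Illustrative user-space usage (a sketch only, not part of this driver):
+ *
+ *     int fd = open("/dev/xen/evtchn", O_RDWR);
+ *     ioctl(fd, EVTCHN_BIND, port);            -- start listening on 'port'
+ *     u16 pending[128];
+ *     n = read(fd, pending, sizeof(pending));  -- returns pending port numbers
+ *     write(fd, pending, n);                   -- unmask the ports just handled
+ */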
+
+static struct miscdevice evtchn_miscdev = {
+    .minor        = EVTCHN_MINOR,
+    .name         = "evtchn",
+    .fops         = &evtchn_fops,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    .devfs_name   = "misc/evtchn",
+#endif
+};
+
+static int __init evtchn_init(void)
+{
+#ifdef OLD_DEVFS
+    devfs_handle_t symlink_handle;
+    int            pos;
+    char           link_dest[64];
+#endif
+    int err;
+
+    /* (DEVFS) create '/dev/misc/evtchn'. */
+    err = misc_register(&evtchn_miscdev);
+    if ( err != 0 )
+    {
+        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+        return err;
+    }
+
+#ifdef OLD_DEVFS
+    /* (DEVFS) create directory '/dev/xen'. */
+    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
+
+    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
+    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
+                              &link_dest[3], 
+                              sizeof(link_dest) - 3);
+    if ( pos >= 0 )
+        strncpy(&link_dest[pos], "../", 3);
+
+    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
+    (void)devfs_mk_symlink(xen_dev_dir, 
+                           "evtchn", 
+                           DEVFS_FL_DEFAULT, 
+                           &link_dest[pos],
+                           &symlink_handle, 
+                           NULL);
+
+    /* (DEVFS) automatically destroy the symlink with its destination. */
+    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
+#endif
+
+    printk("Event-channel device installed.\n");
+
+    return 0;
+}
+
+static void evtchn_cleanup(void)
+{
+    misc_deregister(&evtchn_miscdev);
+}
+
+module_init(evtchn_init);
+module_exit(evtchn_cleanup);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netback/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/netback/Makefile
new file mode 100644 (file)
index 0000000..3279442
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := netback.o control.o interface.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netback/common.h b/linux-2.6.9-xen-sparse/drivers/xen/netback/common.h
new file mode 100644 (file)
index 0000000..6646f53
--- /dev/null
@@ -0,0 +1,99 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/common.h
+ */
+
+#ifndef __NETIF__BACKEND__COMMON_H__
+#define __NETIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor-ifs/io/netif.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+
+#if 0
+#define ASSERT(_p) \
+    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+    __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+                           __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+typedef struct netif_st {
+    /* Unique identifier for this interface. */
+    domid_t          domid;
+    unsigned int     handle;
+
+    /* Physical parameters of the comms window. */
+    unsigned long    tx_shmem_frame;
+    unsigned long    rx_shmem_frame;
+    unsigned int     evtchn;
+    int              irq;
+
+    /* The shared rings and indexes. */
+    netif_tx_interface_t *tx;
+    netif_rx_interface_t *rx;
+
+    /* Private indexes into shared ring. */
+    NETIF_RING_IDX rx_req_cons;
+    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
+    NETIF_RING_IDX tx_req_cons;
+    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+
+    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+    unsigned long   credit_bytes;
+    unsigned long   credit_usec;
+    unsigned long   remaining_credit;
+    struct timer_list credit_timeout;
+
+    /* Miscellaneous private stuff. */
+    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+    /*
+     * DISCONNECT response is deferred until pending requests are ack'ed.
+     * We therefore need to store the id from the original request.
+     */
+    u8               disconnect_rspid;
+    struct netif_st *hash_next;
+    struct list_head list;  /* scheduling list */
+    atomic_t         refcnt;
+    spinlock_t       rx_lock, tx_lock;
+    struct net_device *dev;
+    struct net_device_stats stats;
+
+    struct work_struct work;
+} netif_t;
+
+void netif_create(netif_be_create_t *create);
+void netif_destroy(netif_be_destroy_t *destroy);
+void netif_connect(netif_be_connect_t *connect);
+int  netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
+void netif_disconnect_complete(netif_t *netif);
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define netif_put(_b)                             \
+    do {                                          \
+        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+            netif_disconnect_complete(_b);        \
+    } while (0)
+
+void netif_interface_init(void);
+void netif_ctrlif_init(void);
+
+void netif_deschedule(netif_t *netif);
+
+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __NETIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netback/control.c b/linux-2.6.9-xen-sparse/drivers/xen/netback/control.c
new file mode 100644 (file)
index 0000000..6ca0ba2
--- /dev/null
@@ -0,0 +1,65 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/control.c
+ * 
+ * Routines for interfacing with the control plane.
+ * 
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    switch ( msg->subtype )
+    {
+    case CMSG_NETIF_BE_CREATE:
+        if ( msg->length != sizeof(netif_be_create_t) )
+            goto parse_error;
+        netif_create((netif_be_create_t *)&msg->msg[0]);
+        break;        
+    case CMSG_NETIF_BE_DESTROY:
+        if ( msg->length != sizeof(netif_be_destroy_t) )
+            goto parse_error;
+        netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
+        break;        
+    case CMSG_NETIF_BE_CONNECT:
+        if ( msg->length != sizeof(netif_be_connect_t) )
+            goto parse_error;
+        netif_connect((netif_be_connect_t *)&msg->msg[0]);
+        break;        
+    case CMSG_NETIF_BE_DISCONNECT:
+        if ( msg->length != sizeof(netif_be_disconnect_t) )
+            goto parse_error;
+        if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
+            return; /* Sending the response is deferred until later. */
+        break;        
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    DPRINTK("Parse error while reading message subtype %d, len %d\n",
+            msg->subtype, msg->length);
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+void netif_ctrlif_init(void)
+{
+    ctrl_msg_t cmsg;
+    netif_be_driver_status_t st;
+
+    (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type      = CMSG_NETIF_BE;
+    cmsg.subtype   = CMSG_NETIF_BE_DRIVER_STATUS;
+    cmsg.length    = sizeof(netif_be_driver_status_t);
+    st.status      = NETIF_DRIVER_STATUS_UP;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netback/interface.c b/linux-2.6.9-xen-sparse/drivers/xen/netback/interface.c
new file mode 100644 (file)
index 0000000..406d253
--- /dev/null
@@ -0,0 +1,297 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/interface.c
+ * 
+ * Network-device interface management.
+ * 
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+#include <linux/rtnetlink.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+#define NETIF_HASHSZ 1024
+#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
+
+static netif_t *netif_hash[NETIF_HASHSZ];
+
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
+{
+    netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
+    while ( (netif != NULL) && 
+            ((netif->domid != domid) || (netif->handle != handle)) )
+        netif = netif->hash_next;
+    return netif;
+}
+
+static void __netif_disconnect_complete(void *arg)
+{
+    netif_t              *netif = (netif_t *)arg;
+    ctrl_msg_t            cmsg;
+    netif_be_disconnect_t disc;
+
+    /*
+     * These can't be done in netif_disconnect() because at that point there
+     * may be outstanding requests on the interface whose asynchronous responses
+     * must still be notified to the remote driver.
+     */
+    unbind_evtchn_from_irq(netif->evtchn);
+    vfree(netif->tx); /* Frees netif->rx as well. */
+    rtnl_lock();
+    (void)dev_close(netif->dev);
+    rtnl_unlock();
+
+    /* Construct the deferred response message. */
+    cmsg.type         = CMSG_NETIF_BE;
+    cmsg.subtype      = CMSG_NETIF_BE_DISCONNECT;
+    cmsg.id           = netif->disconnect_rspid;
+    cmsg.length       = sizeof(netif_be_disconnect_t);
+    disc.domid        = netif->domid;
+    disc.netif_handle = netif->handle;
+    disc.status       = NETIF_BE_STATUS_OKAY;
+    memcpy(cmsg.msg, &disc, sizeof(disc));
+
+    /*
+     * Make sure message is constructed /before/ status change, because
+     * after the status change the 'netif' structure could be deallocated at
+     * any time. Also make sure we send the response /after/ status change,
+     * as otherwise a subsequent CONNECT request could spuriously fail if
+     * another CPU doesn't see the status change yet.
+     */
+    mb();
+    if ( netif->status != DISCONNECTING )
+        BUG();
+    netif->status = DISCONNECTED;
+    mb();
+
+    /* Send the successful response. */
+    ctrl_if_send_response(&cmsg);
+}
+
+void netif_disconnect_complete(netif_t *netif)
+{
+    INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
+    schedule_work(&netif->work);
+}
+
+void netif_create(netif_be_create_t *create)
+{
+    int                err = 0;
+    domid_t            domid  = create->domid;
+    unsigned int       handle = create->netif_handle;
+    struct net_device *dev;
+    netif_t          **pnetif, *netif;
+    char               name[IFNAMSIZ] = {};
+
+    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+    if ( dev == NULL )
+    {
+        DPRINTK("Could not create netif: out of memory\n");
+        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    netif = dev->priv;
+    memset(netif, 0, sizeof(*netif));
+    netif->domid  = domid;
+    netif->handle = handle;
+    netif->status = DISCONNECTED;
+    spin_lock_init(&netif->rx_lock);
+    spin_lock_init(&netif->tx_lock);
+    atomic_set(&netif->refcnt, 0);
+    netif->dev = dev;
+
+    netif->credit_bytes = netif->remaining_credit = ~0UL;
+    netif->credit_usec  = 0UL;
+    /*init_ac_timer(&new_vif->credit_timeout);*/
+
+    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+    while ( *pnetif != NULL )
+    {
+        if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
+        {
+            DPRINTK("Could not create netif: already exists\n");
+            create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
+            free_netdev(dev);
+            return;
+        }
+        pnetif = &(*pnetif)->hash_next;
+    }
+
+    dev->hard_start_xmit = netif_be_start_xmit;
+    dev->get_stats       = netif_be_get_stats;
+    memcpy(dev->dev_addr, create->mac, ETH_ALEN);
+
+    /* Disable queuing. */
+    dev->tx_queue_len = 0;
+
+    /* Force a different MAC from remote end. */
+    dev->dev_addr[2] ^= 1;
+
+    if ( (err = register_netdev(dev)) != 0 )
+    {
+        DPRINTK("Could not register new net device %s: err=%d\n",
+                dev->name, err);
+        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+        free_netdev(dev);
+        return;
+    }
+
+    netif->hash_next = *pnetif;
+    *pnetif = netif;
+
+    DPRINTK("Successfully created netif\n");
+    create->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_destroy(netif_be_destroy_t *destroy)
+{
+    domid_t       domid  = destroy->domid;
+    unsigned int  handle = destroy->netif_handle;
+    netif_t     **pnetif, *netif;
+
+    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+    while ( (netif = *pnetif) != NULL )
+    {
+        if ( (netif->domid == domid) && (netif->handle == handle) )
+        {
+            if ( netif->status != DISCONNECTED )
+                goto still_connected;
+            goto destroy;
+        }
+        pnetif = &netif->hash_next;
+    }
+
+    destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+    return;
+
+ still_connected:
+    destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+    return;
+
+ destroy:
+    *pnetif = netif->hash_next;
+    unregister_netdev(netif->dev);
+    free_netdev(netif->dev);
+    destroy->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_connect(netif_be_connect_t *connect)
+{
+    domid_t       domid  = connect->domid;
+    unsigned int  handle = connect->netif_handle;
+    unsigned int  evtchn = connect->evtchn;
+    unsigned long tx_shmem_frame = connect->tx_shmem_frame;
+    unsigned long rx_shmem_frame = connect->rx_shmem_frame;
+    struct vm_struct *vma;
+    pgprot_t      prot;
+    int           error;
+    netif_t      *netif;
+#if 0
+    struct net_device *eth0_dev;
+#endif
+
+    netif = netif_find_by_handle(domid, handle);
+    if ( unlikely(netif == NULL) )
+    {
+        DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n", 
+                connect->domid, connect->netif_handle); 
+        connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    if ( netif->status != DISCONNECTED )
+    {
+        connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+        return;
+    }
+
+    if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
+    {
+        connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+    error  = direct_remap_area_pages(&init_mm, 
+                                     VMALLOC_VMADDR(vma->addr),
+                                     tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+                                     prot, domid);
+    error |= direct_remap_area_pages(&init_mm, 
+                                     VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
+                                     rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+                                     prot, domid);
+    if ( error != 0 )
+    {
+        if ( error == -ENOMEM )
+            connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+        else if ( error == -EFAULT )
+            connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
+        else
+            connect->status = NETIF_BE_STATUS_ERROR;
+        vfree(vma->addr);
+        return;
+    }
+
+    netif->evtchn         = evtchn;
+    netif->irq            = bind_evtchn_to_irq(evtchn);
+    netif->tx_shmem_frame = tx_shmem_frame;
+    netif->rx_shmem_frame = rx_shmem_frame;
+    netif->tx             = 
+        (netif_tx_interface_t *)vma->addr;
+    netif->rx             = 
+        (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+    netif->status         = CONNECTED;
+    netif_get(netif);
+
+    netif->tx->resp_prod = netif->rx->resp_prod = 0;
+
+    rtnl_lock();
+    (void)dev_open(netif->dev);
+    rtnl_unlock();
+
+    (void)request_irq(netif->irq, netif_be_int, 0, netif->dev->name, netif);
+    netif_start_queue(netif->dev);
+
+    connect->status = NETIF_BE_STATUS_OKAY;
+}
+
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+    domid_t       domid  = disconnect->domid;
+    unsigned int  handle = disconnect->netif_handle;
+    netif_t      *netif;
+
+    netif = netif_find_by_handle(domid, handle);
+    if ( unlikely(netif == NULL) )
+    {
+        DPRINTK("netif_disconnect attempted for non-existent netif"
+                " (%u,%u)\n", disconnect->domid, disconnect->netif_handle); 
+        disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return 1; /* Caller will send response error message. */
+    }
+
+    if ( netif->status == CONNECTED )
+    {
+        netif->status = DISCONNECTING;
+        netif->disconnect_rspid = rsp_id;
+        wmb(); /* Let other CPUs see the status change. */
+        netif_stop_queue(netif->dev);
+        free_irq(netif->irq, netif);
+        netif_deschedule(netif);
+        netif_put(netif);
+        return 0; /* Caller should not send response message. */
+    }
+
+    disconnect->status = NETIF_BE_STATUS_OKAY;
+    return 1;
+}
+
+void netif_interface_init(void)
+{
+    memset(netif_hash, 0, sizeof(netif_hash));
+}
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6.9-xen-sparse/drivers/xen/netback/netback.c
new file mode 100644 (file)
index 0000000..143ccff
--- /dev/null
@@ -0,0 +1,823 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/main.c
+ * 
+ * Back-end of the driver for virtual network interfaces. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A 
+ * reference front-end implementation can be found in:
+ *  arch/xen/drivers/netif/frontend
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ */
+
+#include "common.h"
+
+static void netif_page_release(struct page *page);
+static void netif_skb_release(struct sk_buff *skb);
+static void make_tx_response(netif_t *netif, 
+                             u16      id,
+                             s8       st);
+static int  make_rx_response(netif_t *netif, 
+                             u16      id, 
+                             s8       st,
+                             memory_t addr,
+                             u16      size);
+
+static void net_tx_action(unsigned long unused);
+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+static void net_rx_action(unsigned long unused);
+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+
+static struct sk_buff_head rx_queue;
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
+static unsigned char rx_notify[NR_EVENT_CHANNELS];
+
+/* Don't currently gate addition of an interface to the tx scheduling list. */
+#define tx_work_exists(_if) (1)
+
+#define MAX_PENDING_REQS 256
+static unsigned long mmap_vstart;
+#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
+
+#define PKT_MIN_LEN (ETH_HLEN + 20)
+#define PKT_PROT_LEN 64
+
+static struct {
+    netif_tx_request_t req;
+    netif_t *netif;
+} pending_tx_info[MAX_PENDING_REQS];
+static u16 pending_ring[MAX_PENDING_REQS];
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
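+/*
+ * pending_ring holds the indexes of free pending_tx_info[] slots: entries in
+ * [pending_cons, pending_prod) are available, so NR_PENDING_REQS above is the
+ * number of transmit requests currently in flight.
+ */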
+
+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
+static u16 dealloc_ring[MAX_PENDING_REQS];
+static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
+static PEND_RING_IDX dealloc_prod, dealloc_cons;
+
+static struct sk_buff_head tx_queue;
+static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
+
+static struct list_head net_schedule_list;
+static spinlock_t net_schedule_list_lock;
+
+#define MAX_MFN_ALLOC 64
+static unsigned long mfn_list[MAX_MFN_ALLOC];
+static unsigned int alloc_index = 0;
+static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
+
+static void __refresh_mfn_list(void)
+{
+    int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+                                    mfn_list, MAX_MFN_ALLOC, 0);
+    if ( unlikely(ret != MAX_MFN_ALLOC) )
+        BUG();
+    alloc_index = MAX_MFN_ALLOC;
+}
+
+static unsigned long get_new_mfn(void)
+{
+    unsigned long mfn, flags;
+    spin_lock_irqsave(&mfn_lock, flags);
+    if ( alloc_index == 0 )
+        __refresh_mfn_list();
+    mfn = mfn_list[--alloc_index];
+    spin_unlock_irqrestore(&mfn_lock, flags);
+    return mfn;
+}
+
+static void dealloc_mfn(unsigned long mfn)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&mfn_lock, flags);
+    if ( alloc_index != MAX_MFN_ALLOC )
+        mfn_list[alloc_index++] = mfn;
+    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+                                    &mfn, 1, 0) != 1 )
+        BUG();
+    spin_unlock_irqrestore(&mfn_lock, flags);
+}
+
+static inline void maybe_schedule_tx_action(void)
+{
+    smp_mb();
+    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+         !list_empty(&net_schedule_list) )
+        tasklet_schedule(&net_tx_tasklet);
+}
+
+/*
+ * A gross way of confirming the origin of an skb data page. The slab
+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
+ */
+static inline int is_xen_skb(struct sk_buff *skb)
+{
+    extern kmem_cache_t *skbuff_cachep;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+#else
+    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
+#endif
+    return (cp == skbuff_cachep);
+}
+
+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    netif_t *netif = (netif_t *)dev->priv;
+
+    ASSERT(skb->dev == dev);
+
+    /* Drop the packet if the target domain has no receive buffers. */
+    if ( (netif->rx_req_cons == netif->rx->req_prod) ||
+         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
+        goto drop;
+
+    /*
+     * We do not copy the packet unless:
+     *  1. The data is shared; or
+     *  2. The data is not allocated from our special cache.
+     * NB. We also cannot cope with fragmented packets, but we won't get
+     *     any because we do not advertise the NETIF_F_SG feature.
+     */
+    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
+    {
+        int hlen = skb->data - skb->head;
+        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+        if ( unlikely(nskb == NULL) )
+            goto drop;
+        skb_reserve(nskb, hlen);
+        __skb_put(nskb, skb->len);
+        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
+        nskb->dev = skb->dev;
+        dev_kfree_skb(skb);
+        skb = nskb;
+    }
+
+    netif->rx_req_cons++;
+
+    skb_queue_tail(&rx_queue, skb);
+    tasklet_schedule(&net_rx_tasklet);
+
+    return 0;
+
+ drop:
+    netif->stats.tx_dropped++;
+    dev_kfree_skb(skb);
+    return 0;
+}
+
+#if 0
+static void xen_network_done_notify(void)
+{
+    static struct net_device *eth0_dev = NULL;
+    if ( unlikely(eth0_dev == NULL) )
+        eth0_dev = __dev_get_by_name("eth0");
+    netif_rx_schedule(eth0_dev);
+}
+/* 
+ * Add following to poll() function in NAPI driver (Tigon3 is example):
+ *  if ( xen_network_done() )
+ *      tg3_enable_ints(tp); 
+ */
+int xen_network_done(void)
+{
+    return skb_queue_empty(&rx_queue);
+}
+#endif
+
+static void net_rx_action(unsigned long unused)
+{
+    netif_t *netif;
+    s8 status;
+    u16 size, id, evtchn;
+    mmu_update_t *mmu;
+    multicall_entry_t *mcl;
+    unsigned long vdata, mdata, new_mfn;
+    struct sk_buff_head rxq;
+    struct sk_buff *skb;
+    u16 notify_list[NETIF_RX_RING_SIZE];
+    int notify_nr = 0;
+
+    skb_queue_head_init(&rxq);
+
+    mcl = rx_mcl;
+    mmu = rx_mmu;
+    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
+    {
+        netif   = (netif_t *)skb->dev->priv;
+        vdata   = (unsigned long)skb->data;
+        mdata   = virt_to_machine(vdata);
+        new_mfn = get_new_mfn();
+        
+        /*
+         * Set the new P2M table entry before reassigning the old data page.
+         * Heed the comment in pgtable-2level.h:pte_page(). :-)
+         */
+        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+        
+        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;  
+        mmu[1].ptr  = MMU_EXTENDED_COMMAND;
+        mmu[1].val  = MMUEXT_SET_FOREIGNDOM;      
+        mmu[1].val |= (unsigned long)netif->domid << 16;
+        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+        mmu[2].val  = MMUEXT_REASSIGN_PAGE;
+
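+        /*
+         * Two multicall entries per packet: the first repoints our own
+         * virtual mapping of the skb data at the freshly allocated MFN;
+         * the second applies the three MMU updates above -- install the
+         * machine-to-physical entry for the new frame, select the target
+         * guest as FOREIGNDOM, and reassign the old data frame to it.
+         */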
+        mcl[0].op = __HYPERVISOR_update_va_mapping;
+        mcl[0].args[0] = vdata >> PAGE_SHIFT;
+        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
+        mcl[0].args[2] = 0;
+        mcl[1].op = __HYPERVISOR_mmu_update;
+        mcl[1].args[0] = (unsigned long)mmu;
+        mcl[1].args[1] = 3;
+        mcl[1].args[2] = 0;
+
+        mcl += 2;
+        mmu += 3;
+
+        __skb_queue_tail(&rxq, skb);
+
+        /* Filled the batch queue? */
+        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
+            break;
+    }
+
+    if ( mcl == rx_mcl )
+        return;
+
+    mcl[-2].args[2] = UVMF_FLUSH_TLB;
+    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
+        BUG();
+
+    mcl = rx_mcl;
+    mmu = rx_mmu;
+    while ( (skb = __skb_dequeue(&rxq)) != NULL )
+    {
+        netif   = (netif_t *)skb->dev->priv;
+        size    = skb->tail - skb->data;
+
+        /* Rederive the machine addresses. */
+        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+        mdata   = ((mmu[2].ptr & PAGE_MASK) |
+                   ((unsigned long)skb->data & ~PAGE_MASK));
+        
+        atomic_set(&(skb_shinfo(skb)->dataref), 1);
+        skb_shinfo(skb)->nr_frags = 0;
+        skb_shinfo(skb)->frag_list = NULL;
+
+        netif->stats.tx_bytes += size;
+        netif->stats.tx_packets++;
+
+        /* The update_va_mapping() must not fail. */
+        if ( unlikely(mcl[0].args[5] != 0) )
+            BUG();
+
+        /* Check the reassignment error code. */
+        status = NETIF_RSP_OKAY;
+        if ( unlikely(mcl[1].args[5] != 0) )
+        {
+            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
+            dealloc_mfn(mdata >> PAGE_SHIFT);
+            status = NETIF_RSP_ERROR;
+        }
+
+        evtchn = netif->evtchn;
+        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+        if ( make_rx_response(netif, id, status, mdata, size) &&
+             (rx_notify[evtchn] == 0) )
+        {
+            rx_notify[evtchn] = 1;
+            notify_list[notify_nr++] = evtchn;
+        }
+
+        dev_kfree_skb(skb);
+
+        mcl += 2;
+        mmu += 3;
+    }
+
+    while ( notify_nr != 0 )
+    {
+        evtchn = notify_list[--notify_nr];
+        rx_notify[evtchn] = 0;
+        notify_via_evtchn(evtchn);
+    }
+
+    /* More work to do? */
+    if ( !skb_queue_empty(&rx_queue) )
+        tasklet_schedule(&net_rx_tasklet);
+#if 0
+    else
+        xen_network_done_notify();
+#endif
+}
+
+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+{
+    netif_t *netif = dev->priv;
+    return &netif->stats;
+}
+
+static int __on_net_schedule_list(netif_t *netif)
+{
+    return netif->list.next != NULL;
+}
+
+static void remove_from_net_schedule_list(netif_t *netif)
+{
+    spin_lock_irq(&net_schedule_list_lock);
+    if ( likely(__on_net_schedule_list(netif)) )
+    {
+        list_del(&netif->list);
+        netif->list.next = NULL;
+        netif_put(netif);
+    }
+    spin_unlock_irq(&net_schedule_list_lock);
+}
+
+static void add_to_net_schedule_list_tail(netif_t *netif)
+{
+    if ( __on_net_schedule_list(netif) )
+        return;
+
+    spin_lock_irq(&net_schedule_list_lock);
+    if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
+    {
+        list_add_tail(&netif->list, &net_schedule_list);
+        netif_get(netif);
+    }
+    spin_unlock_irq(&net_schedule_list_lock);
+}
+
+static inline void netif_schedule_work(netif_t *netif)
+{
+    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+    {
+        add_to_net_schedule_list_tail(netif);
+        maybe_schedule_tx_action();
+    }
+}
+
+void netif_deschedule(netif_t *netif)
+{
+    remove_from_net_schedule_list(netif);
+}
+
+#if 0
+static void tx_credit_callback(unsigned long data)
+{
+    netif_t *netif = (netif_t *)data;
+    netif->remaining_credit = netif->credit_bytes;
+    netif_schedule_work(netif);
+}
+#endif
+
+static void net_tx_action(unsigned long unused)
+{
+    struct list_head *ent;
+    struct sk_buff *skb;
+    netif_t *netif;
+    netif_tx_request_t txreq;
+    u16 pending_idx;
+    NETIF_RING_IDX i;
+    multicall_entry_t *mcl;
+    PEND_RING_IDX dc, dp;
+    unsigned int data_len;
+
+    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
+        goto skip_dealloc;
+
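+    /*
+     * First pass: queue one update_va_mapping multicall per freed slot to
+     * tear down our mapping of the guest's frame (single TLB flush at the
+     * end). Second pass, below: post NETIF_RSP_OKAY responses and recycle
+     * the pending-ring slots.
+     */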
+    mcl = tx_mcl;
+    while ( dc != dp )
+    {
+        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+        mcl[0].op = __HYPERVISOR_update_va_mapping;
+        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+        mcl[0].args[1] = 0;
+        mcl[0].args[2] = 0;
+        mcl++;        
+    }
+
+    mcl[-1].args[2] = UVMF_FLUSH_TLB;
+    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+        BUG();
+
+    mcl = tx_mcl;
+    while ( dealloc_cons != dp )
+    {
+        /* The update_va_mapping() must not fail. */
+        if ( unlikely(mcl[0].args[5] != 0) )
+            BUG();
+
+        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+        netif = pending_tx_info[pending_idx].netif;
+
+        spin_lock(&netif->tx_lock);
+        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
+                         NETIF_RSP_OKAY);
+        spin_unlock(&netif->tx_lock);
+        
+        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+        /*
+         * Scheduling checks must happen after the above response is posted.
+         * This avoids a possible race with a guest OS on another CPU if that
+         * guest is testing against 'resp_prod' when deciding whether to notify
+         * us when it queues additional packets.
+         */
+        mb();
+        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+            add_to_net_schedule_list_tail(netif);
+        
+        netif_put(netif);
+
+        mcl++;
+    }
+
+ skip_dealloc:
+    mcl = tx_mcl;
+    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+            !list_empty(&net_schedule_list) )
+    {
+        /* Get a netif from the list with work to do. */
+        ent = net_schedule_list.next;
+        netif = list_entry(ent, netif_t, list);
+        netif_get(netif);
+        remove_from_net_schedule_list(netif);
+
+        /* Work to do? */
+        i = netif->tx_req_cons;
+        if ( (i == netif->tx->req_prod) ||
+             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
+        {
+            netif_put(netif);
+            continue;
+        }
+
+        netif->tx->req_cons = ++netif->tx_req_cons;
+
+        /*
+         * 1. Ensure that we see the request when we copy it.
+         * 2. Ensure that frontend sees updated req_cons before we check
+         *    for more work to schedule.
+         */
+        mb();
+
+        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
+               sizeof(txreq));
+
+#if 0
+        /* Credit-based scheduling. */
+        if ( txreq.size > netif->remaining_credit )
+        {
+            s_time_t now = NOW(), next_credit = 
+                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
+            if ( next_credit <= now )
+            {
+                netif->credit_timeout.expires = now;
+                netif->remaining_credit = netif->credit_bytes;
+            }
+            else
+            {
+                netif->remaining_credit = 0;
+                netif->credit_timeout.expires  = next_credit;
+                netif->credit_timeout.data     = (unsigned long)netif;
+                netif->credit_timeout.function = tx_credit_callback;
+                netif->credit_timeout.cpu      = smp_processor_id();
+                add_ac_timer(&netif->credit_timeout);
+                break;
+            }
+        }
+        netif->remaining_credit -= txreq.size;
+#endif
+
+        netif_schedule_work(netif);
+
+        if ( unlikely(txreq.size <= PKT_MIN_LEN) || 
+             unlikely(txreq.size > ETH_FRAME_LEN) )
+        {
+            DPRINTK("Bad packet size: %d\n", txreq.size);
+            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+            netif_put(netif);
+            continue; 
+        }
+
+        /* The payload must lie within one page, as we cannot fragment it. */
+        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
+        {
+            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
+                    txreq.addr, txreq.size, 
+                    (txreq.addr &~PAGE_MASK) + txreq.size);
+            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+            netif_put(netif);
+            continue;
+        }
+
+        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
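+        /*
+         * Copy at most PKT_PROT_LEN (64) bytes into the locally allocated
+         * skb; any remainder stays in the guest's mapped frame and is
+         * attached as a page fragment further down.
+         */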
+
+        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
+        {
+            DPRINTK("Can't allocate a skb in start_xmit.\n");
+            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+            netif_put(netif);
+            break;
+        }
+
+        /* Packets passed to netif_rx() must have some headroom. */
+        skb_reserve(skb, 16);
+
+        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
+        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
+        mcl[0].args[2] = 0;
+        mcl[0].args[3] = netif->domid;
+        mcl++;
+
+        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
+        pending_tx_info[pending_idx].netif = netif;
+        *((u16 *)skb->data) = pending_idx;
+
+        __skb_queue_tail(&tx_queue, skb);
+
+        pending_cons++;
+
+        /* Filled the batch queue? */
+        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
+            break;
+    }
+
+    if ( mcl == tx_mcl )
+        return;
+
+    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+        BUG();
+
+    mcl = tx_mcl;
+    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
+    {
+        pending_idx = *((u16 *)skb->data);
+        netif       = pending_tx_info[pending_idx].netif;
+        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
+
+        /* Check the remap error code. */
+        if ( unlikely(mcl[0].args[5] != 0) )
+        {
+            DPRINTK("Bad page frame\n");
+            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+            netif_put(netif);
+            kfree_skb(skb);
+            mcl++;
+            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+            continue;
+        }
+
+        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+        __skb_put(skb, data_len);
+        memcpy(skb->data, 
+               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
+               data_len);
+
+        if (data_len < txreq.size) {
+            /* Append the packet payload as a fragment. */
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
+            skb_shinfo(skb)->frags[0].page_offset = 
+                (txreq.addr + data_len) & ~PAGE_MASK;
+            skb_shinfo(skb)->nr_frags = 1;
+        } else {
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb->destructor = netif_skb_release;
+        }
+
+        skb->data_len  = txreq.size - data_len;
+        skb->len      += skb->data_len;
+
+        skb->dev      = netif->dev;
+        skb->protocol = eth_type_trans(skb, skb->dev);
+
+        netif->stats.rx_bytes += txreq.size;
+        netif->stats.rx_packets++;
+
+        netif_rx(skb);
+        netif->dev->last_rx = jiffies;
+
+        mcl++;
+    }
+}
+
+static void netif_idx_release(u16 pending_idx)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&dealloc_lock, flags);
+    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+    spin_unlock_irqrestore(&dealloc_lock, flags);
+
+    tasklet_schedule(&net_tx_tasklet);
+}
+
+static void netif_page_release(struct page *page)
+{
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    /* Ready for next use. */
+    set_page_count(page, 1);
+
+    netif_idx_release(pending_idx);
+}
+
+static void netif_skb_release(struct sk_buff *skb)
+{
+    struct page *page = skb_shinfo(skb)->frags[0].page;
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    netif_idx_release(pending_idx);
+}
+
+#if 0
+long flush_bufs_for_netif(netif_t *netif)
+{
+    NETIF_RING_IDX i;
+
+    /* Return any outstanding receive buffers to the guest OS. */
+    spin_lock(&netif->rx_lock);
+    for ( i = netif->rx_req_cons; 
+          (i != netif->rx->req_prod) &&
+              ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
+          i++ )
+    {
+        make_rx_response(netif,
+                         netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
+                         NETIF_RSP_DROPPED, 0, 0);
+    }
+    netif->rx_req_cons = i;
+    spin_unlock(&netif->rx_lock);
+
+    /*
+     * Flush pending transmit buffers. The guest may still have to wait for
+     * buffers that are queued at a physical NIC.
+     */
+    spin_lock(&netif->tx_lock);
+    for ( i = netif->tx_req_cons; 
+          (i != netif->tx->req_prod) &&
+              ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
+          i++ )
+    {
+        make_tx_response(netif,
+                         netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
+                         NETIF_RSP_DROPPED);
+    }
+    netif->tx_req_cons = i;
+    spin_unlock(&netif->tx_lock);
+
+    return 0;
+}
+#endif
+
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+    netif_t *netif = dev_id;
+    if ( tx_work_exists(netif) )
+    {
+        add_to_net_schedule_list_tail(netif);
+        maybe_schedule_tx_action();
+    }
+    return IRQ_HANDLED;
+}
+
+static void make_tx_response(netif_t *netif, 
+                             u16      id,
+                             s8       st)
+{
+    NETIF_RING_IDX i = netif->tx_resp_prod;
+    netif_tx_response_t *resp;
+
+    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+    resp->id     = id;
+    resp->status = st;
+    wmb();
+    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+    mb(); /* Update producer before checking event threshold. */
+    if ( i == netif->tx->event )
+        notify_via_evtchn(netif->evtchn);
+}
+
+static int make_rx_response(netif_t *netif, 
+                            u16      id, 
+                            s8       st,
+                            memory_t addr,
+                            u16      size)
+{
+    NETIF_RING_IDX i = netif->rx_resp_prod;
+    netif_rx_response_t *resp;
+
+    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+    resp->addr   = addr;
+    resp->id     = id;
+    resp->status = (s16)size;
+    if ( st < 0 )
+        resp->status = (s16)st;
+    wmb();
+    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+    mb(); /* Update producer before checking event threshold. */
+    return (i == netif->rx->event);
+}
+
+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+{
+    struct list_head *ent;
+    netif_t *netif;
+    int i = 0;
+
+    printk(KERN_ALERT "netif_schedule_list:\n");
+    spin_lock_irq(&net_schedule_list_lock);
+
+    list_for_each ( ent, &net_schedule_list )
+    {
+        netif = list_entry(ent, netif_t, list);
+        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
+               i, netif->rx_req_cons, netif->rx_resp_prod);               
+        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
+               netif->tx_req_cons, netif->tx_resp_prod);
+        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
+               netif->rx->req_prod, netif->rx->resp_prod);
+        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
+               netif->rx->event, netif->tx->req_prod);
+        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
+               netif->tx->resp_prod, netif->tx->event);
+        i++;
+    }
+
+    spin_unlock_irq(&net_schedule_list_lock);
+    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+    return IRQ_HANDLED;
+}
+
+static int __init netback_init(void)
+{
+    int i;
+    struct page *page;
+
+    if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
+        !(xen_start_info.flags & SIF_INITDOMAIN) )
+        return 0;
+
+    printk("Initialising Xen netif backend\n");
+
+    skb_queue_head_init(&rx_queue);
+    skb_queue_head_init(&tx_queue);
+
+    netif_interface_init();
+
+    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
+        BUG();
+
+    for ( i = 0; i < MAX_PENDING_REQS; i++ )
+    {
+        page = virt_to_page(MMAP_VADDR(i));
+        set_page_count(page, 1);
+        SetPageForeign(page, netif_page_release);
+    }
+
+    pending_cons = 0;
+    pending_prod = MAX_PENDING_REQS;
+    for ( i = 0; i < MAX_PENDING_REQS; i++ )
+        pending_ring[i] = i;
+
+    spin_lock_init(&net_schedule_list_lock);
+    INIT_LIST_HEAD(&net_schedule_list);
+
+    netif_ctrlif_init();
+
+    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+                      netif_be_dbg, SA_SHIRQ, 
+                      "net-be-dbg", &netif_be_dbg);
+
+    return 0;
+}
+
+static void netback_cleanup(void)
+{
+    BUG();
+}
+
+module_init(netback_init);
+module_exit(netback_cleanup);
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netfront/Kconfig b/linux-2.6.9-xen-sparse/drivers/xen/netfront/Kconfig
new file mode 100644 (file)
index 0000000..334e6c3
--- /dev/null
@@ -0,0 +1,6 @@
+
+config XENNET
+       tristate "Xen network driver"
+       depends on NETDEVICES && ARCH_XEN
+       help
+         Paravirtualised network-device frontend driver for Xen guest domains.
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netfront/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/netfront/Makefile
new file mode 100644 (file)
index 0000000..7eb07a2
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := netfront.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c
new file mode 100644 (file)
index 0000000..831572b
--- /dev/null
@@ -0,0 +1,1341 @@
+/******************************************************************************
+ * Virtual network driver for conversing with remote driver backends.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <asm/io.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor-ifs/io/netif.h>
+#include <asm/page.h>
+
+#include <net/arp.h>
+#include <net/route.h>
+
+#define DEBUG 0
+
+#ifndef __GFP_NOWARN
+#define __GFP_NOWARN 0
+#endif
+#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
+
+#define init_skb_shinfo(_skb)                         \
+    do {                                              \
+        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
+        skb_shinfo(_skb)->nr_frags = 0;               \
+        skb_shinfo(_skb)->frag_list = NULL;           \
+    } while ( 0 )
+
+/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
+#define RX_HEADROOM 200
+
+/*
+ * If the backend driver is pipelining transmit requests then we can be very
+ * aggressive in avoiding new-packet notifications -- only need to send a
+ * notification if there are no outstanding unreceived responses.
+ * If the backend may be buffering our transmit buffers for any reason then we
+ * are rather more conservative.
+ */
+#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
+#else
+#define TX_TEST_IDX req_cons  /* conservative: not seen all our requests? */
+#endif
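+
+/*
+ * network_start_xmit() applies this as follows: after publishing request
+ * 'i' it notifies the backend only if np->tx->TX_TEST_IDX still equals i,
+ * i.e. only when the backend appears to have caught up with everything
+ * queued previously.
+ */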
+
+static void network_tx_buf_gc(struct net_device *dev);
+static void network_alloc_rx_buffers(struct net_device *dev);
+
+static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+
+static struct list_head dev_list;
+
+struct net_private
+{
+    struct list_head list;
+    struct net_device *dev;
+
+    struct net_device_stats stats;
+    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+    unsigned int tx_full;
+    
+    netif_tx_interface_t *tx;
+    netif_rx_interface_t *rx;
+
+    spinlock_t   tx_lock;
+    spinlock_t   rx_lock;
+
+    unsigned int handle;
+    unsigned int evtchn;
+    unsigned int irq;
+
+    /* What is the status of our connection to the remote backend? */
+#define BEST_CLOSED       0
+#define BEST_DISCONNECTED 1
+#define BEST_CONNECTED    2
+    unsigned int backend_state;
+
+    /* Is this interface open or closed (down or up)? */
+#define UST_CLOSED        0
+#define UST_OPEN          1
+    unsigned int user_state;
+
+    /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_MAX_TARGET NETIF_RX_RING_SIZE
+    int rx_target;
+    struct sk_buff_head rx_batch;
+
+    /*
+     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+     * array is an index into a chain of free entries.
+     */
+    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+};
+
+/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
+#define ADD_ID_TO_FREELIST(_list, _id)             \
+    (_list)[(_id)] = (_list)[0];                   \
+    (_list)[0]     = (void *)(unsigned long)(_id);
+#define GET_ID_FROM_FREELIST(_list)                \
+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
+    (_list)[0]  = (_list)[_id];                    \
+    (unsigned short)_id; })
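+/*
+ * Entry 0 of each array is the head of the free chain; a free entry holds
+ * the index of the next free entry, an in-use entry holds an skb pointer.
+ * E.g. with entries 3 and 5 free, _list[0] == 3 and _list[3] == 5, so
+ * GET_ID_FROM_FREELIST() returns 3 and leaves _list[0] == 5.
+ */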
+
+static char *status_name[] = {
+    [NETIF_INTERFACE_STATUS_CLOSED]       = "closed",
+    [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+    [NETIF_INTERFACE_STATUS_CONNECTED]    = "connected",
+    [NETIF_INTERFACE_STATUS_CHANGED]      = "changed",
+};
+
+static char *be_state_name[] = {
+    [BEST_CLOSED]       = "closed",
+    [BEST_DISCONNECTED] = "disconnected",
+    [BEST_CONNECTED]    = "connected",
+};
+
+#if DEBUG
+#define DPRINTK(fmt, args...) \
+    printk(KERN_ALERT "[XEN] (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTK(fmt, args...) ((void)0)
+#endif
+#define IPRINTK(fmt, args...) \
+    printk(KERN_INFO "[XEN] " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+    printk(KERN_WARNING "[XEN] " fmt, ##args)
+
+static struct net_device *find_dev_by_handle(unsigned int handle)
+{
+    struct list_head *ent;
+    struct net_private *np;
+    list_for_each ( ent, &dev_list )
+    {
+        np = list_entry(ent, struct net_private, list);
+        if ( np->handle == handle )
+            return np->dev;
+    }
+    return NULL;
+}
+
+/** Network interface info. */
+struct netif_ctrl {
+    /** Number of interfaces. */
+    int interface_n;
+    /** Number of connected interfaces. */
+    int connected_n;
+    /** Error code. */
+    int err;
+    int up;
+};
+
+static struct netif_ctrl netctrl;
+
+static void netctrl_init(void)
+{
+    memset(&netctrl, 0, sizeof(netctrl));
+    netctrl.up = NETIF_DRIVER_STATUS_DOWN;
+}
+
+/** Record the first network interface error; return the current error.
+ */
+static int netctrl_err(int err)
+{
+    if ( (err < 0) && !netctrl.err )
+        netctrl.err = err;
+    return netctrl.err;
+}
+
+/** Test if all network interfaces are connected.
+ *
+ * @return 1 if all connected, 0 if not, negative error code otherwise
+ */
+static int netctrl_connected(void)
+{
+    int ok;
+
+    if ( netctrl.err )
+        ok = netctrl.err;
+    else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
+        ok = (netctrl.connected_n == netctrl.interface_n);
+    else
+        ok = 0;
+
+    return ok;
+}
+
+/** Count the connected network interfaces.
+ *
+ * @return connected count
+ */
+static int netctrl_connected_count(void)
+{
+    
+    struct list_head *ent;
+    struct net_private *np;
+    unsigned int connected;
+
+    connected = 0;
+    
+    list_for_each(ent, &dev_list) {
+        np = list_entry(ent, struct net_private, list);
+        if (np->backend_state == BEST_CONNECTED)
+            connected++;
+    }
+
+    netctrl.connected_n = connected;
+    DPRINTK("> connected_n=%d interface_n=%d\n",
+            netctrl.connected_n, netctrl.interface_n);
+    return connected;
+}
+
+/** Send a packet on a net device to encourage switches to learn the
+ * MAC. We send a fake ARP request.
+ *
+ * @param dev device
+ * @return 0 on success, error code otherwise
+ */
+static int vif_wake(struct net_device *dev)
+{
+    struct sk_buff *skb;
+    u32             src_ip, dst_ip;
+
+    dst_ip = INADDR_BROADCAST;
+    src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
+    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+                     dst_ip, dev, src_ip,
+                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
+                     /*target_hw*/ dev->dev_addr);
+    if ( skb == NULL )
+        return -ENOMEM;
+
+    return dev_queue_xmit(skb);
+}
+
+static int network_open(struct net_device *dev)
+{
+    struct net_private *np = dev->priv;
+
+    memset(&np->stats, 0, sizeof(np->stats));
+
+    np->user_state = UST_OPEN;
+
+    network_alloc_rx_buffers(dev);
+    np->rx->event = np->rx_resp_cons + 1;
+
+    netif_start_queue(dev);
+
+    return 0;
+}
+
+static void network_tx_buf_gc(struct net_device *dev)
+{
+    NETIF_RING_IDX i, prod;
+    unsigned short id;
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb;
+
+    if ( np->backend_state != BEST_CONNECTED )
+        return;
+
+    do {
+        prod = np->tx->resp_prod;
+        rmb(); /* Ensure we see responses up to 'rp'. */
+
+        for ( i = np->tx_resp_cons; i != prod; i++ )
+        {
+            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+            skb = np->tx_skbs[id];
+            ADD_ID_TO_FREELIST(np->tx_skbs, id);
+            dev_kfree_skb_irq(skb);
+        }
+        
+        np->tx_resp_cons = prod;
+        
+        /*
+         * Set a new event, then check for race with update of tx_cons. Note
+         * that it is essential to schedule a callback, no matter how few
+         * buffers are pending. Even if there is space in the transmit ring,
+         * higher layers may be blocked because too much data is outstanding:
+         * in such cases notification from Xen is likely to be the only kick
+         * that we'll get.
+         */
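+        /*
+         * E.g. with resp_prod == prod == 10 and req_prod == 18 the event
+         * threshold becomes 10 + (8 >> 1) + 1 == 15: we ask to be notified
+         * once the backend has responded to about half of the outstanding
+         * requests.
+         */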
+        np->tx->event = 
+            prod + ((np->tx->req_prod - prod) >> 1) + 1;
+        mb();
+    }
+    while ( prod != np->tx->resp_prod );
+
+    if ( np->tx_full && 
+         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
+    {
+        np->tx_full = 0;
+        if ( np->user_state == UST_OPEN )
+            netif_wake_queue(dev);
+    }
+}
+
+
+static void network_alloc_rx_buffers(struct net_device *dev)
+{
+    unsigned short id;
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb;
+    int i, batch_target;
+    NETIF_RING_IDX req_prod = np->rx->req_prod;
+
+    if ( unlikely(np->backend_state != BEST_CONNECTED) )
+        return;
+
+    /*
+     * Allocate skbuffs greedily, even though we batch updates to the
+     * receive ring. This creates a less bursty demand on the memory allocator,
+     * so should reduce the chance of failed allocation requests both for
+     * ourselves and for other kernel subsystems.
+     */
+    batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+    for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
+    {
+        if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
+            break;
+        __skb_queue_tail(&np->rx_batch, skb);
+    }
+
+    /* Is the batch large enough to be worthwhile? */
+    if ( i < (np->rx_target/2)  )
+        return;
+
+    for ( i = 0; ; i++ )
+    {
+        if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
+            break;
+
+        skb->dev = dev;
+
+        id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+        np->rx_skbs[id] = skb;
+        
+        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+        
+        rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+
+       /* Remove this page from pseudo phys map before passing back to Xen. */
+       phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] 
+           = INVALID_P2M_ENTRY;
+
+        rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
+        rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+        rx_mcl[i].args[1] = 0;
+        rx_mcl[i].args[2] = 0;
+    }
+
+    /*
+     * We may have allocated buffers which have entries outstanding in the page
+     * update queue -- make sure we flush those first!
+     */
+    flush_page_update_queue();
+
+    /* After all PTEs have been zapped we blow away stale TLB entries. */
+    rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
+
+    /* Give away a batch of pages. */
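+    /*
+     * The machine frames backing these empty buffers are returned to Xen;
+     * the backend later hands us freshly filled frames in their place (see
+     * the remapping in netif_poll()).
+     */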
+    rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
+    rx_mcl[i].args[0] = MEMOP_decrease_reservation;
+    rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
+    rx_mcl[i].args[2] = (unsigned long)i;
+    rx_mcl[i].args[3] = 0;
+    rx_mcl[i].args[4] = DOMID_SELF;
+
+    /* Zap PTEs and give away pages in one big multicall. */
+    (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+    /* Check return status of HYPERVISOR_dom_mem_op(). */
+    if ( unlikely(rx_mcl[i].args[5] != i) )
+        panic("Unable to reduce memory reservation\n");
+
+    /* Above is a suitable barrier to ensure backend will see requests. */
+    np->rx->req_prod = req_prod + i;
+
+    /* Adjust our floating fill target if we risked running out of buffers. */
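+    /* Exponential increase here, linear decrease in netif_poll(): e.g.
+     * 8 -> 16 -> 32 under sustained pressure, capped at RX_MAX_TARGET,
+     * then back down one step per quiet poll towards RX_MIN_TARGET. */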
+    if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+         ((np->rx_target *= 2) > RX_MAX_TARGET) )
+        np->rx_target = RX_MAX_TARGET;
+}
+
+
+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    unsigned short id;
+    struct net_private *np = (struct net_private *)dev->priv;
+    netif_tx_request_t *tx;
+    NETIF_RING_IDX i;
+
+    if ( unlikely(np->tx_full) )
+    {
+        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
+        netif_stop_queue(dev);
+        goto drop;
+    }
+
+    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+                  PAGE_SIZE) )
+    {
+        struct sk_buff *nskb;
+        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
+            goto drop;
+        skb_put(nskb, skb->len);
+        memcpy(nskb->data, skb->data, skb->len);
+        nskb->dev = skb->dev;
+        dev_kfree_skb(skb);
+        skb = nskb;
+    }
+    
+    spin_lock_irq(&np->tx_lock);
+
+    if ( np->backend_state != BEST_CONNECTED )
+    {
+        spin_unlock_irq(&np->tx_lock);
+        goto drop;
+    }
+
+    i = np->tx->req_prod;
+
+    id = GET_ID_FROM_FREELIST(np->tx_skbs);
+    np->tx_skbs[id] = skb;
+
+    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+    tx->id   = id;
+    tx->addr = virt_to_machine(skb->data);
+    tx->size = skb->len;
+
+    wmb(); /* Ensure that backend will see the request. */
+    np->tx->req_prod = i + 1;
+
+    network_tx_buf_gc(dev);
+
+    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
+    {
+        np->tx_full = 1;
+        netif_stop_queue(dev);
+    }
+
+    spin_unlock_irq(&np->tx_lock);
+
+    np->stats.tx_bytes += skb->len;
+    np->stats.tx_packets++;
+
+    /* Only notify Xen if we really have to. */
+    mb();
+    if ( np->tx->TX_TEST_IDX == i )
+        notify_via_evtchn(np->evtchn);
+
+    return 0;
+
+ drop:
+    np->stats.tx_dropped++;
+    dev_kfree_skb(skb);
+    return 0;
+}
+
+
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+    struct net_device *dev = dev_id;
+    struct net_private *np = dev->priv;
+    unsigned long flags;
+
+    spin_lock_irqsave(&np->tx_lock, flags);
+    network_tx_buf_gc(dev);
+    spin_unlock_irqrestore(&np->tx_lock, flags);
+
+    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
+         (np->user_state == UST_OPEN) )
+        netif_rx_schedule(dev);
+
+    return IRQ_HANDLED;
+}
+
+
+static int netif_poll(struct net_device *dev, int *pbudget)
+{
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb, *nskb;
+    netif_rx_response_t *rx;
+    NETIF_RING_IDX i, rp;
+    mmu_update_t *mmu = rx_mmu;
+    multicall_entry_t *mcl = rx_mcl;
+    int work_done, budget, more_to_do = 1;
+    struct sk_buff_head rxq;
+    unsigned long flags;
+
+    spin_lock(&np->rx_lock);
+
+    if ( np->backend_state != BEST_CONNECTED )
+    {
+        spin_unlock(&np->rx_lock);
+        return 0;
+    }
+
+    skb_queue_head_init(&rxq);
+
+    if ( (budget = *pbudget) > dev->quota )
+        budget = dev->quota;
+
+    rp = np->rx->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+    for ( i = np->rx_resp_cons, work_done = 0; 
+          (i != rp) && (work_done < budget); 
+          i++, work_done++ )
+    {
+        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+
+        /*
+         * An error here is very odd. Usually indicates a backend bug,
+         * low-memory condition, or that we didn't have reservation headroom.
+         * Whatever - print an error and queue the id again straight away.
+         */
+        if ( unlikely(rx->status <= 0) )
+        {
+           printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
+            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
+            wmb();
+            np->rx->req_prod++;
+            continue;
+        }
+
+        skb = np->rx_skbs[rx->id];
+        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+        /* NB. We handle skb overflow later. */
+        skb->data = skb->head + (rx->addr & ~PAGE_MASK);
+        skb->len  = rx->status;
+        skb->tail = skb->data + skb->len;
+
+        np->stats.rx_packets++;
+        np->stats.rx_bytes += rx->status;
+
+        /* Remap the page. */
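+        /*
+         * The backend reassigned this machine frame to us: queue an M2P
+         * update for it and remap skb->head's virtual address, then fix
+         * up our P2M entry below.
+         */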
+        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
+        mmu++;
+        mcl->op = __HYPERVISOR_update_va_mapping;
+        mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+        mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
+        mcl->args[2] = 0;
+        mcl++;
+
+        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
+            rx->addr >> PAGE_SHIFT;
+
+        __skb_queue_tail(&rxq, skb);
+    }
+
+    /* Do all the remapping work, and M->P updates, in one big hypercall. */
+    if ( likely((mcl - rx_mcl) != 0) )
+    {
+        mcl->op = __HYPERVISOR_mmu_update;
+        mcl->args[0] = (unsigned long)rx_mmu;
+        mcl->args[1] = mmu - rx_mmu;
+        mcl->args[2] = 0;
+        mcl++;
+        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+    }
+
+    while ( (skb = __skb_dequeue(&rxq)) != NULL )
+    {
+        /*
+         * Enough room in skbuff for the data we were passed? Also, Linux 
+         * expects at least 16 bytes headroom in each receive buffer.
+         */
+        if ( unlikely(skb->tail > skb->end) ||
+             unlikely((skb->data - skb->head) < 16) )
+        {
+            nskb = NULL;
+
+            /* Only copy the packet if it fits in the current MTU. */
+            if ( skb->len <= (dev->mtu + ETH_HLEN) )
+            {
+                if ( (skb->tail > skb->end) && net_ratelimit() )
+                    printk(KERN_INFO "Received packet needs %d bytes more "
+                           "headroom.\n", skb->tail - skb->end);
+
+                if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
+                {
+                    skb_reserve(nskb, 2);
+                    skb_put(nskb, skb->len);
+                    memcpy(nskb->data, skb->data, skb->len);
+                    nskb->dev = skb->dev;
+                }
+            }
+            else if ( net_ratelimit() )
+                printk(KERN_INFO "Received packet too big for MTU "
+                       "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
+
+            /* Reinitialise and then destroy the old skbuff. */
+            skb->len  = 0;
+            skb->tail = skb->data;
+            init_skb_shinfo(skb);
+            dev_kfree_skb(skb);
+
+            /* Switch old for new, if we copied the buffer. */
+            if ( (skb = nskb) == NULL )
+                continue;
+        }
+        
+        /* Set the shared-info area, which is hidden behind the real data. */
+        init_skb_shinfo(skb);
+
+        /* Ethernet-specific work. Delayed to here as it peeks the header. */
+        skb->protocol = eth_type_trans(skb, dev);
+
+        /* Pass it up. */
+        netif_receive_skb(skb);
+        dev->last_rx = jiffies;
+    }
+
+    np->rx_resp_cons = i;
+
+    /* If we get a callback with very few responses, reduce fill target. */
+    /* NB. Note exponential increase, linear decrease. */
+    if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
+         (--np->rx_target < RX_MIN_TARGET) )
+        np->rx_target = RX_MIN_TARGET;
+
+    network_alloc_rx_buffers(dev);
+
+    *pbudget   -= work_done;
+    dev->quota -= work_done;
+
+    if ( work_done < budget )
+    {
+        local_irq_save(flags);
+
+        np->rx->event = i + 1;
+    
+        /* Deal with hypervisor racing our resetting of rx_event. */
+        mb();
+        if ( np->rx->resp_prod == i )
+        {
+            __netif_rx_complete(dev);
+            more_to_do = 0;
+        }
+
+        local_irq_restore(flags);
+    }
+
+    spin_unlock(&np->rx_lock);
+
+    return more_to_do;
+}
+
+
+static int network_close(struct net_device *dev)
+{
+    struct net_private *np = dev->priv;
+    np->user_state = UST_CLOSED;
+    netif_stop_queue(np->dev);
+    return 0;
+}
+
+
+static struct net_device_stats *network_get_stats(struct net_device *dev)
+{
+    struct net_private *np = (struct net_private *)dev->priv;
+    return &np->stats;
+}
+
+
+static void network_connect(struct net_device *dev,
+                            netif_fe_interface_status_t *status)
+{
+    struct net_private *np;
+    int i, requeue_idx;
+    netif_tx_request_t *tx;
+
+    np = dev->priv;
+    spin_lock_irq(&np->tx_lock);
+    spin_lock(&np->rx_lock);
+
+    /* Recovery procedure: */
+
+    /* Step 1: Reinitialise variables. */
+    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+    np->rx->event = np->tx->event = 1;
+
+    /* Step 2: Rebuild the RX and TX ring contents.
+     * NB. We could just free the queued TX packets now but we hope
+     * that sending them out might do some good.  We have to rebuild
+     * the RX ring because some of our pages are currently flipped out
+     * so we can't just free the RX skbs.
+     * NB2. Freelist index entries are always going to be less than
+     *  __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
+     * greater than __PAGE_OFFSET: we use this property to distinguish
+     * them.
+     */
+
+    /* Rebuild the TX buffer freelist and the TX ring itself.
+     * NB. This reorders packets.  We could keep more private state
+     * to avoid this but maybe it doesn't matter so much given the
+     * interface has been down.
+     */
+    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
+    {
+        if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
+        {
+            struct sk_buff *skb = np->tx_skbs[i];
+
+            tx = &np->tx->ring[requeue_idx++].req;
+
+            tx->id   = i;
+            tx->addr = virt_to_machine(skb->data);
+            tx->size = skb->len;
+
+            np->stats.tx_bytes += skb->len;
+            np->stats.tx_packets++;
+        }
+    }
+    wmb();
+    np->tx->req_prod = requeue_idx;
+
+    /* Rebuild the RX buffer freelist and the RX ring itself. */
+    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
+        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
+            np->rx->ring[requeue_idx++].req.id = i;
+    wmb();                
+    np->rx->req_prod = requeue_idx;
+
+    printk(KERN_ALERT "[XEN] Netfront recovered tx=%d rxfree=%d\n",
+           np->tx->req_prod,np->rx->req_prod);
+
+
+    /* Step 3: All public and private state should now be sane.  Get
+     * ready to start sending and receiving packets and give the driver
+     * domain a kick because we've probably just requeued some
+     * packets.
+     */
+    np->backend_state = BEST_CONNECTED;
+    wmb();
+    notify_via_evtchn(status->evtchn);  
+    network_tx_buf_gc(dev);
+
+    if ( np->user_state == UST_OPEN )
+        netif_start_queue(dev);
+
+    spin_unlock(&np->rx_lock);
+    spin_unlock_irq(&np->tx_lock);
+}
+
+static void vif_show(struct net_private *np)
+{
+#if DEBUG
+    if (np) {
+        IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
+               np->handle,
+               be_state_name[np->backend_state],
+               np->user_state ? "open" : "closed",
+               np->evtchn,
+               np->irq,
+               np->tx,
+               np->rx);
+    } else {
+        IPRINTK("<vif NULL>\n");
+    }
+#endif
+}
+
+/* Send a connect message to xend to tell it to bring up the interface. */
+static void send_interface_connect(struct net_private *np)
+{
+    ctrl_msg_t cmsg = {
+        .type    = CMSG_NETIF_FE,
+        .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
+        .length  = sizeof(netif_fe_interface_connect_t),
+    };
+    netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+    DPRINTK(">\n"); vif_show(np); 
+    msg->handle = np->handle;
+    msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
+    msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+        
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+    DPRINTK("<\n");
+}
+
+/* Send a driver status notification to the domain controller. */
+static int send_driver_status(int ok)
+{
+    int err = 0;
+    ctrl_msg_t cmsg = {
+        .type    = CMSG_NETIF_FE,
+        .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
+        .length  = sizeof(netif_fe_driver_status_t),
+    };
+    netif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+    msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
+    err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+    return err;
+}
+
+/* Stop network device and free tx/rx queues and irq.
+ */
+static void vif_release(struct net_private *np)
+{
+    /* Stop old i/f to prevent errors whilst we rebuild the state. */
+    spin_lock_irq(&np->tx_lock);
+    spin_lock(&np->rx_lock);
+    netif_stop_queue(np->dev);
+    /* np->backend_state = BEST_DISCONNECTED; */
+    spin_unlock(&np->rx_lock);
+    spin_unlock_irq(&np->tx_lock);
+    
+    /* Free resources. */
+    if ( np->tx != NULL )
+    {
+        free_irq(np->irq, np->dev);
+        unbind_evtchn_from_irq(np->evtchn);
+        free_page((unsigned long)np->tx);
+        free_page((unsigned long)np->rx);
+        np->irq = 0;
+        np->evtchn = 0;
+        np->tx = NULL;
+        np->rx = NULL;
+    }
+}
+
+/* Release vif resources and close it down completely.
+ */
+static void vif_close(struct net_private *np)
+{
+    DPRINTK(">\n"); vif_show(np);
+    WPRINTK("Unexpected netif-CLOSED message in state %s\n",
+            be_state_name[np->backend_state]);
+    vif_release(np);
+    np->backend_state = BEST_CLOSED;
+    /* todo: take dev down and free. */
+    vif_show(np); DPRINTK("<\n");
+}
+
+/* Move the vif into disconnected state.
+ * Allocates tx/rx pages.
+ * Sends connect message to xend.
+ */
+static void vif_disconnect(struct net_private *np)
+{
+    DPRINTK(">\n");
+    /* np->tx and np->rx should already be NULL here; free them if not. */
+    if ( np->tx ) free_page((unsigned long)np->tx);
+    if ( np->rx ) free_page((unsigned long)np->rx);
+    np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
+    np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
+    memset(np->tx, 0, PAGE_SIZE);
+    memset(np->rx, 0, PAGE_SIZE);
+    np->backend_state = BEST_DISCONNECTED;
+    send_interface_connect(np);
+    vif_show(np); DPRINTK("<\n");
+}
+
+/* Begin interface recovery.
+ *
+ * NB. Whilst we're recovering, we turn the carrier state off.  We
+ * take measures to ensure that this device isn't used for
+ * anything.  We also stop the queue for this device.  Various
+ * different approaches (e.g. continuing to buffer packets) have
+ * been tested but don't appear to improve the overall impact on
+ * TCP connections.
+ *
+ * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
+ * is initiated by a special "RESET" message - disconnect could
+ * just mean we're not allowed to use this interface any more.
+ */
+static void 
+vif_reset(
+    struct net_private *np)
+{
+    DPRINTK(">\n");
+    IPRINTK("Attempting to reconnect network interface: handle=%u\n",
+            np->handle);    
+    vif_release(np);
+    vif_disconnect(np);
+    vif_show(np); DPRINTK("<\n");
+}
+
+/* Move the vif into connected state.
+ * Sets the mac and event channel from the message.
+ * Binds the irq to the event channel.
+ */
+static void
+vif_connect(
+    struct net_private *np, netif_fe_interface_status_t *status)
+{
+    struct net_device *dev = np->dev;
+    DPRINTK(">\n");
+    memcpy(dev->dev_addr, status->mac, ETH_ALEN);
+    network_connect(dev, status);
+    np->evtchn = status->evtchn;
+    np->irq = bind_evtchn_to_irq(np->evtchn);
+    (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
+    netctrl_connected_count();
+    vif_wake(dev);
+    vif_show(np); DPRINTK("<\n");
+}
+
+
+/** Create a network device.
+ * @param handle device handle
+ * @param val return parameter for created device
+ * @return 0 on success, error code otherwise
+ */
+static int create_netdev(int handle, struct net_device **val)
+{
+    int i, err = 0;
+    struct net_device *dev = NULL;
+    struct net_private *np = NULL;
+
+    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
+    {
+        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
+        err = -ENOMEM;
+        goto exit;
+    }
+
+    np                = dev->priv;
+    np->backend_state = BEST_CLOSED;
+    np->user_state    = UST_CLOSED;
+    np->handle        = handle;
+    
+    spin_lock_init(&np->tx_lock);
+    spin_lock_init(&np->rx_lock);
+
+    skb_queue_head_init(&np->rx_batch);
+    np->rx_target = RX_MIN_TARGET;
+
+    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
+    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
+        np->tx_skbs[i] = (void *)(i+1);
+    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
+        np->rx_skbs[i] = (void *)(i+1);
+
+    dev->open            = network_open;
+    dev->hard_start_xmit = network_start_xmit;
+    dev->stop            = network_close;
+    dev->get_stats       = network_get_stats;
+    dev->poll            = netif_poll;
+    dev->weight          = 64;
+    
+    if ( (err = register_netdev(dev)) != 0 )
+    {
+        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
+        goto exit;
+    }
+    np->dev = dev;
+    list_add(&np->list, &dev_list);
+
+  exit:
+    if ( (err != 0) && (dev != NULL ) )
+        kfree(dev);
+    else if ( val != NULL )
+        *val = dev;
+    return err;
+}
+
+/* Get the target interface for a status message.
+ * Creates the interface when it makes sense.
+ * The returned interface may be NULL even when there is no error.
+ *
+ * @param status status message
+ * @param np return parameter for interface state
+ * @return 0 on success, error code otherwise
+ */
+static int 
+target_vif(
+    netif_fe_interface_status_t *status, struct net_private **np)
+{
+    int err = 0;
+    struct net_device *dev;
+
+    DPRINTK("> handle=%d\n", status->handle);
+    if ( status->handle < 0 )
+    {
+        err = -EINVAL;
+        goto exit;
+    }
+
+    if ( (dev = find_dev_by_handle(status->handle)) != NULL )
+        goto exit;
+
+    if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
+        goto exit;
+    if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
+        goto exit;
+
+    /* It's a new interface in a good state - create it. */
+    DPRINTK("> create device...\n");
+    if ( (err = create_netdev(status->handle, &dev)) != 0 )
+        goto exit;
+
+    netctrl.interface_n++;
+
+  exit:
+    if ( np != NULL )
+        *np = ((dev && !err) ? dev->priv : NULL);
+    DPRINTK("< err=%d\n", err);
+    return err;
+}
+
+/* Handle an interface status message. */
+static void netif_interface_status(netif_fe_interface_status_t *status)
+{
+    int err = 0;
+    struct net_private *np = NULL;
+    
+    DPRINTK(">\n");
+    DPRINTK("> status=%s handle=%d\n",
+            status_name[status->status], status->handle);
+
+    if ( (err = target_vif(status, &np)) != 0 )
+    {
+        WPRINTK("Invalid netif: handle=%u\n", status->handle);
+        return;
+    }
+
+    if ( np == NULL )
+    {
+        DPRINTK("> no vif\n");
+        return;
+    }
+
+    DPRINTK(">\n"); vif_show(np);
+
+    switch ( status->status )
+    {
+    case NETIF_INTERFACE_STATUS_CLOSED:
+        switch ( np->backend_state )
+        {
+        case BEST_CLOSED:
+        case BEST_DISCONNECTED:
+        case BEST_CONNECTED:
+            vif_close(np);
+            break;
+        }
+        break;
+
+    case NETIF_INTERFACE_STATUS_DISCONNECTED:
+        switch ( np->backend_state )
+        {
+        case BEST_CLOSED:
+            vif_disconnect(np);
+            break;
+        case BEST_DISCONNECTED:
+        case BEST_CONNECTED:
+            vif_reset(np);
+            break;
+        }
+        break;
+
+    case NETIF_INTERFACE_STATUS_CONNECTED:
+        switch ( np->backend_state )
+        {
+        case BEST_CLOSED:
+            WPRINTK("Unexpected netif status %s in state %s\n",
+                    status_name[status->status],
+                    be_state_name[np->backend_state]);
+            vif_disconnect(np);
+            vif_connect(np, status);
+            break;
+        case BEST_DISCONNECTED:
+            vif_connect(np, status);
+            break;
+        }
+        break;
+
+    case NETIF_INTERFACE_STATUS_CHANGED:
+        /*
+         * The domain controller is notifying us that a device has been
+         * added or removed.
+         */
+        break;
+
+    default:
+        WPRINTK("Invalid netif status code %d\n", status->status);
+        break;
+    }
+    vif_show(np);
+    DPRINTK("<\n");
+}
+
+/*
+ * Handle a driver status message.
+ */
+static void netif_driver_status(netif_fe_driver_status_t *status)
+{
+    DPRINTK("> status=%d\n", status->status);
+    netctrl.up = status->status;
+    //netctrl.interface_n = status->max_handle;
+    //netctrl.connected_n = 0;
+    netctrl_connected_count();
+}
+
+/* Receive handler for control messages. */
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+
+    switch ( msg->subtype )
+    {
+    case CMSG_NETIF_FE_INTERFACE_STATUS:
+        if ( msg->length != sizeof(netif_fe_interface_status_t) )
+            goto error;
+        netif_interface_status((netif_fe_interface_status_t *)
+                               &msg->msg[0]);
+        break;
+
+    case CMSG_NETIF_FE_DRIVER_STATUS:
+        if ( msg->length != sizeof(netif_fe_driver_status_t) )
+            goto error;
+        netif_driver_status((netif_fe_driver_status_t *)
+                            &msg->msg[0]);
+        break;
+
+    error:
+    default:
+        msg->length = 0;
+        break;
+    }
+
+    ctrl_if_send_response(msg);
+}
+
+
+#if 1
+/* Wait for all interfaces to be connected.
+ *
+ * This works OK, but we'd like to use the probing mode (see below).
+ */
+static int probe_interfaces(void)
+{
+    int err = 0, conn = 0;
+    int wait_i, wait_n = 100;
+
+    DPRINTK(">\n");
+
+    for ( wait_i = 0; wait_i < wait_n; wait_i++)
+    { 
+        DPRINTK("> wait_i=%d\n", wait_i);
+        conn = netctrl_connected();
+        if(conn) break;
+        DPRINTK("> schedule_timeout...\n");
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(10);
+    }
+
+    DPRINTK("> wait finished...\n");
+    if ( conn <= 0 )
+    {
+        err = netctrl_err(-ENETDOWN);
+        WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
+    }
+
+    DPRINTK("< err=%d\n", err);
+
+    return err;
+}
+#else
+/* Probe for interfaces until no more are found.
+ *
+ * This is the mode we'd like to use, but at the moment it panics the kernel.
+*/
+static int probe_interfaces(void)
+{
+    int err = 0;
+    int wait_i, wait_n = 100;
+    ctrl_msg_t cmsg = {
+        .type    = CMSG_NETIF_FE,
+        .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
+        .length  = sizeof(netif_fe_interface_status_t),
+    };
+    netif_fe_interface_status_t msg = {};
+    ctrl_msg_t rmsg = {};
+    netif_fe_interface_status_t *reply = (void*)rmsg.msg;
+    int state = TASK_UNINTERRUPTIBLE;
+    u32 query = -1;
+
+    DPRINTK(">\n");
+
+    netctrl.interface_n = 0;
+    for ( wait_i = 0; wait_i < wait_n; wait_i++ )
+    { 
+        DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
+        msg.handle = query;
+        memcpy(cmsg.msg, &msg, sizeof(msg));
+        DPRINTK("> set_current_state...\n");
+        set_current_state(state);
+        DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+        DPRINTK("> sending...\n");
+        err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
+        DPRINTK("> err=%d\n", err);
+        if(err) goto exit;
+        DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+        if((int)reply->handle < 0){
+            // No more interfaces.
+            break;
+        }
+        query = -reply->handle - 2;
+        DPRINTK(">netif_interface_status ...\n");
+        netif_interface_status(reply);
+    }
+
+  exit:
+    if ( err )
+    {
+        err = netctrl_err(-ENETDOWN);
+        WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
+    }
+
+    DPRINTK("< err=%d\n", err);
+    return err;
+}
+
+#endif
+
+static int __init netif_init(void)
+{
+    int err = 0;
+
+    if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+         (xen_start_info.flags & SIF_NET_BE_DOMAIN) )
+        return 0;
+
+    IPRINTK("Initialising virtual ethernet driver.\n");
+    INIT_LIST_HEAD(&dev_list);
+    netctrl_init();
+    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+    send_driver_status(1);
+    err = probe_interfaces();
+    if ( err )
+        ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
+
+    DPRINTK("< err=%d\n", err);
+    return err;
+}
+
+static void vif_suspend(struct net_private *np)
+{
+    // Avoid having tx/rx stuff happen until we're ready.
+    DPRINTK(">\n");
+    free_irq(np->irq, np->dev);
+    unbind_evtchn_from_irq(np->evtchn);
+    DPRINTK("<\n");
+}
+
+static void vif_resume(struct net_private *np)
+{
+    // Connect regardless of whether the IFF_UP flag is set.
+    // Stop bad things from happening until we're back up.
+    DPRINTK(">\n");
+    np->backend_state = BEST_DISCONNECTED;
+    memset(np->tx, 0, PAGE_SIZE);
+    memset(np->rx, 0, PAGE_SIZE);
+    
+    send_interface_connect(np);
+    DPRINTK("<\n");
+}
+
+void netif_suspend(void)
+{
+#if 1 /* XXX THIS IS TEMPORARY */
+    struct list_head *ent;
+    struct net_private *np;
+    
+    DPRINTK(">\n");
+    list_for_each(ent, &dev_list){
+        np = list_entry(ent, struct net_private, list);
+        vif_suspend(np);
+    }
+    DPRINTK("<\n");
+#endif
+}
+
+void netif_resume(void)
+{
+#if 1
+    /* XXX THIS IS TEMPORARY */
+    struct list_head *ent;
+    struct net_private *np;
+
+    DPRINTK(">\n");
+    list_for_each ( ent, &dev_list )
+    {
+        np = list_entry(ent, struct net_private, list);
+        vif_resume(np);
+    }
+    DPRINTK("<\n");
+#endif     
+}
+
+
+__initcall(netif_init);
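
The {tx,rx}_skbs initialisation above threads a free list through the ring-id arrays themselves: each unused slot stores the index of the next free slot, cast to a pointer. The sketch below shows how such a chain is typically consumed and refilled; the helper names and the explicit free-list head variable are illustrative, not taken from this patch.

    /* Sketch only: pop a free ring id, and push it back when its request
     * completes.  Array entries double as "next free index" links while a
     * slot is unused. */
    static unsigned short get_free_id(void *skbs[], unsigned short *free_head)
    {
        unsigned short id = *free_head;
        *free_head = (unsigned short)(unsigned long)skbs[id]; /* follow link */
        return id;                  /* skbs[id] can now hold a live skb ptr */
    }

    static void put_free_id(void *skbs[], unsigned short *free_head,
                            unsigned short id)
    {
        skbs[id] = (void *)(unsigned long)*free_head;  /* link old head in */
        *free_head = id;
    }

While an id is in use, the same slot stores the skb pointer for that request, so completing a transmit or receive both recovers the skb and returns the id to the chain.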
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/privcmd/Makefile b/linux-2.6.9-xen-sparse/drivers/xen/privcmd/Makefile
new file mode 100644 (file)
index 0000000..e218695
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := privcmd.o
diff --git a/linux-2.6.9-xen-sparse/drivers/xen/privcmd/privcmd.c b/linux-2.6.9-xen-sparse/drivers/xen/privcmd/privcmd.c
new file mode 100644 (file)
index 0000000..42c5b13
--- /dev/null
@@ -0,0 +1,235 @@
+/******************************************************************************
+ * privcmd.c
+ * 
+ * Interface to privileged domain-0 commands.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm-xen/proc_cmd.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+#include <asm-xen/xen_proc.h>
+
+static struct proc_dir_entry *privcmd_intf;
+
+static int privcmd_ioctl(struct inode *inode, struct file *file,
+                         unsigned int cmd, unsigned long data)
+{
+    int ret = -ENOSYS;
+
+    switch ( cmd )
+    {
+    case IOCTL_PRIVCMD_HYPERCALL:
+    {
+        privcmd_hypercall_t hypercall;
+  
+        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
+            return -EFAULT;
+
+        __asm__ __volatile__ (
+            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
+            "movl  4(%%eax),%%ebx ;"
+            "movl  8(%%eax),%%ecx ;"
+            "movl 12(%%eax),%%edx ;"
+            "movl 16(%%eax),%%esi ;"
+            "movl 20(%%eax),%%edi ;"
+            "movl   (%%eax),%%eax ;"
+            TRAP_INSTR "; "
+            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
+            : "=a" (ret) : "0" (&hypercall) : "memory" );
+
+    }
+    break;
+
+    case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
+    {
+        extern int initdom_ctrlif_domcontroller_port;
+        ret = initdom_ctrlif_domcontroller_port;
+    }
+    break;
+    
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+    case IOCTL_PRIVCMD_MMAP:
+    {
+#define PRIVCMD_MMAP_SZ 32
+        privcmd_mmap_t mmapcmd;
+        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+        int i, rc;
+
+        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
+            return -EFAULT;
+
+        p = mmapcmd.entry;
+
+        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
+        {
+            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
+                return -EFAULT;
+     
+            for ( j = 0; j < n; j++ )
+            {
+                struct vm_area_struct *vma = 
+                    find_vma( current->mm, msg[j].va );
+
+                if ( !vma )
+                    return -EINVAL;
+
+                if ( msg[j].va > PAGE_OFFSET )
+                    return -EINVAL;
+
+                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
+                    return -EINVAL;
+
+                if ( (rc = direct_remap_area_pages(vma->vm_mm, 
+                                                   msg[j].va&PAGE_MASK, 
+                                                   msg[j].mfn<<PAGE_SHIFT, 
+                                                   msg[j].npages<<PAGE_SHIFT, 
+                                                   vma->vm_page_prot,
+                                                   mmapcmd.dom)) < 0 )
+                    return rc;
+            }
+        }
+        ret = 0;
+    }
+    break;
+
+    case IOCTL_PRIVCMD_MMAPBATCH:
+    {
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+        privcmd_mmapbatch_t m;
+        struct vm_area_struct *vma = NULL;
+        unsigned long *p, addr;
+        unsigned long mfn;
+        int i;
+
+        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
+        { ret = -EFAULT; goto batch_err; }
+
+        vma = find_vma( current->mm, m.addr );
+
+        if ( !vma )
+        { ret = -EINVAL; goto batch_err; }
+
+        if ( m.addr > PAGE_OFFSET )
+        { ret = -EFAULT; goto batch_err; }
+
+        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
+        { ret = -EFAULT; goto batch_err; }
+
+        u[0].ptr  = MMU_EXTENDED_COMMAND;
+        u[0].val  = MMUEXT_SET_FOREIGNDOM;
+        u[0].val |= (unsigned long)m.dom << 16;
+        v = w = &u[1];
+
+        p = m.arr;
+        addr = m.addr;
+        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
+        {
+            if ( get_user(mfn, p) )
+                return -EFAULT;
+
+            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+
+            __direct_remap_area_pages(vma->vm_mm,
+                                      addr, 
+                                      PAGE_SIZE, 
+                                      v);
+
+            if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
+                put_user( 0xF0000000 | mfn, p );
+
+            v = w;
+        }
+        ret = 0;
+        break;
+
+    batch_err:
+        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
+               ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
+        break;
+    }
+    break;
+#endif
+
+    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
+    {
+       unsigned long m2p_start_mfn = 
+           HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
+
+       if( put_user( m2p_start_mfn, (unsigned long *) data ) )
+           ret = -EFAULT;
+       else
+           ret = 0;
+    }
+    break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+    return ret;
+}
+
+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
+{
+    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+    return 0;
+}
+
+static struct file_operations privcmd_file_ops = {
+    ioctl : privcmd_ioctl,
+    mmap:   privcmd_mmap
+};
+
+
+static int __init privcmd_init(void)
+{
+    if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
+        return 0;
+
+    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+    if ( privcmd_intf != NULL )
+    {
+        privcmd_intf->owner      = THIS_MODULE;
+        privcmd_intf->nlink      = 1;
+        privcmd_intf->proc_fops  = &privcmd_file_ops;
+    }
+
+    return 0;
+}
+
+
+static void __exit privcmd_cleanup(void)
+{
+    if ( privcmd_intf == NULL ) return;
+    remove_xen_proc_entry("privcmd");
+    privcmd_intf = NULL;
+}
+
+
+module_init(privcmd_init);
+module_exit(privcmd_cleanup);
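
For context, a hypothetical user-space caller of this interface would open the proc node created by privcmd_init() and issue one of the IOCTL_PRIVCMD_* requests. The proc path and the visibility of asm-xen/proc_cmd.h to user space are assumptions here, not something this patch establishes.

    /* Hypothetical sketch: read the start MFN of the machine-to-physical
     * table via IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm-xen/proc_cmd.h>  /* IOCTL_PRIVCMD_* (assumed usable here) */

    int main(void)
    {
        unsigned long m2p_start_mfn = 0;
        int fd = open("/proc/xen/privcmd", O_RDONLY);  /* assumed path */

        if (fd < 0)
            return 1;
        if (ioctl(fd, IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN,
                  &m2p_start_mfn) == 0)
            printf("m2p table starts at mfn %#lx\n", m2p_start_mfn);
        close(fd);
        return 0;
    }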
diff --git a/linux-2.6.9-xen-sparse/include/asm-generic/pgtable.h b/linux-2.6.9-xen-sparse/include/asm-generic/pgtable.h
new file mode 100644 (file)
index 0000000..59563e9
--- /dev/null
@@ -0,0 +1,144 @@
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH
+/*
+ * Establish a new mapping:
+ *  - flush the old one
+ *  - update the page tables
+ *  - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ *
+ * Note: the old pte is known to not be writable, so we don't need to
+ * worry about dirty bits etc getting lost.
+ */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       set_pte(__ptep, __entry);                                       \
+       flush_tlb_page(__vma, __address);                               \
+} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       set_pte_atomic(__ptep, __entry);                                \
+       flush_tlb_page(__vma, __address);                               \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Largely same as above, but only sets the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+do {                                                                     \
+       set_pte(__ptep, __entry);                                         \
+       flush_tlb_page(__vma, __address);                                 \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH_NEW
+#define ptep_establish_new(__vma, __address, __ptep, __entry)          \
+do {                                                                   \
+       set_pte(__ptep, __entry);                                       \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (!pte_young(pte))
+               return 0;
+       set_pte(ptep, pte_mkold(pte));
+       return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)               \
+({                                                                     \
+       int __young = ptep_test_and_clear_young(__ptep);                \
+       if (__young)                                                    \
+               flush_tlb_page(__vma, __address);                       \
+       __young;                                                        \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (!pte_dirty(pte))
+               return 0;
+       set_pte(ptep, pte_mkclean(pte));
+       return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep)               \
+({                                                                     \
+       int __dirty = ptep_test_and_clear_dirty(__ptep);                \
+       if (__dirty)                                                    \
+               flush_tlb_page(__vma, __address);                       \
+       __dirty;                                                        \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       pte_clear(ptep);
+       return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep)                     \
+({                                                                     \
+       pte_t __pte = ptep_get_and_clear(__ptep);                       \
+       flush_tlb_page(__vma, __address);                               \
+       __pte;                                                          \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_MKDIRTY
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+       pte_t old_pte = *ptep;
+       set_pte(ptep, pte_mkdirty(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr)      pgd_offset(mm, addr)
+#endif
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
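
These defaults exist so that generic memory-management code can manipulate ptes without knowing whether the architecture (or, in this tree, the Xen hypervisor interface) needs special handling. A sketch of the usual caller pattern follows; the function and variable names are illustrative, not part of this patch.

    /* Sketch: tear down one mapping and propagate its dirty/accessed state.
     * ptep_clear_flush() atomically clears the pte and flushes the TLB, so
     * no hardware update of the dirty bit can be lost in between. */
    static void zap_one_pte(struct vm_area_struct *vma, unsigned long address,
                            pte_t *ptep, struct page *page)
    {
            pte_t old = ptep_clear_flush(vma, address, ptep);

            if (pte_dirty(old))
                    set_page_dirty(page);
            if (pte_young(old))
                    mark_page_accessed(page);
    }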
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/desc.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/desc.h
new file mode 100644 (file)
index 0000000..3cebc41
--- /dev/null
@@ -0,0 +1,133 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+#include <asm/segment.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/mmu.h>
+
+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
+struct Xgt_desc_struct {
+       unsigned short size;
+       unsigned long address __attribute__((packed));
+       unsigned short pad;
+} __attribute__ ((packed));
+
+extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+
+#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
+
+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+       "movw %%ax,2(%2)\n\t" \
+       "rorl $16,%%eax\n\t" \
+       "movb %%al,4(%2)\n\t" \
+       "movb %4,5(%2)\n\t" \
+       "movb $0,6(%2)\n\t" \
+       "movb %%ah,7(%2)\n\t" \
+       "rorl $16,%%eax" \
+       : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+{
+       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
+               offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
+           (int)addr, ((size << 3)-1), 0x82);
+}
+
+#define LDT_entry_a(info) \
+       ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+       (((info)->base_addr & 0xff000000) | \
+       (((info)->base_addr & 0x00ff0000) >> 16) | \
+       ((info)->limit & 0xf0000) | \
+       (((info)->read_exec_only ^ 1) << 9) | \
+       ((info)->contents << 10) | \
+       (((info)->seg_not_present ^ 1) << 15) | \
+       ((info)->seg_32bit << 22) | \
+       ((info)->limit_in_pages << 23) | \
+       ((info)->useable << 20) | \
+       0x7000)
+
+#define LDT_empty(info) (\
+       (info)->base_addr       == 0    && \
+       (info)->limit           == 0    && \
+       (info)->contents        == 0    && \
+       (info)->read_exec_only  == 1    && \
+       (info)->seg_32bit       == 0    && \
+       (info)->limit_in_pages  == 0    && \
+       (info)->seg_not_present == 1    && \
+       (info)->useable         == 0    )
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+       C(0); C(1); C(2);
+#undef C
+}
+
+static inline void clear_LDT(void)
+{
+       int cpu = get_cpu();
+
+       /*
+        * NB. We load the default_ldt for lcall7/27 handling on demand, as
+        * it slows down context switching. No one uses it anyway.
+        */
+       cpu = cpu;              /* XXX avoid compiler warning */
+       queue_set_ldt(0UL, 0);
+       put_cpu();
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+       void *segments = pc->ldt;
+       int count = pc->size;
+
+       if (likely(!count))
+               segments = NULL;
+
+       queue_set_ldt((unsigned long)segments, count);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+       int cpu = get_cpu();
+       load_LDT_nolock(pc, cpu);
+       put_cpu();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
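
The LDT_entry_a/LDT_entry_b macros above encode a struct user_desc (from asm/ldt.h) into the two 32-bit halves of a segment descriptor, which is how code such as write_ldt() builds entries before installing them. A hedged sketch of that encoding step; the helper name is illustrative.

    /* Sketch: turn a user-supplied segment description into descriptor words.
     * Under Xen the result is installed via HYPERVISOR_update_descriptor()
     * or queue_set_ldt() rather than by writing the GDT/LDT directly. */
    static void encode_descriptor(const struct user_desc *info, u32 desc[2])
    {
            if (LDT_empty(info)) {
                    desc[0] = 0;
                    desc[1] = 0;
                    return;
            }
            desc[0] = LDT_entry_a(info);  /* base 15:0, limit 15:0 */
            desc[1] = LDT_entry_b(info);  /* base 31:16, flags, limit 19:16 */
    }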
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
new file mode 100644 (file)
index 0000000..43b4f57
--- /dev/null
@@ -0,0 +1,177 @@
+#ifndef _ASM_I386_DMA_MAPPING_H
+#define _ASM_I386_DMA_MAPPING_H
+
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+       flush_write_buffers();
+       return virt_to_bus(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+                enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+          enum dma_data_direction direction)
+{
+       int i;
+
+       BUG_ON(direction == DMA_NONE);
+
+       for (i = 0; i < nents; i++ ) {
+               BUG_ON(!sg[i].page);
+
+               sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+       }
+
+       flush_write_buffers();
+       return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+            size_t size, enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+       return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+            enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                             unsigned long offset, size_t size,
+                             enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+                   enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+                   enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+       return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if(mask < 0x00ffffff)
+                return 0;
+
+       return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+       if(!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+
+       *dev->dma_mask = mask;
+
+       return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+       /* no easy way to get cache size on all x86, so return the
+        * maximum possible, to be safe */
+       return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d)   (1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+              enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                           dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+                                 dma_addr_t device_addr, size_t size);
+
+#endif
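
On this Xen port, dma_map_single() resolves to virt_to_bus(), which (see io.h later in this patch) translates through phys_to_machine(), so drivers that go through the DMA API automatically hand machine addresses to hardware. A hypothetical driver fragment showing the intended calling convention; dev, buf and len are placeholders.

    /* Illustrative only: map a kernel buffer for device reads, then unmap. */
    static int send_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(handle))
                    return -EIO;   /* cannot happen with this implementation */
            /* ... program the device with 'handle', wait for completion ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }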
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/fixmap.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/fixmap.h
new file mode 100644 (file)
index 0000000..794a6cd
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+
+/* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+#define __FIXADDR_TOP  (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+       FIX_HOLE,
+       FIX_VSYSCALL,
+#ifdef CONFIG_X86_LOCAL_APIC
+       FIX_APIC_BASE,  /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+       FIX_IO_APIC_BASE_0,
+       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+       FIX_CO_CPU,     /* Cobalt timer */
+       FIX_CO_APIC,    /* Cobalt APIC Redirection Table */ 
+       FIX_LI_PCIA,    /* Lithium PCI Bridge A */
+       FIX_LI_PCIB,    /* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+       FIX_F00F_IDT,   /* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+       FIX_CYCLONE_TIMER, /*cyclone timer register*/
+#endif 
+#ifdef CONFIG_HIGHMEM
+       FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_ACPI_BOOT
+       FIX_ACPI_BEGIN,
+       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
+#ifdef CONFIG_PCI_MMCONFIG
+       FIX_PCIE_MCFG,
+#endif
+       FIX_SHARED_INFO,
+       FIX_GNTTAB,
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define NR_FIX_ISAMAPS 256
+       FIX_ISAMAP_END,
+       FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
+#endif
+       __end_of_permanent_fixed_addresses,
+       /* temporary boot-time mappings, used before ioremap() is functional */
+#define NR_FIX_BTMAPS  16
+       FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+       FIX_WP_TEST,
+       __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+                                       unsigned long phys, pgprot_t flags);
+extern void __set_fixmap_ma (enum fixed_addresses idx,
+                                       unsigned long mach, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+               __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma(idx, phys) \
+               __set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma_ro(idx, phys) \
+               __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+               __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+               __set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP    ((unsigned long)__FIXADDR_TOP)
+
+#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START     (__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END       (FIXADDR_USER_START + PAGE_SIZE)
+
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+       /*
+        * this branch gets completely eliminated after inlining,
+        * except when someone tries to use fixaddr indices in an
+        * illegal way. (such as mixing up address types or using
+        * out-of-range indices).
+        *
+        * If it doesn't get removed, the linker will complain
+        * loudly with a reasonably clear error message..
+        */
+       if (idx >= __end_of_fixed_addresses)
+               __this_fixmap_does_not_exist();
+
+        return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+       BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+       return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
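
FIX_SHARED_INFO and FIX_GNTTAB are Xen additions: they reserve fixed virtual slots for pages whose machine addresses are supplied by the hypervisor, and set_fixmap_ma() installs such a machine frame directly. A hedged sketch of the usual pattern; the function name and the shared_info_t type come from the Xen interface headers, not from this file.

    /* Sketch: map the hypervisor's shared-info machine frame at its fixed
     * virtual address and return a pointer to it. */
    static shared_info_t *map_shared_info(unsigned long shared_info_ma)
    {
            set_fixmap_ma(FIX_SHARED_INFO, shared_info_ma);
            return (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
    }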
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/highmem.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/highmem.h
new file mode 100644 (file)
index 0000000..f608a13
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *                   Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with 
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#include <asm/tlbflush.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+extern void kmap_init(void);
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#if NR_CPUS <= 32
+#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23))
+#else
+#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23) - 0x200000UL)
+#endif
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void * FASTCALL(kmap_high(struct page *page));
+extern void FASTCALL(kunmap_high(struct page *page));
+
+void *kmap(struct page *page);
+void kunmap(struct page *page);
+void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_pte(struct page *page, enum km_type type);
+void kunmap_atomic(void *kvaddr, enum km_type type);
+void kunmap_atomic_force(void *kvaddr, enum km_type type);
+struct page *kmap_atomic_to_page(void *ptr);
+
+#define flush_cache_kmaps()    do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
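
The kmap_atomic()/kunmap_atomic() pair declared here is the normal way to touch a page that may live in high memory without sleeping. A minimal, illustrative example:

    /* Zero a (possibly highmem) page through a temporary atomic mapping.
     * KM_USER0 is one of the per-CPU kmap slots from asm/kmap_types.h. */
    static void zero_page_atomic(struct page *page)
    {
            void *vaddr = kmap_atomic(page, KM_USER0);

            memset(vaddr, 0, PAGE_SIZE);
            kunmap_atomic(vaddr, KM_USER0);
    }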
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h
new file mode 100644 (file)
index 0000000..0a2de0b
--- /dev/null
@@ -0,0 +1,447 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ *             Linus
+ */
+
+ /*
+  *  Bit simplified and optimized by Jan Hubicka
+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+  *
+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+  *  isa_read[wl] and isa_write[wl] fixed
+  *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+  */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
+
+#ifdef __KERNEL__
+
+#include <asm-generic/iomap.h>
+
+#include <linux/vmalloc.h>
+#include <asm/fixmap.h>
+
+/**
+ *     virt_to_phys    -       map virtual addresses to physical
+ *     @address: address to remap
+ *
+ *     The returned physical address is the physical (CPU) mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses directly mapped or allocated via kmalloc. 
+ *
+ *     This function does not give bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+       return __pa(address);
+}
+
+/**
+ *     phys_to_virt    -       map physical address to virtual
+ *     @address: address to remap
+ *
+ *     The returned virtual address is a current CPU mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses that have a kernel mapping
+ *
+ *     This function does not handle bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+       return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)       (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
+       (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+        ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == bvec_to_pseudophys((vec2))))
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address. 
+ */
+
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+       return __ioremap(offset, size, 0);
+}
+
+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - ((_x) >> PAGE_SHIFT))
+#else
+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
+#endif
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
+#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+       return *(volatile unsigned char __force *) addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+       return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+       return *(volatile unsigned int __force *) addr;
+}
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+       *(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+       *(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+       *(volatile unsigned int __force *) addr = b;
+}
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+       memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, volatile void __iomem *src, int count)
+{
+       __memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+       __memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c)           memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c)       memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c)         memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d)           eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d)       eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ *     check_signature         -       find BIOS signatures
+ *     @io_addr: mmio address to check 
+ *     @signature:  signature block
+ *     @length: length of signature
+ *
+ *     Perform a signature comparison with the mmio address io_addr. This
+ *     address should have been obtained by ioremap.
+ *     Returns 1 on a match.
+ */
+static inline int check_signature(volatile void __iomem * io_addr,
+       const unsigned char *signature, int length)
+{
+       int retval = 0;
+       do {
+               if (readb(io_addr) != *signature)
+                       goto out;
+               io_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
+/**
+ *     isa_check_signature             -       find BIOS signatures
+ *     @io_addr: mmio address to check 
+ *     @signature:  signature block
+ *     @length: length of signature
+ *
+ *     Perform a signature comparison with the ISA mmio address io_addr.
+ *     Returns 1 on a match.
+ *
+ *     This function is deprecated. New drivers should use ioremap and
+ *     check_signature.
+ */
+
+static inline int isa_check_signature(unsigned long io_addr,
+       const unsigned char *signature, int length)
+{
+       int retval = 0;
+       do {
+               if (isa_readb(io_addr) != *signature)
+                       goto out;
+               io_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
+/*
+ *     Cache management
+ *
+ *     This needed for two cases
+ *     1. Out of order aware processors
+ *     2. Accidentally out of order processors (PPro errata #51)
+ */
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+       __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size)            flush_write_buffers()
+#define dma_cache_wback(_start,_size)          flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size)      flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size)            do { } while (0)
+#define dma_cache_wback(_start,_size)          do { } while (0)
+#define dma_cache_wback_inv(_start,_size)      do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#elif defined(__UNSAFE_IO__)
+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
+#else
+#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n"                \
+                      "2:\n"                           \
+                      ".section __ex_table,\"a\"\n\t"  \
+                      ".align 4\n\t"                   \
+                      ".long 1b,2b\n"                  \
+                      ".previous"
+#endif
+
+static inline void slow_down_io(void) {
+       __asm__ __volatile__(
+               __SLOW_DOWN_IO
+#ifdef REALLY_SLOW_IO
+               __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+               : : );
+}
+
+#ifdef CONFIG_X86_NUMAQ
+extern void *xquad_portio;    /* Where the IO area was mapped */
+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
+       if (xquad_portio) \
+               write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
+       else \
+               out##bwl##_local(value, port); \
+} \
+static inline void out##bwl(unsigned type value, int port) { \
+       out##bwl##_quad(value, port, 0); \
+} \
+static inline unsigned type in##bwl##_quad(int port, int quad) { \
+       if (xquad_portio) \
+               return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
+       else \
+               return in##bwl##_local(port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+       return in##bwl##_quad(port, 0); \
+}
+#else 
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl(unsigned type value, int port) { \
+       out##bwl##_local(value, port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+       return in##bwl##_local(port); \
+}
+#endif
+
+
+#if __UNSAFE_IO__
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+       __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+       unsigned type value; \
+       __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
+       return value; \
+}
+#else
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+       __asm__ __volatile__("1: out" #bwl " %" #bw "0, %w1\n"          \
+                            "2:\n"                                     \
+                            ".section __ex_table,\"a\"\n\t"            \
+                            ".align 4\n\t"                             \
+                            ".long 1b,2b\n"                            \
+                            ".previous" : : "a"(value), "Nd"(port));   \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+       unsigned type value; \
+       __asm__ __volatile__("1:in" #bwl " %w1, %" #bw "0\n"            \
+                            "2:\n"                                     \
+                            ".section .fixup,\"ax\"\n"                 \
+                            "3: mov" #bwl " $~0,%" #bw "0\n\t"         \
+                            "jmp 2b\n"                                 \
+                            ".previous\n"                              \
+                            ".section __ex_table,\"a\"\n\t"            \
+                            ".align 4\n\t"                             \
+                            ".long 1b,3b\n"                            \
+                            ".previous" : "=a"(value) : "Nd"(port));   \
+       return value; \
+}
+#endif
+
+#define BUILDIO(bwl,bw,type) \
+____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local_p(unsigned type value, int port) { \
+       out##bwl##_local(value, port); \
+       slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_local_p(int port) { \
+       unsigned type value = in##bwl##_local(port); \
+       slow_down_io(); \
+       return value; \
+} \
+__BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_p(unsigned type value, int port) { \
+       out##bwl(value, port); \
+       slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_p(int port) { \
+       unsigned type value = in##bwl(port); \
+       slow_down_io(); \
+       return value; \
+} \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
+       __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
+       __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+}
+
+BUILDIO(b,b,char)
+BUILDIO(w,w,short)
+BUILDIO(l,,int)
+
+#endif
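
The readX/writeX accessors above operate on cookies returned by ioremap(), and under Xen page_to_phys()/virt_to_bus() already yield machine addresses, so MMIO code keeps its usual shape. A hypothetical driver fragment; the register offset and sizes are placeholders.

    /* Map a device's MMIO region, poke one register, and tear down. */
    static int poke_device(unsigned long bar_phys, unsigned long bar_len)
    {
            void __iomem *regs = ioremap_nocache(bar_phys, bar_len);

            if (regs == NULL)
                    return -ENOMEM;
            writel(0x1, regs + 0x10);   /* hypothetical control register */
            (void)readl(regs + 0x10);   /* read back to flush posted write */
            iounmap(regs);
            return 0;
    }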
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
new file mode 100644 (file)
index 0000000..e79ce71
--- /dev/null
@@ -0,0 +1,83 @@
+/* defines for inline arch setup functions */
+
+#include <asm/apic.h>
+
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ * @regs:      standard registers from interrupt
+ *
+ * Description:
+ *     This hook is called immediately after the timer interrupt is ack'd.
+ *     Its primary purpose is to allow architectures that don't possess
+ *     individual per CPU clocks (like the CPU APICs supply) to broadcast the
+ *     timer interrupt as a means of triggering reschedules etc.
+ **/
+
+static inline void do_timer_interrupt_hook(struct pt_regs *regs)
+{
+       do_timer(regs);
+/*
+ * In the SMP case we use the local APIC timer interrupt to do the
+ * profiling, except when we simulate SMP mode on a uniprocessor
+ * system, in that case we have to call the local interrupt handler.
+ */
+#ifndef CONFIG_X86_LOCAL_APIC
+       if (regs)
+               profile_tick(CPU_PROFILING, regs);
+#else
+       if (regs && !using_apic_timer)
+               smp_local_timer_interrupt(regs);
+#endif
+}
+
+
+/* you can safely undefine this if you don't have the Neptune chipset */
+
+#define BUGGY_NEPTUN_TIMER
+
+/**
+ * do_timer_overflow - process a detected timer overflow condition
+ * @count:     hardware timer interrupt count on overflow
+ *
+ * Description:
+ *     This call is invoked when the jiffies count has not incremented but
+ *     the hardware timer interrupt has.  It means that a timer tick interrupt
+ *     came along while the previous one was pending, thus a tick was missed
+ **/
+static inline int do_timer_overflow(int count)
+{
+       int i;
+
+       spin_lock(&i8259A_lock);
+       /*
+        * This is tricky when I/O APICs are used;
+        * see do_timer_interrupt().
+        */
+       i = inb(0x20);
+       spin_unlock(&i8259A_lock);
+       
+       /* assumption about timer being IRQ0 */
+       if (i & 0x01) {
+               /*
+                * We cannot detect lost timer interrupts ... 
+                * well, that's why we call them lost, don't we? :)
+                * [hmm, on the Pentium and Alpha we can ... sort of]
+                */
+               count -= LATCH;
+       } else {
+#ifdef BUGGY_NEPTUN_TIMER
+               /*
+                * for the Neptune bug we know that the 'latch'
+                * command doesn't latch the high and low value
+                * of the counter atomically. Thus we have to
+                * subtract 256 from the counter
+                * ... funny, isn't it? :)
+                */
+               
+               count -= 256;
+#else
+               printk("do_slow_gettimeoffset(): hardware timer problem?\n");
+#endif
+       }
+       return count;
+}
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
new file mode 100644 (file)
index 0000000..a96d9f6
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/io_ports.h
+ *
+ *  Machine specific IO port address definition for generic.
+ *  Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_IO_PORTS_H
+#define _MACH_IO_PORTS_H
+
+/* i8253A PIT registers */
+#define PIT_MODE               0x43
+#define PIT_CH0                        0x40
+#define PIT_CH2                        0x42
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD         0x20
+#define PIC_MASTER_IMR         0x21
+#define PIC_MASTER_ISR         PIC_MASTER_CMD
+#define PIC_MASTER_POLL                PIC_MASTER_ISR
+#define PIC_MASTER_OCW3                PIC_MASTER_ISR
+#define PIC_SLAVE_CMD          0xa0
+#define PIC_SLAVE_IMR          0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR         2
+#define MASTER_ICW4_DEFAULT    0x01
+#define SLAVE_ICW4_DEFAULT     0x01
+#define PIC_ICW4_AEOI          2
+
+#endif /* !_MACH_IO_PORTS_H */
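
These constants are consumed by the arch timer and PIC setup code. As an illustration, programming PIT channel 0 for the periodic HZ tick typically looks like the sketch below; LATCH is the standard divisor from linux/timex.h and the function name is illustrative.

    /* Sketch: binary mode, rate generator (mode 2), LSB then MSB, channel 0. */
    static void setup_pit_channel0(void)
    {
            outb_p(0x34, PIT_MODE);
            outb_p(LATCH & 0xff, PIT_CH0);   /* low byte of divisor */
            outb(LATCH >> 8, PIT_CH0);       /* high byte of divisor */
    }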
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
new file mode 100644 (file)
index 0000000..b8384ef
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ *     FIRST_EXTERNAL_VECTOR:
+ *             The first free place for external interrupts
+ *
+ *     SYSCALL_VECTOR:
+ *             The IRQ vector through which a syscall makes the
+ *             user-to-kernel transition.
+ *
+ *     TIMER_IRQ:
+ *             The IRQ number the timer interrupt comes in at.
+ *
+ *     NR_IRQS:
+ *             The total number of interrupt vectors (including all the
+ *             architecture specific interrupts) needed.
+ *
+ */                    
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR  0x20
+
+#define SYSCALL_VECTOR         0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ *  some of the following vectors are 'rare', they are merged
+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ *  TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR   0xff
+#define ERROR_APIC_VECTOR      0xfe
+#define INVALIDATE_TLB_VECTOR  0xfd
+#define RESCHEDULE_VECTOR      0xfc
+#define CALL_FUNCTION_VECTOR   0xfb
+
+#define THERMAL_APIC_VECTOR    0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR     0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR    0x31
+#define FIRST_SYSTEM_VECTOR    0xef
+
+/*  #define TIMER_IRQ _EVENT_TIMER */
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+#if 0
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#ifdef CONFIG_PCI_USE_VECTOR
+#define NR_IRQS FIRST_SYSTEM_VECTOR
+#define NR_IRQ_VECTORS NR_IRQS
+#else
+#ifdef CONFIG_X86_IO_APIC
+#define NR_IRQS 224
+# if (224 >= 32 * NR_CPUS)
+# define NR_IRQ_VECTORS NR_IRQS
+# else
+# define NR_IRQ_VECTORS (32 * NR_CPUS)
+# endif
+#else
+#define NR_IRQS 16
+#define NR_IRQ_VECTORS NR_IRQS
+#endif
+#endif
+#endif
+
+#define FPU_IRQ                        13
+
+#define        FIRST_VM86_IRQ          3
+#define LAST_VM86_IRQ          15
+#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
+
+/*
+ * The flat IRQ space is divided into two regions:
+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
+ *     if we have physical device-access privilege. This region is at the 
+ *     start of the IRQ space so that existing device drivers do not need
+ *     to be modified to translate physical IRQ numbers into our IRQ space.
+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ *     are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE   0
+#define NR_PIRQS  128
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS  128
+
+#define NR_IRQS   (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define pirq_to_irq(_x)   ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x)   ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+
+#ifndef __ASSEMBLY__
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int  bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int  bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+
+extern void irq_suspend(void);
+extern void irq_resume(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
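
The flat IRQ space above puts NR_PIRQS physical IRQs first and NR_DYNIRQS dynamically bound ones after them; the bind functions hand back IRQ numbers in the dynamic region. A usage sketch, not part of this patch, assuming a VIRQ number such as VIRQ_TIMER from the Xen interface headers, <linux/interrupt.h>, and a hypothetical stub handler:

    static irqreturn_t timer_virq_handler_sketch(int irq, void *dev_id,
                                                 struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int bind_timer_virq_sketch(void)
    {
            int irq = bind_virq_to_irq(VIRQ_TIMER);  /* lands in the DYNIRQ region */

            if (irq < 0)
                    return irq;
            /* dynirq_to_irq()/irq_to_dynirq() convert between the dynamic index
             * and the flat IRQ number if the index itself is needed. */
            return request_irq(irq, timer_virq_handler_sketch, SA_INTERRUPT,
                               "sketch-timer", NULL);
    }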
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
new file mode 100644 (file)
index 0000000..6b5dadc
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+#define MAX_MP_BUSSES 32
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
new file mode 100644 (file)
index 0000000..521e227
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/mach_reboot.h
+ *
+ *  Machine specific reboot functions for generic.
+ *  Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_REBOOT_H
+#define _MACH_REBOOT_H
+
+static inline void kb_wait(void)
+{
+       int i;
+
+       for (i = 0; i < 0x10000; i++)
+               if ((inb_p(0x64) & 0x02) == 0)
+                       break;
+}
+
+static inline void mach_reboot(void)
+{
+       int i;
+       for (i = 0; i < 100; i++) {
+               kb_wait();
+               udelay(50);
+               outb(0xfe, 0x64);         /* pulse reset low */
+               udelay(50);
+       }
+}
+
+#endif /* !_MACH_REBOOT_H */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
new file mode 100644 (file)
index 0000000..b749aa4
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ *  include/asm-i386/mach-default/mach_time.h
+ *
+ *  Machine specific set RTC function for generic.
+ *  Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TIME_H
+#define _MACH_TIME_H
+
+#include <linux/mc146818rtc.h>
+
+/* for check timing call set_rtc_mmss() 500ms     */
+/* used in arch/i386/time.c::do_timer_interrupt() */
+#define USEC_AFTER     500000
+#define USEC_BEFORE    500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ *      sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mach_set_rtc_mmss(unsigned long nowtime)
+{
+       int retval = 0;
+       int real_seconds, real_minutes, cmos_minutes;
+       unsigned char save_control, save_freq_select;
+
+       save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+       CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+       save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+       cmos_minutes = CMOS_READ(RTC_MINUTES);
+       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+               BCD_TO_BIN(cmos_minutes);
+
+       /*
+        * since we're only adjusting minutes and seconds,
+        * don't interfere with hour overflow. This avoids
+        * messing with unknown time zones but requires your
+        * RTC not to be off by more than 15 minutes
+        */
+       real_seconds = nowtime % 60;
+       real_minutes = nowtime / 60;
+       if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+               real_minutes += 30;             /* correct for half hour time zone */
+       real_minutes %= 60;
+
+       if (abs(real_minutes - cmos_minutes) < 30) {
+               if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+                       BIN_TO_BCD(real_seconds);
+                       BIN_TO_BCD(real_minutes);
+               }
+               CMOS_WRITE(real_seconds,RTC_SECONDS);
+               CMOS_WRITE(real_minutes,RTC_MINUTES);
+       } else {
+               printk(KERN_WARNING
+                      "set_rtc_mmss: can't update from %d to %d\n",
+                      cmos_minutes, real_minutes);
+               retval = -1;
+       }
+
+       /* The following flags have to be released exactly in this order,
+        * otherwise the DS12887 (popular MC146818A clone with integrated
+        * battery and quartz) will not reset the oscillator and will not
+        * update precisely 500 ms later. You won't find this mentioned in
+        * the Dallas Semiconductor data sheets, but who believes data
+        * sheets anyway ...                           -- Markus Kuhn
+        */
+       CMOS_WRITE(save_control, RTC_CONTROL);
+       CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+       return retval;
+}
+
+static inline unsigned long mach_get_cmos_time(void)
+{
+       unsigned int year, mon, day, hour, min, sec;
+       int i;
+
+       /* The Linux interpretation of the CMOS clock register contents:
+        * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+        * RTC registers show the second which has precisely just started.
+        * Let's hope other operating systems interpret the RTC the same way.
+        */
+       /* read RTC exactly on falling edge of update flag */
+       for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+               if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+                       break;
+       for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+               if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+                       break;
+       do { /* Isn't this overkill ? UIP above should guarantee consistency */
+               sec = CMOS_READ(RTC_SECONDS);
+               min = CMOS_READ(RTC_MINUTES);
+               hour = CMOS_READ(RTC_HOURS);
+               day = CMOS_READ(RTC_DAY_OF_MONTH);
+               mon = CMOS_READ(RTC_MONTH);
+               year = CMOS_READ(RTC_YEAR);
+       } while (sec != CMOS_READ(RTC_SECONDS));
+       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+         {
+           BCD_TO_BIN(sec);
+           BCD_TO_BIN(min);
+           BCD_TO_BIN(hour);
+           BCD_TO_BIN(day);
+           BCD_TO_BIN(mon);
+           BCD_TO_BIN(year);
+         }
+       if ((year += 1900) < 1970)
+               year += 100;
+
+       return mktime(year, mon, day, hour, min, sec);
+}
+
+#endif /* !_MACH_TIME_H */
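
The `(abs(real_minutes - cmos_minutes) + 15)/30 & 1` test in mach_set_rtc_mmss() rounds the minute difference to the nearest half hour and adds 30 minutes when the system clock sits in a half-hour-offset time zone relative to the CMOS clock. A small worked example (illustrative values only):

    /* real_minutes = 5, cmos_minutes = 33
     *   abs(5 - 33) = 28;  (28 + 15) / 30 = 1, which is odd
     *   => real_minutes += 30 -> 35; 35 % 60 = 35
     *   abs(35 - 33) = 2 < 30, so the write to RTC_MINUTES proceeds. */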
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
new file mode 100644 (file)
index 0000000..4b9703b
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *  include/asm-i386/mach-default/mach_timer.h
+ *
+ *  Machine specific calibrate_tsc() for generic.
+ *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC ------- 
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _MACH_TIMER_H
+#define _MACH_TIMER_H
+
+#define CALIBRATE_LATCH        (5 * LATCH)
+
+static inline void mach_prepare_counter(void)
+{
+       /* Set the Gate high, disable speaker */
+       outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+       /*
+        * Now let's take care of CTC channel 2
+        *
+        * Set the Gate high, program CTC channel 2 for mode 0,
+        * (interrupt on terminal count mode), binary count,
+        * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+        *
+        * Some devices need a delay here.
+        */
+       outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
+       outb_p(CALIBRATE_LATCH & 0xff, 0x42);   /* LSB of count */
+       outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+       unsigned long count = 0;
+       do {
+               count++;
+       } while ((inb_p(0x61) & 0x20) == 0);
+       *count_p = count;
+}
+
+#endif /* !_MACH_TIMER_H */
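
The two helpers above bracket the usual TSC calibration: channel 2 is loaded with CALIBRATE_LATCH (5 * LATCH, roughly 50 ms at the PIT's 1.193182 MHz clock) and mach_countup() spins until the OUT2 bit of port 0x61 goes high. A hedged sketch of the loop a timer_tsc-style caller wraps around them, not part of this patch (rdtscll() is the TSC read macro added by this patch in asm-i386/msr.h):

    static unsigned long calibrate_tsc_sketch(void)
    {
            unsigned long long start, end;
            unsigned long count;

            mach_prepare_counter();          /* load PIT channel 2 with CALIBRATE_LATCH */
            rdtscll(start);                  /* TSC before the ~50 ms busy wait */
            mach_countup(&count);            /* spin until OUT2 (port 0x61 bit 5) rises */
            rdtscll(end);

            /* (end - start) TSC cycles elapsed over CALIBRATE_LATCH PIT ticks;
             * the real calibrate_tsc() scales this into 2^32 / (TSC clocks per usec). */
            return (unsigned long)(end - start);
    }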
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
new file mode 100644 (file)
index 0000000..c98c288
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ *  include/asm-i386/mach-default/mach_traps.h
+ *
+ *  Machine specific NMI handling for generic.
+ *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TRAPS_H
+#define _MACH_TRAPS_H
+
+static inline void clear_mem_error(unsigned char reason)
+{
+       reason = (reason & 0xf) | 4;
+       outb(reason, 0x61);
+}
+
+static inline unsigned char get_nmi_reason(void)
+{
+       return inb(0x61);
+}
+
+static inline void reassert_nmi(void)
+{
+       outb(0x8f, 0x70);
+       inb(0x71);              /* dummy */
+       outb(0x0f, 0x70);
+       inb(0x71);              /* dummy */
+}
+
+#endif /* !_MACH_TRAPS_H */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/pci-functions.h
new file mode 100644 (file)
index 0000000..ed0bab4
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ *     PCI BIOS function numbering for conventional PCI BIOS 
+ *     systems
+ */
+
+#define PCIBIOS_PCI_FUNCTION_ID        0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT       0xb101
+#define PCIBIOS_FIND_PCI_DEVICE                0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE    0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE       0xb108
+#define PCIBIOS_READ_CONFIG_WORD       0xb109
+#define PCIBIOS_READ_CONFIG_DWORD      0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE      0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD      0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD     0xb10d
+#define PCIBIOS_GET_ROUTING_OPTIONS    0xb10e
+#define PCIBIOS_SET_PCI_HW_INT         0xb10f
+
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
new file mode 100644 (file)
index 0000000..8f6c7b2
--- /dev/null
@@ -0,0 +1,45 @@
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ *     This is included late in kernel/setup.c so that it can make
+ *     use of all of the static functions.
+ **/
+
+static char * __init machine_specific_memory_setup(void)
+{
+       char *who;
+       unsigned long start_pfn, max_pfn;
+
+       who = "Xen";
+
+       start_pfn = 0;
+       max_pfn = xen_start_info.nr_pages;
+
+       e820.nr_map = 0;
+       add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
+
+       return who;
+}
+
+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+{
+       clear_bit(X86_FEATURE_VME, c->x86_capability);
+       clear_bit(X86_FEATURE_DE, c->x86_capability);
+       clear_bit(X86_FEATURE_PSE, c->x86_capability);
+       clear_bit(X86_FEATURE_PGE, c->x86_capability);
+       clear_bit(X86_FEATURE_MTRR, c->x86_capability);
+       clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+}
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+
+static void __init machine_specific_arch_setup(void)
+{
+       HYPERVISOR_set_callbacks(
+           __KERNEL_CS, (unsigned long)hypervisor_callback,
+           __KERNEL_CS, (unsigned long)failsafe_callback);
+
+       machine_specific_modify_cpu_capabilities(&boot_cpu_data);
+}
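
machine_specific_memory_setup() above reports the whole pseudo-physical range handed over by Xen as a single E820_RAM region. As an illustrative calculation: with xen_start_info.nr_pages = 32768, the region runs from PFN_PHYS(0) = 0 to PFN_PHYS(32768) = 32768 << 12 = 0x08000000, i.e. 128 MB of RAM.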
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
new file mode 100644 (file)
index 0000000..b18df68
--- /dev/null
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+#define ARCH_SETUP machine_specific_arch_setup();
+
+static void __init machine_specific_arch_setup(void);
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
new file mode 100644 (file)
index 0000000..159310f
--- /dev/null
@@ -0,0 +1,75 @@
+#ifndef __I386_SCHED_H
+#define __I386_SCHED_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+       unsigned cpu = smp_processor_id();
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+                            struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       int cpu = smp_processor_id();
+
+       if (likely(prev != next)) {
+               /* stop flush ipis for the previous mm */
+               cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
+               cpu_set(cpu, next->cpu_vm_mask);
+
+               /* Re-load page tables */
+               load_cr3(next->pgd);
+
+               /*
+                * load the LDT, if the LDT is different:
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_LDT_nolock(&next->context, cpu);
+       }
+#ifdef CONFIG_SMP
+       else {
+               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+               BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+
+               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+                       /* We were in lazy tlb mode and leave_mm disabled 
+                        * tlb flush IPI delivery. We must reload %cr3.
+                        */
+                       load_cr3(next->pgd);
+                       load_LDT_nolock(&next->context, cpu);
+               }
+       }
+#endif
+}
+
+#define deactivate_mm(tsk, mm) \
+       asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+
+#define activate_mm(prev, next) \
+do { \
+       switch_mm((prev),(next),NULL); \
+       flush_page_update_queue();                      \
+} while ( 0 )
+
+#endif
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h
new file mode 100644 (file)
index 0000000..4cc4f31
--- /dev/null
@@ -0,0 +1,272 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+#include <linux/smp.h>
+#include <asm-xen/hypervisor.h>
+
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(_msr,_val1,_val2) do { \
+       dom0_op_t op; \
+       op.cmd = DOM0_MSR; \
+       op.u.msr.write = 0; \
+       op.u.msr.msr = (_msr); \
+       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+       HYPERVISOR_dom0_op(&op); \
+       (_val1) = op.u.msr.out1; \
+       (_val2) = op.u.msr.out2; \
+} while(0)
+
+#define wrmsr(_msr,_val1,_val2) do { \
+       dom0_op_t op; \
+       op.cmd = DOM0_MSR; \
+       op.u.msr.write = 1; \
+       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+       op.u.msr.msr = (_msr); \
+       op.u.msr.in1 = (_val1); \
+       op.u.msr.in2 = (_val2); \
+       HYPERVISOR_dom0_op(&op); \
+} while(0)
+
+#define rdmsrl(msr,val) do { \
+       unsigned long l__,h__; \
+       rdmsr (msr, l__, h__);  \
+       val = l__;  \
+       val |= ((u64)h__<<32);  \
+} while(0)
+
+static inline void wrmsrl (unsigned long msr, unsigned long long val)
+{
+       unsigned long lo, hi;
+       lo = (unsigned long) val;
+       hi = val >> 32;
+       wrmsr (msr, lo, hi);
+}
+
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+     __asm__ __volatile__("rdpmc" \
+                         : "=a" (low), "=d" (high) \
+                         : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR            0
+#define MSR_IA32_P5_MC_TYPE            1
+#define MSR_IA32_PLATFORM_ID           0x17
+#define MSR_IA32_EBL_CR_POWERON                0x2a
+
+#define MSR_IA32_APICBASE              0x1b
+#define MSR_IA32_APICBASE_BSP          (1<<8)
+#define MSR_IA32_APICBASE_ENABLE       (1<<11)
+#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE           0x79
+#define MSR_IA32_UCODE_REV             0x8b
+
+#define MSR_P6_PERFCTR0                0xc1
+#define MSR_P6_PERFCTR1                0xc2
+
+#define MSR_IA32_BBL_CR_CTL            0x119
+
+#define MSR_IA32_SYSENTER_CS           0x174
+#define MSR_IA32_SYSENTER_ESP          0x175
+#define MSR_IA32_SYSENTER_EIP          0x176
+
+#define MSR_IA32_MCG_CAP               0x179
+#define MSR_IA32_MCG_STATUS            0x17a
+#define MSR_IA32_MCG_CTL               0x17b
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX               0x180
+#define MSR_IA32_MCG_EBX               0x181
+#define MSR_IA32_MCG_ECX               0x182
+#define MSR_IA32_MCG_EDX               0x183
+#define MSR_IA32_MCG_ESI               0x184
+#define MSR_IA32_MCG_EDI               0x185
+#define MSR_IA32_MCG_EBP               0x186
+#define MSR_IA32_MCG_ESP               0x187
+#define MSR_IA32_MCG_EFLAGS            0x188
+#define MSR_IA32_MCG_EIP               0x189
+#define MSR_IA32_MCG_RESERVED          0x18A
+
+#define MSR_P6_EVNTSEL0                        0x186
+#define MSR_P6_EVNTSEL1                        0x187
+
+#define MSR_IA32_PERF_STATUS           0x198
+#define MSR_IA32_PERF_CTL              0x199
+
+#define MSR_IA32_THERM_CONTROL         0x19a
+#define MSR_IA32_THERM_INTERRUPT       0x19b
+#define MSR_IA32_THERM_STATUS          0x19c
+#define MSR_IA32_MISC_ENABLE           0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR           0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP      0x1db
+#define MSR_IA32_LASTBRANCHTOIP                0x1dc
+#define MSR_IA32_LASTINTFROMIP         0x1dd
+#define MSR_IA32_LASTINTTOIP           0x1de
+
+#define MSR_IA32_MC0_CTL               0x400
+#define MSR_IA32_MC0_STATUS            0x401
+#define MSR_IA32_MC0_ADDR              0x402
+#define MSR_IA32_MC0_MISC              0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0            0x300
+#define MSR_P4_BPU_PERFCTR1            0x301
+#define MSR_P4_BPU_PERFCTR2            0x302
+#define MSR_P4_BPU_PERFCTR3            0x303
+#define MSR_P4_MS_PERFCTR0             0x304
+#define MSR_P4_MS_PERFCTR1             0x305
+#define MSR_P4_MS_PERFCTR2             0x306
+#define MSR_P4_MS_PERFCTR3             0x307
+#define MSR_P4_FLAME_PERFCTR0          0x308
+#define MSR_P4_FLAME_PERFCTR1          0x309
+#define MSR_P4_FLAME_PERFCTR2          0x30a
+#define MSR_P4_FLAME_PERFCTR3          0x30b
+#define MSR_P4_IQ_PERFCTR0             0x30c
+#define MSR_P4_IQ_PERFCTR1             0x30d
+#define MSR_P4_IQ_PERFCTR2             0x30e
+#define MSR_P4_IQ_PERFCTR3             0x30f
+#define MSR_P4_IQ_PERFCTR4             0x310
+#define MSR_P4_IQ_PERFCTR5             0x311
+#define MSR_P4_BPU_CCCR0               0x360
+#define MSR_P4_BPU_CCCR1               0x361
+#define MSR_P4_BPU_CCCR2               0x362
+#define MSR_P4_BPU_CCCR3               0x363
+#define MSR_P4_MS_CCCR0                0x364
+#define MSR_P4_MS_CCCR1                0x365
+#define MSR_P4_MS_CCCR2                0x366
+#define MSR_P4_MS_CCCR3                0x367
+#define MSR_P4_FLAME_CCCR0             0x368
+#define MSR_P4_FLAME_CCCR1             0x369
+#define MSR_P4_FLAME_CCCR2             0x36a
+#define MSR_P4_FLAME_CCCR3             0x36b
+#define MSR_P4_IQ_CCCR0                0x36c
+#define MSR_P4_IQ_CCCR1                0x36d
+#define MSR_P4_IQ_CCCR2                0x36e
+#define MSR_P4_IQ_CCCR3                0x36f
+#define MSR_P4_IQ_CCCR4                0x370
+#define MSR_P4_IQ_CCCR5                0x371
+#define MSR_P4_ALF_ESCR0               0x3ca
+#define MSR_P4_ALF_ESCR1               0x3cb
+#define MSR_P4_BPU_ESCR0               0x3b2
+#define MSR_P4_BPU_ESCR1               0x3b3
+#define MSR_P4_BSU_ESCR0               0x3a0
+#define MSR_P4_BSU_ESCR1               0x3a1
+#define MSR_P4_CRU_ESCR0               0x3b8
+#define MSR_P4_CRU_ESCR1               0x3b9
+#define MSR_P4_CRU_ESCR2               0x3cc
+#define MSR_P4_CRU_ESCR3               0x3cd
+#define MSR_P4_CRU_ESCR4               0x3e0
+#define MSR_P4_CRU_ESCR5               0x3e1
+#define MSR_P4_DAC_ESCR0               0x3a8
+#define MSR_P4_DAC_ESCR1               0x3a9
+#define MSR_P4_FIRM_ESCR0              0x3a4
+#define MSR_P4_FIRM_ESCR1              0x3a5
+#define MSR_P4_FLAME_ESCR0             0x3a6
+#define MSR_P4_FLAME_ESCR1             0x3a7
+#define MSR_P4_FSB_ESCR0               0x3a2
+#define MSR_P4_FSB_ESCR1               0x3a3
+#define MSR_P4_IQ_ESCR0                0x3ba
+#define MSR_P4_IQ_ESCR1                0x3bb
+#define MSR_P4_IS_ESCR0                0x3b4
+#define MSR_P4_IS_ESCR1                0x3b5
+#define MSR_P4_ITLB_ESCR0              0x3b6
+#define MSR_P4_ITLB_ESCR1              0x3b7
+#define MSR_P4_IX_ESCR0                0x3c8
+#define MSR_P4_IX_ESCR1                0x3c9
+#define MSR_P4_MOB_ESCR0               0x3aa
+#define MSR_P4_MOB_ESCR1               0x3ab
+#define MSR_P4_MS_ESCR0                0x3c0
+#define MSR_P4_MS_ESCR1                0x3c1
+#define MSR_P4_PMH_ESCR0               0x3ac
+#define MSR_P4_PMH_ESCR1               0x3ad
+#define MSR_P4_RAT_ESCR0               0x3bc
+#define MSR_P4_RAT_ESCR1               0x3bd
+#define MSR_P4_SAAT_ESCR0              0x3ae
+#define MSR_P4_SAAT_ESCR1              0x3af
+#define MSR_P4_SSU_ESCR0               0x3be
+#define MSR_P4_SSU_ESCR1               0x3bf    /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0              0x3c2
+#define MSR_P4_TBPU_ESCR1              0x3c3
+#define MSR_P4_TC_ESCR0                0x3c4
+#define MSR_P4_TC_ESCR1                0x3c5
+#define MSR_P4_U2L_ESCR0               0x3b0
+#define MSR_P4_U2L_ESCR1               0x3b1
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER                    0xC0000080
+#define MSR_K6_STAR                    0xC0000081
+#define MSR_K6_WHCR                    0xC0000082
+#define MSR_K6_UWCCR                   0xC0000085
+#define MSR_K6_EPMR                    0xC0000086
+#define MSR_K6_PSOR                    0xC0000087
+#define MSR_K6_PFIR                    0xC0000088
+
+#define MSR_K7_EVNTSEL0                        0xC0010000
+#define MSR_K7_EVNTSEL1                        0xC0010001
+#define MSR_K7_EVNTSEL2                        0xC0010002
+#define MSR_K7_EVNTSEL3                        0xC0010003
+#define MSR_K7_PERFCTR0                        0xC0010004
+#define MSR_K7_PERFCTR1                        0xC0010005
+#define MSR_K7_PERFCTR2                        0xC0010006
+#define MSR_K7_PERFCTR3                        0xC0010007
+#define MSR_K7_HWCR                    0xC0010015
+#define MSR_K7_CLK_CTL                 0xC001001b
+#define MSR_K7_FID_VID_CTL             0xC0010041
+#define MSR_K7_FID_VID_STATUS          0xC0010042
+
+/* extended feature register */
+#define MSR_EFER                       0xc0000080
+
+/* EFER bits: */
+
+/* Execute Disable enable */
+#define _EFER_NX                       11
+#define EFER_NX                                (1<<_EFER_NX)
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1                   0x107
+#define MSR_IDT_FCR2                   0x108
+#define MSR_IDT_FCR3                   0x109
+#define MSR_IDT_FCR4                   0x10a
+
+#define MSR_IDT_MCR0                   0x110
+#define MSR_IDT_MCR1                   0x111
+#define MSR_IDT_MCR2                   0x112
+#define MSR_IDT_MCR3                   0x113
+#define MSR_IDT_MCR4                   0x114
+#define MSR_IDT_MCR5                   0x115
+#define MSR_IDT_MCR6                   0x116
+#define MSR_IDT_MCR7                   0x117
+#define MSR_IDT_MCR_CTRL               0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR                    0x1107
+#define MSR_VIA_LONGHAUL               0x110a
+#define MSR_VIA_RNG                    0x110b
+#define MSR_VIA_BCR2                   0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL          0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS         0x80868011
+#define MSR_TMTA_LRTI_READOUT          0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
+
+#endif /* __ASM_MSR_H */
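
Unlike the native header, rdmsr/wrmsr here are routed through a DOM0_MSR hypercall, so they are only meaningful in a privileged domain; the MSR name list below them is unchanged from the native header. A minimal usage sketch, not part of this patch:

    static void show_apicbase_sketch(void)
    {
            unsigned int lo, hi;

            rdmsr(MSR_IA32_APICBASE, lo, hi);        /* expands to HYPERVISOR_dom0_op() */
            printk("APICBASE %08x:%08x BSP=%d\n",
                   hi, lo, !!(lo & MSR_IA32_APICBASE_BSP));
    }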
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h
new file mode 100644 (file)
index 0000000..34e8f7f
--- /dev/null
@@ -0,0 +1,214 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT     12
+#define PAGE_SIZE      (1UL << PAGE_SHIFT)
+#define PAGE_MASK      (~(PAGE_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page)       mmx_clear_page((void *)(page))
+#define copy_page(to,from)     mmx_copy_page(to,from)
+
+#else
+
+/*
+ *     On older X86 processors it's not a win to use MMX here it seems.
+ *     Maybe the K6-III ?
+ */
+#define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from)     memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr, pg)       clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern unsigned long *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+    unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+    return machine;
+}
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+    unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+    return phys;
+}
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#ifdef CONFIG_X86_PAE
+extern unsigned long long __supported_pte_mask;
+extern int nx_enabled;
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pte_val(x)     ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define HPAGE_SHIFT    21
+#else
+#define nx_enabled 0
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define boot_pte_t pte_t /* or would you rather have a typedef */
+#if 0                          /* XXXcl for MMU_UPDATE_DEBUG */
+static inline unsigned long pte_val(pte_t x)
+{
+       unsigned long ret = x.pte_low;
+       if ( (ret & 1) ) ret = machine_to_phys(ret);
+       return ret;
+}
+#else
+#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : (x).pte_low)
+#endif
+#define pte_val_ma(x)  ((x).pte_low)
+#define HPAGE_SHIFT    22
+#endif
+#define PTE_MASK       PAGE_MASK
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE     ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK     (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+#endif
+
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+    unsigned long ret = x.pmd;
+    if ( (ret) ) ret = machine_to_phys(ret);
+    return ret;
+}
+#define pgd_val(x)     ({ BUG(); (unsigned long)0; })
+#define pgprot_val(x)  ((x).pgprot)
+
+static inline pte_t __pte(unsigned long x)
+{
+       if ( (x & 1) ) x = phys_to_machine(x);
+       return ((pte_t) { (x) });
+}
+#define __pte_ma(x) ((pte_t) { (x) } )
+static inline pmd_t __pmd(unsigned long x)
+{
+       if ( (x & 1) ) x = phys_to_machine(x);
+       return ((pmd_t) { (x) });
+}
+#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
+#define __pgprot(x)    ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB. 
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+       int order;
+
+       size = (size-1) >> (PAGE_SHIFT-1);
+       order = -1;
+       do {
+               size >>= 1;
+               order++;
+       } while (size);
+       return order;
+}
+
+extern int sysctl_legacy_va_layout;
+
+#endif /* __ASSEMBLY__ */
+
+/* 
+ * XXXcl two options for PAGE_OFFSET
+ * - 0xC0000000:
+ *   change text offset in arch/xen/i386/kernel/vmlinux.lds.S
+ *   change __pa/__va macros
+ * - 0xC0100000:
+ *   change TASK_SIZE 
+ */
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET          (0xC0000000)
+#else
+#define __PAGE_OFFSET          (0xC0000000UL)
+#endif
+
+
+#define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
+#define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
+#define MAXMEM                 (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn)       (mem_map + (pfn))
+#define page_to_pfn(page)      ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn)         ((pfn) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS \
+       (VM_READ | VM_WRITE | \
+       ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a)    (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m)    (__va(machine_to_phys(_m)))
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
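
The machine<->physical macros above translate between the domain's pseudo-physical frame numbers and real machine frames via the p2m and m2p tables while preserving the in-page offset. A round-trip sketch, not part of this patch, assuming the frame lies inside the domain's own reservation:

    static void p2m_roundtrip_sketch(unsigned long phys)
    {
            unsigned long mach = phys_to_machine(phys);      /* p2m lookup, offset kept */

            /* For a frame in our own reservation m2p(p2m(pfn)) == pfn, so the
             * round trip must give back the original pseudo-physical address. */
            BUG_ON(machine_to_phys(mach) != phys);
    }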
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h
new file mode 100644 (file)
index 0000000..658b29e
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ            100/*  0 */             /* Internal kernel timer frequency */
+# define USER_HZ       100             /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC                (USER_HZ)       /* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE  4096
+
+#ifndef NOGROUP
+#define NOGROUP                (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64      /* max length of hostname */
+#define COMMAND_LINE_SIZE 256
+
+#endif
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h
new file mode 100644 (file)
index 0000000..32d08ad
--- /dev/null
@@ -0,0 +1,117 @@
+#ifndef __i386_PCI_H
+#define __i386_PCI_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+#include <linux/mm.h>          /* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+   already-configured bus numbers - to be used for buggy BIOSes
+   or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses()    0
+#endif
+#define pcibios_scan_all_fns(a, b)     0
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO         0x1000
+#define PCIBIOS_MIN_MEM                (pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+/* Dynamic DMA mapping stuff.
+ * i386 has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The PCI address space does equal the physical memory
+ * address space.  The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS    (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)         (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)        do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)           (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)  do { } while (0)
+
+/* This is always fine. */
+#define pci_dac_dma_supported(pci_dev, mask)   (1)
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+       return ((dma64_addr_t) page_to_phys(page) +
+               (dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+       return pfn_to_page(dma_addr >> PAGE_SHIFT);
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+       return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+       flush_write_buffers();
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                              enum pci_mmap_state mmap_state, int write_combine);
+
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+/* On Xen we have to scan all functions since Xen hides bridges from
+ * us.  If a bridge is at fn=0 and that slot has a multifunction
+ * device, we won't find the additional devices without scanning all
+ * functions. */
+#undef pcibios_scan_all_fns
+#define pcibios_scan_all_fns(a, b)     1
+
+#endif /* __i386_PCI_H */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgalloc.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
new file mode 100644 (file)
index 0000000..77d16e3
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+#include <linux/mm.h>          /* for struct page */
+#include <asm/io.h>            /* for phys_to_virt and page_to_pseudophys */
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+{
+       set_pmd(pmd, __pmd(_PAGE_TABLE +
+               ((unsigned long long)page_to_pfn(pte) <<
+                       (unsigned long long) PAGE_SHIFT)));
+       flush_page_update_queue();
+       /* XXXcl queue */
+}
+/*
+ * Allocate and free page tables.
+ */
+
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+       free_page((unsigned long)pte);
+       __make_page_writable(pte);
+       flush_page_update_queue();
+}
+
+extern void pte_free(struct page *pte);
+
+#define __pte_free_tlb(tlb,pte)        tlb_remove_page((tlb),(pte))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+
+#define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)                    do { } while (0)
+#define __pmd_free_tlb(tlb,x)          do { } while (0)
+#define pgd_populate(mm, pmd, pte)     BUG()
+
+#define check_pgt_cache()      do { } while (0)
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                            unsigned long address, 
+                            unsigned long machine_addr,
+                            unsigned long size, 
+                            pgprot_t prot,
+                            domid_t  domid);
+int __direct_remap_area_pages(struct mm_struct *mm,
+                             unsigned long address, 
+                             unsigned long size, 
+                             mmu_update_t *v);
+
+#endif /* _I386_PGALLOC_H */
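
direct_remap_area_pages() is declared here as the Xen path for mapping an arbitrary machine-address range into an mm, qualified by the owning domain. A hedged sketch of a call mapping one page of the calling domain's own machine memory, not part of this patch (DOMID_SELF comes from the Xen interface headers; the exact semantics are those of the implementation elsewhere in this patch):

    static int map_one_machine_page_sketch(struct vm_area_struct *vma,
                                           unsigned long machine_addr)
    {
            return direct_remap_area_pages(vma->vm_mm, vma->vm_start,
                                           machine_addr, PAGE_SIZE,
                                           vma->vm_page_prot, DOMID_SELF);
    }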
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
new file mode 100644 (file)
index 0000000..2afc4fb
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+#define _I386_PGTABLE_2LEVEL_DEFS_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT    22
+#define PTRS_PER_PGD   1024
+#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT      22
+#define PTRS_PER_PMD   1
+
+#define PTRS_PER_PTE   1024
+
+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
new file mode 100644 (file)
index 0000000..aa23bd9
--- /dev/null
@@ -0,0 +1,140 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+#define pte_ERROR(e) \
+       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pmd_ERROR(e) \
+       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)          { return 0; }
+static inline int pgd_bad(pgd_t pgd)           { return 0; }
+static inline int pgd_present(pgd_t pgd)       { return 1; }
+#define pgd_clear(xp)                          do { } while (0)
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_atomic(pteptr, pteval) (*(pteptr) = pteval)
+#else
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte_atomic(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
+#endif
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
+#define set_pgd(pgdptr, pgdval) ((void)0)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+       return (pmd_t *) dir;
+}
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+       pte_t pte = *xp;
+       if (pte.pte_low)
+               set_pte(xp, __pte_ma(0));
+       return pte;
+}
+
+#define pte_same(a, b)         ((a).pte_low == (b).pte_low)
+/*                                 
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ * 
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ *      require. In all the cases we care about, the high bit gets shifted out
+ *      (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_pfn(_pte)                                                   \
+({                                                                      \
+    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;                   \
+    unsigned long pfn = mfn_to_pfn(mfn);                                \
+    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )               \
+        pfn = max_mapnr; /* special: force !pfn_valid() */              \
+    pfn;                                                                \
+})
+
+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+
+#define pte_none(x)            (!(x).pte_low)
+
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+/*
+ * All present user pages are user-executable:
+ */
+static inline int pte_exec(pte_t pte)
+{
+       return pte_user(pte);
+}
+
+/*
+ * All present pages are kernel-executable:
+ */
+static inline int pte_exec_kernel(pte_t pte)
+{
+       return 1;
+}
+
+/*
+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
+ * into this range:
+ */
+#define PTE_FILE_MAX_BITS      29
+
+#define pte_to_pgoff(pte) \
+       ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+
+#define pgoff_to_pte(off) \
+       ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x)                  (((x).val >> 1) & 0x1f)
+#define __swp_offset(x)                        ((x).val >> 8)
+#define __swp_entry(type, offset)      ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_low })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
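
The long comment above pte_pfn() explains how foreign and I/O frames are rejected, and NB2 insists on FOREIGN_FRAME() when a foreign MFN is deliberately recorded in the p2m table. A sketch of what that looks like at the point such a mapping is recorded, not part of this patch ('pfn' and 'foreign_mfn' are hypothetical values supplied by the caller):

    static void record_foreign_frame_sketch(unsigned long pfn, unsigned long foreign_mfn)
    {
            /* The high bit set by FOREIGN_FRAME() makes pfn_to_mfn(pfn) differ
             * from the raw MFN, so pte_pfn() forces !pfn_valid() as required. */
            phys_to_machine_mapping[pfn] = FOREIGN_FRAME(foreign_mfn);
    }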
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h
new file mode 100644 (file)
index 0000000..67325a0
--- /dev/null
@@ -0,0 +1,549 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern unsigned long empty_zero_page[1024];
+extern pgd_t swapper_pg_dir[1024];
+extern kmem_cache_t *pgd_cache;
+extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pte_cache;
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+
+void pte_ctor(void *, kmem_cache_t *, unsigned long);
+void pte_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pgtable_cache_init(void);
+void paging_init(void);
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level-defs.h>
+#else
+# include <asm/pgtable-2level-defs.h>
+#endif
+
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR      0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT   22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START  (((unsigned long) high_memory + vmalloc_earlyreserve + \
+                       2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END   (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END   (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+extern void * high_memory;
+extern unsigned long vmalloc_earlyreserve;
+
+/*
+ * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
+ * of the Pentium details, but assuming intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory. 
+ */
+#define _PAGE_BIT_PRESENT      0
+#define _PAGE_BIT_RW           1
+#define _PAGE_BIT_USER         2
+#define _PAGE_BIT_PWT          3
+#define _PAGE_BIT_PCD          4
+#define _PAGE_BIT_ACCESSED     5
+#define _PAGE_BIT_DIRTY                6
+#define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
+#define _PAGE_BIT_UNUSED2      10
+#define _PAGE_BIT_UNUSED3      11
+#define _PAGE_BIT_NX           63
+
+#define _PAGE_PRESENT  0x001
+#define _PAGE_RW       0x002
+#define _PAGE_USER     0x004
+#define _PAGE_PWT      0x008
+#define _PAGE_PCD      0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY    0x040
+#define _PAGE_PSE      0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL   0x100   /* Global TLB entry PPro+ */
+#define _PAGE_UNUSED1  0x200   /* available for programmer */
+#define _PAGE_UNUSED2  0x400
+#define _PAGE_UNUSED3  0x800
+
+#define _PAGE_FILE     0x040   /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x080   /* If not present */
+#ifdef CONFIG_X86_PAE
+#define _PAGE_NX       (1ULL<<_PAGE_BIT_NX)
+#else
+#define _PAGE_NX       0
+#endif
+
+#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE \
+       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED \
+       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+#define PAGE_SHARED_EXEC \
+       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC \
+       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC \
+       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY \
+       PAGE_COPY_NOEXEC
+#define PAGE_READONLY \
+       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC \
+       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define _PAGE_KERNEL \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_KERNEL_EXEC \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+
+/*
+ * The i386 can't do page protection for execute, and considers that
+ * the same as read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef TEST_VERIFY_AREA
+
+/* The boot page tables (all created as a single array) */
+extern unsigned long pg0[];
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp)  do { set_pte(xp, __pte(0)); } while (0)
+
+#define pmd_none(x)    (!pmd_val(x))
+/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
+   page tables (wr.p.t.) can temporarily clear it. */
+#define pmd_present(x) (pmd_val(x))
+/* pmd_clear below */
+#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_read(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte)         { return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)         { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte)         { return (pte).pte_low & _PAGE_RW; }
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)          { return (pte).pte_low & _PAGE_FILE; }
+
+static inline pte_t pte_rdprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte)     { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)       { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)     { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
+
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       int ret = pte_dirty(pte);
+       if (ret)
+               xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
+       return ret;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       int ret = pte_young(pte);
+       if (ret)
+               xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
+       return ret;
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (pte_write(pte))
+               set_pte(ptep, pte_wrprotect(pte));
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       if (!pte_dirty(pte))
+               xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
+ * it, this is a no-op.
+ */
+#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3)                                          \
+                                ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pte.pte_low &= _PAGE_CHG_MASK;
+       pte.pte_low |= pgprot_val(newprot);
+#ifdef CONFIG_X86_PAE
+       /*
+        * Chop off the NX bit (if present), and add the NX portion of
+        * the newprot (if present):
+        */
+       pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+       pte.pte_high |= (pgprot_val(newprot) >> 32) & \
+                                       (__supported_pte_mask >> 32);
+#endif
+       return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#define pmd_clear(xp)  do {                                    \
+       set_pmd(xp, __pmd(0));                                  \
+       xen_flush_page_update_queue();                          \
+} while (0)
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define pmd_large(pmd) \
+       ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) \
+               (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+               (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+       ((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address);
+
+/*
+ * Make a given kernel text page executable/non-executable.
+ * Returns the previous executability setting of that page (which
+ * is used to restore the previous state). Used by the SMP bootup code.
+ * NOTE: this is an __init function for security reasons.
+ */
+#ifdef CONFIG_X86_PAE
+ extern int set_kernel_exec(unsigned long vaddr, int enable);
+#else
+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+        pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
+        pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#else
+#define pte_offset_map(dir, address) \
+       ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#endif
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+
+#if 0
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+       do {                                                              \
+               if (__dirty) {                                            \
+                       queue_l1_entry_update((__ptep), (__entry).pte_low); \
+                       flush_tlb_page(__vma, __address);                 \
+                       xen_flush_page_update_queue();                    \
+               }                                                         \
+       } while (0)
+#else
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+       do {                                                              \
+               if (__dirty) {                                            \
+                       if ( likely((__vma)->vm_mm == current->mm) ) {    \
+                           xen_flush_page_update_queue();                \
+                           HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, __entry, UVMF_INVLPG); \
+                       } else {                                          \
+                            xen_l1_entry_update((__ptep), (__entry).pte_low); \
+                           flush_tlb_page(__vma, __address);             \
+                       }                                                 \
+               }                                                         \
+       } while (0)
+
+#endif
+
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);    \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
+#define ptep_establish_new(__vma, __address, __ptep, __entry)          \
+do {                                                                   \
+       if ( likely((__vma)->vm_mm == current->mm) ) {                  \
+               xen_flush_page_update_queue();                          \
+               HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT,   \
+                                            __entry, 0);               \
+       } else {                                                        \
+               xen_l1_entry_update((__ptep), (__entry).pte_low);       \
+       }                                                               \
+} while (0)
+
+/* NOTE: make_page* callers must call flush_page_update_queue() */
+static inline void __make_page_readonly(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+}
+
+static inline void __make_page_writable(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+}
+
+static inline void make_page_readonly(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+       if ( (unsigned long)va >= VMALLOC_START )
+               __make_page_readonly(machine_to_virt(
+                       *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_page_writable(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+       if ( (unsigned long)va >= VMALLOC_START )
+               __make_page_writable(machine_to_virt(
+                       *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_pages_readonly(void *va, unsigned int nr)
+{
+       while ( nr-- != 0 )
+       {
+               make_page_readonly(va);
+               va = (void *)((unsigned long)va + PAGE_SIZE);
+       }
+}
+
+static inline void make_pages_writable(void *va, unsigned int nr)
+{
+       while ( nr-- != 0 )
+       {
+               make_page_writable(va);
+               va = (void *)((unsigned long)va + PAGE_SIZE);
+       }
+}
+
+static inline unsigned long arbitrary_virt_to_phys(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
+       return pa | ((unsigned long)va & (PAGE_SIZE-1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr)  (1)
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define io_remap_page_range remap_page_range
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _I386_PGTABLE_H */
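For reference, pgd_index(), pmd_index() and pte_index() above are pure shift-and-mask arithmetic on the virtual address. The stand-alone sketch below is not part of the patch; it assumes the usual non-PAE two-level constants (PGDIR_SHIFT=22, PAGE_SHIFT=12, 1024 entries per table), which actually live in pgtable-2level.h rather than in this file.

#include <stdio.h>

/* Assumed i386 non-PAE constants (normally from page.h / pgtable-2level.h). */
#define PAGE_SHIFT    12
#define PGDIR_SHIFT   22
#define PTRS_PER_PGD  1024
#define PTRS_PER_PTE  1024

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

int main(void)
{
        unsigned long va = 0xc0123456UL;        /* arbitrary kernel-space address */

        /* A lookup walks pgd[pgd_index(va)] to find the pte page, then
         * pte[pte_index(va)], and keeps the low 12 bits as the byte offset. */
        printf("va=%#lx pgd_index=%lu pte_index=%lu offset=%#lx\n",
               va, pgd_index(va), pte_index(va),
               va & ((1UL << PAGE_SHIFT) - 1));
        return 0;
}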
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/processor.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/processor.h
new file mode 100644 (file)
index 0000000..e1aaf00
--- /dev/null
@@ -0,0 +1,673 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/percpu.h>
+
+/* flag for disabling the tsc */
+extern int tsc_disable;
+
+struct desc_struct {
+       unsigned long a,b;
+};
+
+#define desc_empty(desc) \
+               (!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ *  CPU type and hardware bug flags. Kept separately for each CPU.
+ *  Members of this structure are referenced in head.S, so think twice
+ *  before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+       __u8    x86;            /* CPU family */
+       __u8    x86_vendor;     /* CPU vendor */
+       __u8    x86_model;
+       __u8    x86_mask;
+       char    wp_works_ok;    /* It doesn't on 386's */
+       char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
+       char    hard_math;
+       char    rfu;
+       int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
+       unsigned long   x86_capability[NCAPINTS];
+       char    x86_vendor_id[16];
+       char    x86_model_id[64];
+       int     x86_cache_size;  /* in KB - valid for CPUS which support this
+                                   call  */
+       int     x86_cache_alignment;    /* In bytes */
+       int     fdiv_bug;
+       int     f00f_bug;
+       int     coma_bug;
+       unsigned long loops_per_jiffy;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+extern struct tss_struct doublefault_tss;
+DECLARE_PER_CPU(struct tss_struct, init_tss);
+extern pgd_t *cur_pgd;         /* XXXsmp */
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_fpu_irq;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void dodgy_tsc(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF  0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF  0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF  0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF  0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF  0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF  0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF  0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF  0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF  0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL        0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT  0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF  0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM  0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC  0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID  0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+       __asm__("cpuid"
+               : "=a" (*eax),
+                 "=b" (*ebx),
+                 "=c" (*ecx),
+                 "=d" (*edx)
+               : "0" (op));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+       unsigned int eax;
+
+       __asm__("cpuid"
+               : "=a" (eax)
+               : "0" (op)
+               : "bx", "cx", "dx");
+       return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+       unsigned int eax, ebx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=b" (ebx)
+               : "0" (op)
+               : "cx", "dx" );
+       return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+       unsigned int eax, ecx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=c" (ecx)
+               : "0" (op)
+               : "bx", "dx" );
+       return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+       unsigned int eax, edx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=d" (edx)
+               : "0" (op)
+               : "bx", "cx");
+       return edx;
+}
+
+#define load_cr3(pgdir) do {                           \
+       queue_pt_switch(__pa(pgdir));                   \
+       cur_pgd = pgdir;        /* XXXsmp */            \
+} while (/* CONSTCOND */0)
+
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME            0x0001  /* enable vm86 extensions */
+#define X86_CR4_PVI            0x0002  /* virtual interrupts flag enable */
+#define X86_CR4_TSD            0x0004  /* disable time stamp at ipl 3 */
+#define X86_CR4_DE             0x0008  /* enable debugging extensions */
+#define X86_CR4_PSE            0x0010  /* enable page size extensions */
+#define X86_CR4_PAE            0x0020  /* enable physical address extensions */
+#define X86_CR4_MCE            0x0040  /* Machine check enable */
+#define X86_CR4_PGE            0x0080  /* enable global pages */
+#define X86_CR4_PCE            0x0100  /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR         0x0200  /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT     0x0400  /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (i.e.
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPUs that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+       mmu_cr4_features |= mask;
+       switch (mask) {
+       case X86_CR4_OSFXSR:
+       case X86_CR4_OSXMMEXCPT:
+               break;
+       default:
+               do {
+                       const char *msg = "Xen unsupported cr4 update\n";
+                       (void)HYPERVISOR_console_io(
+                               CONSOLEIO_write, __builtin_strlen(msg),
+                               (char *)msg);
+                       BUG();
+               } while (0);
+       }
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+       mmu_cr4_features &= ~mask;
+       __asm__("movl %%cr4,%%eax\n\t"
+               "andl %0,%%eax\n\t"
+               "movl %%eax,%%cr4\n"
+               : : "irg" (~mask)
+               :"ax");
+}
+
+/*
+ *      NSC/Cyrix CPU configuration register indexes
+ */
+
+#define CX86_PCR0 0x20
+#define CX86_GCR  0xb8
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_PCR1 0xf0
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ *      NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+       outb((reg), 0x22); \
+       outb((data), 0x23); \
+} while (0)
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ */
+extern int MCA_bus;
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+               unsigned long edx)
+{
+       /* "monitor %eax,%ecx,%edx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc8;"
+               : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax,%ecx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc9;"
+               : :"a" (eax), "c" (ecx));
+}
+
+/* from system description table in BIOS.  Mostly for MCA use, but
+others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE      (PAGE_OFFSET)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS  65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+struct i387_fsave_struct {
+       long    cwd;
+       long    swd;
+       long    twd;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
+       long    status;         /* software status information */
+};
+
+struct i387_fxsave_struct {
+       unsigned short  cwd;
+       unsigned short  swd;
+       unsigned short  twd;
+       unsigned short  fop;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    mxcsr;
+       long    mxcsr_mask;
+       long    st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
+       long    xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
+       long    padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+       long    cwd;
+       long    swd;
+       long    twd;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
+       unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
+       struct info     *info;
+       unsigned long   entry_eip;
+};
+
+union i387_union {
+       struct i387_fsave_struct        fsave;
+       struct i387_fxsave_struct       fxsave;
+       struct i387_soft_struct soft;
+};
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct;
+
+struct tss_struct {
+       unsigned short  back_link,__blh;
+       unsigned long   esp0;
+       unsigned short  ss0,__ss0h;
+       unsigned long   esp1;
+       unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
+       unsigned long   esp2;
+       unsigned short  ss2,__ss2h;
+       unsigned long   __cr3;
+       unsigned long   eip;
+       unsigned long   eflags;
+       unsigned long   eax,ecx,edx,ebx;
+       unsigned long   esp;
+       unsigned long   ebp;
+       unsigned long   esi;
+       unsigned long   edi;
+       unsigned short  es, __esh;
+       unsigned short  cs, __csh;
+       unsigned short  ss, __ssh;
+       unsigned short  ds, __dsh;
+       unsigned short  fs, __fsh;
+       unsigned short  gs, __gsh;
+       unsigned short  ldt, __ldth;
+       unsigned short  trace, io_bitmap_base;
+       /*
+        * The extra 1 is there because the CPU will access an
+        * additional byte beyond the end of the IO permission
+        * bitmap. The extra byte must be all 1 bits, and must
+        * be within the limit.
+        */
+       unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
+       /*
+        * Cache the current maximum and the last task that used the bitmap:
+        */
+       unsigned long io_bitmap_max;
+       struct thread_struct *io_bitmap_owner;
+       /*
+        * pads the TSS to be cacheline-aligned (size is 0x100)
+        */
+       unsigned long __cacheline_filler[35];
+       /*
+        * .. and then another 0x100 bytes for emergency kernel stack
+        */
+       unsigned long stack[64];
+} __attribute__((packed));
+
+#define ARCH_MIN_TASKALIGN     16
+
+struct thread_struct {
+/* cached TLS descriptors. */
+       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+       unsigned long   esp0;
+       unsigned long   sysenter_cs;
+       unsigned long   eip;
+       unsigned long   esp;
+       unsigned long   fs;
+       unsigned long   gs;
+       unsigned int    io_pl;
+/* Hardware debugging registers */
+       unsigned long   debugreg[8];  /* %%db0-7 debug registers */
+/* fault info */
+       unsigned long   cr2, trap_no, error_code;
+/* floating point info */
+       union i387_union        i387;
+/* virtual 86 mode info */
+       struct vm86_struct __user * vm86_info;
+       unsigned long           screen_bitmap;
+       unsigned long           v86flags, v86mask, saved_esp0;
+       unsigned int            saved_fs, saved_gs;
+/* IO permissions */
+       unsigned long   *io_bitmap_ptr;
+/* max allowed port in the bitmap, in bytes: */
+       unsigned long   io_bitmap_max;
+};
+
+#define INIT_THREAD  {                                                 \
+       .vm86_info = NULL,                                              \
+       .sysenter_cs = __KERNEL_CS,                                     \
+       .io_bitmap_ptr = NULL,                                          \
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS  {                                                    \
+       .esp0           = sizeof(init_stack) + (long)&init_stack,       \
+       .ss0            = __KERNEL_DS,                                  \
+       .ss1            = __KERNEL_CS,                                  \
+       .ldt            = GDT_ENTRY_LDT,                                \
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,                     \
+       .io_bitmap      = { [ 0 ... IO_BITMAP_LONGS] = ~0 },            \
+}
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+       tss->esp0 = thread->esp0;
+       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+               tss->ss1 = thread->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+       }
+       HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
+}
+
+#define start_thread(regs, new_eip, new_esp) do {              \
+       __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
+       set_fs(USER_DS);                                        \
+       regs->xds = __USER_DS;                                  \
+       regs->xes = __USER_DS;                                  \
+       regs->xss = __USER_DS;                                  \
+       regs->xcs = __USER_CS;                                  \
+       regs->eip = new_eip;                                    \
+       regs->esp = new_esp;                                    \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info)                                                 \
+({                                                                     \
+       unsigned long *__ptr = (unsigned long *)(info);                 \
+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
+})
+
+#define task_pt_regs(task)                                             \
+({                                                                     \
+       struct pt_regs *__regs__;                                       \
+       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
+       __regs__ - 1;                                                   \
+})
+
+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
+
+
+struct microcode_header {
+       unsigned int hdrver;
+       unsigned int rev;
+       unsigned int date;
+       unsigned int sig;
+       unsigned int cksum;
+       unsigned int ldrver;
+       unsigned int pf;
+       unsigned int datasize;
+       unsigned int totalsize;
+       unsigned int reserved[3];
+};
+
+struct microcode {
+       struct microcode_header hdr;
+       unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+       unsigned int sig;
+       unsigned int pf;
+       unsigned int cksum;
+};
+
+struct extended_sigtable {
+       unsigned int count;
+       unsigned int cksum;
+       unsigned int reserved[3];
+       struct extended_signature sigs[0];
+};
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE      _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+       __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+#define cpu_relax()    rep_nop()
+
+/* generic versions from gas */
+#define GENERIC_NOP1   ".byte 0x90\n"
+#define GENERIC_NOP2           ".byte 0x89,0xf6\n"
+#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
+#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
+#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
+#define GENERIC_NOP6   ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP7   ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP8   GENERIC_NOP1 GENERIC_NOP7
+
+/* Opteron nops */
+#define K8_NOP1 GENERIC_NOP1
+#define K8_NOP2        ".byte 0x66,0x90\n" 
+#define K8_NOP3        ".byte 0x66,0x66,0x90\n" 
+#define K8_NOP4        ".byte 0x66,0x66,0x66,0x90\n" 
+#define K8_NOP5        K8_NOP3 K8_NOP2 
+#define K8_NOP6        K8_NOP3 K8_NOP3
+#define K8_NOP7        K8_NOP4 K8_NOP3
+#define K8_NOP8        K8_NOP4 K8_NOP4
+
+/* K7 nops */
+/* uses eax dependencies (arbitrary choice) */
+#define K7_NOP1  GENERIC_NOP1
+#define K7_NOP2        ".byte 0x8b,0xc0\n" 
+#define K7_NOP3        ".byte 0x8d,0x04,0x20\n"
+#define K7_NOP4        ".byte 0x8d,0x44,0x20,0x00\n"
+#define K7_NOP5        K7_NOP4 ASM_NOP1
+#define K7_NOP6        ".byte 0x8d,0x80,0,0,0,0\n"
+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+#define K7_NOP8        K7_NOP7 ASM_NOP1
+
+#ifdef CONFIG_MK8
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+#elif defined(CONFIG_MK7)
+#define ASM_NOP1 K7_NOP1
+#define ASM_NOP2 K7_NOP2
+#define ASM_NOP3 K7_NOP3
+#define ASM_NOP4 K7_NOP4
+#define ASM_NOP5 K7_NOP5
+#define ASM_NOP6 K7_NOP6
+#define ASM_NOP7 K7_NOP7
+#define ASM_NOP8 K7_NOP8
+#else
+#define ASM_NOP1 GENERIC_NOP1
+#define ASM_NOP2 GENERIC_NOP2
+#define ASM_NOP3 GENERIC_NOP3
+#define ASM_NOP4 GENERIC_NOP4
+#define ASM_NOP5 GENERIC_NOP5
+#define ASM_NOP6 GENERIC_NOP6
+#define ASM_NOP7 GENERIC_NOP7
+#define ASM_NOP8 GENERIC_NOP8
+#endif
+
+#define ASM_NOP_MAX 8
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However, we don't currently do prefetches for pre-XP Athlons;
+   that should be fixed. */
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+       alternative_input(ASM_NOP4,
+                         "prefetchnta (%1)",
+                         X86_FEATURE_XMM,
+                         "r" (x));
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for 
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+extern inline void prefetchw(const void *x)
+{
+       alternative_input(ASM_NOP4,
+                         "prefetchw (%1)",
+                         X86_FEATURE_3DNOW,
+                         "r" (x));
+}
+#define spin_lock_prefetch(x)  prefetchw(x)
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+#endif /* __ASM_I386_PROCESSOR_H */
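The generic cpuid() wrapper above can be exercised from user space as well. Below is a minimal sketch, not part of the patch, that reads the vendor string from leaf 0; the EBX/EDX/ECX byte ordering of the vendor string is architectural, everything else is illustrative.

#include <stdio.h>
#include <string.h>

/* Same pattern as the header's generic cpuid() wrapper. */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
        __asm__("cpuid"
                : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                : "0" (op));
}

int main(void)
{
        int eax, ebx, ecx, edx;
        char vendor[13];

        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);    /* vendor bytes come back in   */
        memcpy(vendor + 4, &edx, 4);    /* EBX, EDX, ECX order         */
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("max basic leaf %d, vendor \"%s\"\n", eax, vendor);
        return 0;
}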
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h
new file mode 100644 (file)
index 0000000..2036837
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS  13
+#define EFL 14
+#define UESP 15
+#define SS   16
+#define FRAME_SIZE 17
+
+/* this struct defines the way the registers are stored on the 
+   stack during a system call. */
+
+struct pt_regs {
+       long ebx;
+       long ecx;
+       long edx;
+       long esi;
+       long edi;
+       long ebp;
+       long eax;
+       int  xds;
+       int  xes;
+       long orig_eax;
+       long eip;
+       int  xcs;
+       long eflags;
+       long esp;
+       int  xss;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+#define PTRACE_GETFPREGS          14
+#define PTRACE_SETFPREGS          15
+#define PTRACE_GETFPXREGS         18
+#define PTRACE_SETFPXREGS         19
+
+#define PTRACE_OLDSETOPTIONS         21
+
+#define PTRACE_GET_THREAD_AREA    25
+#define PTRACE_SET_THREAD_AREA    26
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
+#define instruction_pointer(regs) ((regs)->eip)
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+#endif
+
+#endif
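The user_mode() macro above masks the saved CS with 2 rather than the native kernel's 3 because a Xen guest kernel runs in ring 1: only RPL 2 and 3 count as user space. A stand-alone sketch of just that RPL check, not part of the patch (the selector values are taken from segment.h below; the VM_MASK vm86 case is omitted):

#include <stdio.h>

/* User space iff bit 1 of the CS requested-privilege-level is set. */
static int user_mode_rpl(unsigned int xcs)
{
        return (xcs & 2) != 0;
}

int main(void)
{
        printf("CS 0x61 (ring 1, Xen guest kernel): user=%d\n", user_mode_rpl(0x61));
        printf("CS 0x73 (ring 3, user space):       user=%d\n", user_mode_rpl(0x73));
        return 0;
}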
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h
new file mode 100644 (file)
index 0000000..288243f
--- /dev/null
@@ -0,0 +1,96 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ *   0 - null
+ *   1 - reserved
+ *   2 - reserved
+ *   3 - reserved
+ *
+ *   4 - unused                        <==== new cacheline
+ *   5 - unused
+ *
+ *  ------- start of TLS (Thread-Local Storage) segments:
+ *
+ *   6 - TLS segment #1                        [ glibc's TLS segment ]
+ *   7 - TLS segment #2                        [ Wine's %fs Win32 segment ]
+ *   8 - TLS segment #3
+ *   9 - reserved
+ *  10 - reserved
+ *  11 - reserved
+ *
+ *  ------- start of kernel segments:
+ *
+ *  12 - kernel code segment           <==== new cacheline
+ *  13 - kernel data segment
+ *  14 - default user CS
+ *  15 - default user DS
+ *  16 - TSS
+ *  17 - LDT
+ *  18 - PNPBIOS support (16->32 gate)
+ *  19 - PNPBIOS support
+ *  20 - PNPBIOS support
+ *  21 - PNPBIOS support
+ *  22 - PNPBIOS support
+ *  23 - APM BIOS support
+ *  24 - APM BIOS support
+ *  25 - APM BIOS support 
+ *
+ *  26 - unused
+ *  27 - unused
+ *  28 - unused
+ *  29 - unused
+ *  30 - unused
+ *  31 - TSS for double fault handler
+ */
+#define GDT_ENTRY_TLS_ENTRIES  3
+#define GDT_ENTRY_TLS_MIN      6
+#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#define GDT_ENTRY_DEFAULT_USER_CS      14
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
+
+#define GDT_ENTRY_DEFAULT_USER_DS      15
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
+
+#define GDT_ENTRY_KERNEL_BASE  12
+
+#define GDT_ENTRY_KERNEL_CS            (GDT_ENTRY_KERNEL_BASE + 0)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
+
+#define GDT_ENTRY_KERNEL_DS            (GDT_ENTRY_KERNEL_BASE + 1)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
+
+#define GDT_ENTRY_TSS                  (GDT_ENTRY_KERNEL_BASE + 4)
+#define GDT_ENTRY_LDT                  (GDT_ENTRY_KERNEL_BASE + 5)
+
+#define GDT_ENTRY_PNPBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 6)
+#define GDT_ENTRY_APMBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 11)
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS      31
+
+/*
+ * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
+ */
+#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+/* Simple and small GDT entries for booting only */
+
+#define __BOOT_CS      FLAT_GUESTOS_CS
+
+#define __BOOT_DS      FLAT_GUESTOS_DS
+
+/*
+ * The interrupt descriptor table has room for 256 idt's,
+ * the global descriptor table is dependent on the number
+ * of tasks we can have..
+ */
+#define IDT_ENTRIES 256
+
+#endif
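The selector constants above are simply "GDT index * 8 + requested privilege level": the user selectors carry RPL 3, while the kernel selectors carry RPL 1 because the guest kernel runs in ring 1 under Xen. A stand-alone sketch of the arithmetic, not part of the patch:

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE     12
#define GDT_ENTRY_KERNEL_CS       (GDT_ENTRY_KERNEL_BASE + 0)
#define GDT_ENTRY_DEFAULT_USER_CS 14

#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)        /* RPL 1 */
#define __USER_CS   (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)  /* RPL 3 */

int main(void)
{
        printf("__KERNEL_CS = %#x\n", __KERNEL_CS);      /* 0x61 */
        printf("__USER_CS   = %#x\n", __USER_CS);        /* 0x73 */
        return 0;
}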
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h
new file mode 100644 (file)
index 0000000..03a3a64
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ *     Just a placeholder. We don't want to have to test x86 before
+ *     we include stuff.
+ */
+
+#ifndef _i386_SETUP_H
+#define _i386_SETUP_H
+
+#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)
+
+/*
+ * Reserved space for vmalloc and iomap - defined in asm/page.h
+ */
+#define MAXMEM_PFN     PFN_DOWN(MAXMEM)
+#define MAX_NONPAE_PFN (1 << 20)
+
+#define PARAM_SIZE 2048
+#define COMMAND_LINE_SIZE 256
+
+#define OLD_CL_MAGIC_ADDR      0x90020
+#define OLD_CL_MAGIC           0xA33F
+#define OLD_CL_BASE_ADDR       0x90000
+#define OLD_CL_OFFSET          0x90022
+#define NEW_CL_POINTER         0x228   /* Relative to real mode data */
+
+#ifndef __ASSEMBLY__
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+extern unsigned char boot_params[PARAM_SIZE];
+
+#define PARAM  (boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
+#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
+#define INITRD_START (__pa(xen_start_info.mod_start))
+#define INITRD_SIZE (xen_start_info.mod_len)
+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _i386_SETUP_H */
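PFN_UP(), PFN_DOWN() and PFN_PHYS() above are plain shift arithmetic: PFN_DOWN truncates a byte address to its page frame number, PFN_UP rounds up to the next frame. A stand-alone sketch, not part of the patch, assuming the usual i386 PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
        unsigned long addr = 0x12345UL;        /* not page aligned */

        printf("PFN_DOWN(%#lx) = %lu\n", addr, PFN_DOWN(addr));   /* 18 */
        printf("PFN_UP(%#lx)   = %lu\n", addr, PFN_UP(addr));     /* 19 */
        printf("PFN_PHYS(%lu)  = %#lx\n", PFN_UP(addr), PFN_PHYS(PFN_UP(addr)));
        return 0;
}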
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
new file mode 100644 (file)
index 0000000..8093de0
--- /dev/null
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ ( 
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btcl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__ (
+        "lock btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) & 
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
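These helpers exist so that bit operations on memory shared with Xen or another guest are always lock-prefixed, even in uniprocessor builds where the regular bitops drop the prefix. A typical consumer drains pending bits with a test-and-clear loop; the stand-alone sketch below is not part of the patch and uses GCC's __sync builtins as a portable stand-in for the lock-prefixed inline asm.

#include <stdio.h>

static volatile unsigned long shared_pending;  /* stands in for a word in a shared page */

/* Same semantics as synch_test_and_clear_bit(), without the inline asm. */
static int test_and_clear_bit_portable(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        return (__sync_fetch_and_and(addr, ~mask) & mask) != 0;
}

int main(void)
{
        int nr;

        shared_pending = (1UL << 3) | (1UL << 17);      /* pretend two ports fired */

        for (nr = 0; nr < (int)(8 * sizeof(shared_pending)); nr++)
                if (test_and_clear_bit_portable(nr, &shared_pending))
                        printf("servicing pending bit %d\n", nr);
        return 0;
}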
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h
new file mode 100644 (file)
index 0000000..133f1ea
--- /dev/null
@@ -0,0 +1,522 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
+
+#ifdef __KERNEL__
+
+struct task_struct;    /* one of the stranger aspects of C forward declarations.. */
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+#define switch_to(prev,next,last) do {                                 \
+       unsigned long esi,edi;                                          \
+       asm volatile("pushfl\n\t"                                       \
+                    "pushl %%ebp\n\t"                                  \
+                    "movl %%esp,%0\n\t"        /* save ESP */          \
+                    "movl %5,%%esp\n\t"        /* restore ESP */       \
+                    "movl $1f,%1\n\t"          /* save EIP */          \
+                    "pushl %6\n\t"             /* restore EIP */       \
+                    "jmp __switch_to\n"                                \
+                    "1:\t"                                             \
+                    "popl %%ebp\n\t"                                   \
+                    "popfl"                                            \
+                    :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
+                     "=a" (last),"=S" (esi),"=D" (edi)                 \
+                    :"m" (next->thread.esp),"m" (next->thread.eip),    \
+                     "2" (prev), "d" (next));                          \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+       "rorl $16,%%edx\n\t" \
+       "movb %%dl,%2\n\t" \
+       "movb %%dh,%3" \
+       :"=&d" (__pr) \
+       :"m" (*((addr)+2)), \
+        "m" (*((addr)+4)), \
+        "m" (*((addr)+7)), \
+         "0" (base) \
+        ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+       "rorl $16,%%edx\n\t" \
+       "movb %2,%%dh\n\t" \
+       "andb $0xf0,%%dh\n\t" \
+       "orb %%dh,%%dl\n\t" \
+       "movb %%dl,%2" \
+       :"=&d" (__lr) \
+       :"m" (*(addr)), \
+        "m" (*((addr)+6)), \
+        "0" (limit) \
+        ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+       unsigned long __base;
+       __asm__("movb %3,%%dh\n\t"
+               "movb %2,%%dl\n\t"
+               "shll $16,%%edx\n\t"
+               "movw %1,%%dx"
+               :"=&d" (__base)
+               :"m" (*((addr)+2)),
+                "m" (*((addr)+4)),
+                "m" (*((addr)+7)));
+       return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value)                 \
+       asm volatile("\n"                       \
+               "1:\t"                          \
+               "movl %0,%%" #seg "\n"          \
+               "2:\n"                          \
+               ".section .fixup,\"ax\"\n"      \
+               "3:\t"                          \
+               "pushl $0\n\t"                  \
+               "popl %%" #seg "\n\t"           \
+               "jmp 2b\n"                      \
+               ".previous\n"                   \
+               ".section __ex_table,\"a\"\n\t" \
+               ".align 4\n\t"                  \
+               ".long 1b,3b\n"                 \
+               ".previous"                     \
+               : :"m" (*(unsigned int *)&(value)))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+       asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+/* NB. 'clts' is done for us by Xen during virtual trap. */
+#define clts() ((void)0)
+#define read_cr0() \
+       BUG();
+#define write_cr0(x) \
+       BUG();
+
+#define read_cr4() \
+       BUG();
+#define write_cr4(x) \
+       BUG();
+#define stts() (HYPERVISOR_fpu_taskswitch())
+
+#endif /* __KERNEL__ */
+
+static inline void wbinvd(void)
+{
+    mmu_update_t u;
+    u.ptr = MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_FLUSH_CACHE;
+    (void)HYPERVISOR_mmu_update(&u, 1, NULL);
+}
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+       unsigned long __limit;
+       __asm__("lsll %1,%0"
+               :"=r" (__limit):"r" (segment));
+       return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange; this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+               unsigned int low, unsigned int high)
+{
+       __asm__ __volatile__ (
+               "\n1:\t"
+               "movl (%0), %%eax\n\t"
+               "movl 4(%0), %%edx\n\t"
+               "lock cmpxchg8b (%0)\n\t"
+               "jnz 1b"
+               : /* no outputs */
+               :       "D"(ptr),
+                       "b"(low),
+                       "c"(high)
+               :       "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+                                                unsigned long long value)
+{
+       __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x)      *(((unsigned int*)&(x))+0)
+#define ll_high(x)     *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+                        unsigned long long value)
+{
+       __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
+ *       but strictly the asm is still incomplete: *ptr is really an output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+       switch (size) {
+               case 1:
+                       __asm__ __volatile__("xchgb %b0,%1"
+                               :"=q" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 2:
+                       __asm__ __volatile__("xchgw %w0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 4:
+                       __asm__ __volatile__("xchgl %0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+       }
+       return x;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 2:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 4:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       }
+       return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
+    
+#ifdef __KERNEL__
+struct alt_instr { 
+       __u8 *instr;            /* original instruction */
+       __u8 *replacement;
+       __u8  cpuid;            /* cpuid bit set for replacement */
+       __u8  instrlen;         /* length of original instruction */
+       __u8  replacementlen;   /* length of new instruction, <= instrlen */ 
+       __u8  pad;
+}; 
+#endif
+
+/* 
+ * Alternative instructions for different CPU types or capabilities.
+ * 
+ * This allows optimized instructions to be used even on generic binary
+ * kernels.
+ * 
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; it can be padded with nops as needed.
+ * 
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature)       \
+       asm volatile ("661:\n\t" oldinstr "\n662:\n"                 \
+                     ".section .altinstructions,\"a\"\n"            \
+                     "  .align 4\n"                                   \
+                     "  .long 661b\n"            /* label */          \
+                     "  .long 663f\n"            /* new instruction */         \
+                     "  .byte %c0\n"             /* feature bit */    \
+                     "  .byte 662b-661b\n"       /* sourcelen */      \
+                     "  .byte 664f-663f\n"       /* replacementlen */ \
+                     ".previous\n"                                             \
+                     ".section .altinstr_replacement,\"ax\"\n"                 \
+                     "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
+                     ".previous" :: "i" (feature) : "memory")  
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use constraints that are fixed in size (like "(%1)" ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input)                  \
+       asm volatile ("661:\n\t" oldinstr "\n662:\n"                            \
+                     ".section .altinstructions,\"a\"\n"                       \
+                     "  .align 4\n"                                            \
+                     "  .long 661b\n"            /* label */                   \
+                     "  .long 663f\n"            /* new instruction */         \
+                     "  .byte %c0\n"             /* feature bit */             \
+                     "  .byte 662b-661b\n"       /* sourcelen */               \
+                     "  .byte 664f-663f\n"       /* replacementlen */          \
+                     ".previous\n"                                             \
+                     ".section .altinstr_replacement,\"ax\"\n"                 \
+                     "663:\n\t" newinstr "\n664:\n"   /* replacement */        \
+                     ".previous" :: "i" (feature), input)  
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPUs follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPUs to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
+ * nop for these.
+ */
+
+/* 
+ * Actually only lfence would be needed for mb() because all stores done 
+ * by the kernel should be already ordered. But keep a full barrier for now. 
+ */
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     b = 2;
+ *     memory_barrier();
+ *     p = &b;                         q = p;
+ *                                     read_barrier_depends();
+ *                                     d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     a = 2;
+ *     memory_barrier();
+ *     b = 3;                          y = b;
+ *                                     read_barrier_depends();
+ *                                     x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_X86_OOSTORE
+/* Actually there are no OOO-store-capable CPUs that do SSE for now,
+   but allow for the possibility already. */
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define wmb()  __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb()       mb()
+#define smp_rmb()      rmb()
+#define smp_wmb()      wmb()
+#define smp_read_barrier_depends()     read_barrier_depends()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
+#define smp_read_barrier_depends()     do { } while(0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+
+/*
+ * The uses of 'barrier' in the following reflect their role as local-lock
+ * operations. Reentrancy must be prevented (e.g., by __cli()) /before/ the
+ * following critical operations are executed. All critical operations must
+ * complete /before/ reentrancy is permitted (e.g., by __sti()). The Alpha
+ * architecture also includes such barriers, for example.
+ */
+
+#define __cli()                                                               \
+do {                                                                          \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
+} while (0)
+
+#define __sti()                                                               \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    barrier(); /* unmask then check (avoid races) */                          \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
+        force_evtchn_callback();                                              \
+} while (0)
+
+#define __save_flags(x)                                                       \
+do {                                                                          \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+} while (0)
+
+#define __restore_flags(x)                                                    \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {            \
+        barrier(); /* unmask then check (avoid races) */                      \
+        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
+            force_evtchn_callback();                                          \
+    }                                                                         \
+} while (0)
+
+#define safe_halt()             ((void)0)
+
+#define __save_and_cli(x)                                                     \
+do {                                                                          \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
+} while (0)
+
+#define __save_and_sti(x)                                                     \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    barrier(); /* unmask then check (avoid races) */                          \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
+        force_evtchn_callback();                                              \
+} while (0)
+
+#define local_irq_save(x)      __save_and_cli(x)
+#define local_irq_restore(x)    __restore_flags(x)
+#define local_save_flags(x)     __save_flags(x)
+#define local_irq_disable()     __cli()
+#define local_irq_enable()      __sti()
+
+#define irqs_disabled()                        \
+       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+
+#endif
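
The cmpxchg() macro added above is normally used in a retry loop: read the
current value, compute the desired new value, and retry if another CPU raced
in between. A minimal sketch (illustrative only, not part of the patch; the
counter variable and helper name are hypothetical):

    /* Lock-free increment built on the cmpxchg() defined in system.h above. */
    static inline void lockfree_inc(unsigned long *counter)
    {
            unsigned long old, new;

            do {
                    old = *counter;      /* snapshot the current value */
                    new = old + 1;       /* value we would like to install */
                    /* retry if *counter changed between the read and the cmpxchg */
            } while (cmpxchg(counter, old, new) != old);
    }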
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/timer.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/timer.h
new file mode 100644 (file)
index 0000000..e478d7d
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef _ASMi386_TIMER_H
+#define _ASMi386_TIMER_H
+
+/**
+ * struct timer_opts - used to define a timer source
+ *
+ * @name: name of the timer.
+ * @init: Probes and initializes the timer. Takes clock= override 
+ *        string as an argument. Returns 0 on success, anything else
+ *        on failure.
+ * @mark_offset: called by the timer interrupt.
+ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
+ *               since the last timer interrupt.
+ * @monotonic_clock: returns the number of nanoseconds since the init of the
+ *                   timer.
+ * @delay: delays this many clock cycles.
+ */
+struct timer_opts{
+       char* name;
+       int (*init)(char *override);
+       void (*mark_offset)(void);
+       unsigned long (*get_offset)(void);
+       unsigned long long (*monotonic_clock)(void);
+       void (*delay)(unsigned long);
+};
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern struct timer_opts* select_timer(void);
+extern void clock_fallback(void);
+void setup_pit_timer(void);
+
+/* Modifiers for buggy PIT handling */
+
+extern int pit_latch_buggy;
+
+extern struct timer_opts *cur_timer;
+extern int timer_ack;
+
+/* list of externed timers */
+extern struct timer_opts timer_none;
+extern struct timer_opts timer_pit;
+extern struct timer_opts timer_tsc;
+#ifdef CONFIG_X86_CYCLONE_TIMER
+extern struct timer_opts timer_cyclone;
+#endif
+extern struct timer_opts timer_xen;
+
+extern unsigned long calibrate_tsc(void);
+extern void init_cpu_khz(void);
+#ifdef CONFIG_HPET_TIMER
+extern struct timer_opts timer_hpet;
+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern struct timer_opts timer_pmtmr;
+#endif
+#endif
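
A timer source is simply an instance of the struct timer_opts declared above;
select_timer() picks among the extern'd instances such as timer_xen. A stub
sketch of what such an instance looks like (illustrative only, not part of
the patch; all names and bodies here are hypothetical):

    static int stub_init(char *override) { return 0; }
    static void stub_mark_offset(void) { }
    static unsigned long stub_get_offset(void) { return 0; }
    static unsigned long long stub_monotonic_clock(void) { return 0; }
    static void stub_delay(unsigned long loops) { while (loops--) /* spin */ ; }

    static struct timer_opts timer_stub = {
            .name            = "stub",
            .init            = stub_init,
            .mark_offset     = stub_mark_offset,
            .get_offset      = stub_get_offset,
            .monotonic_clock = stub_monotonic_clock,
            .delay           = stub_delay,
    };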
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
new file mode 100644 (file)
index 0000000..14030f9
--- /dev/null
@@ -0,0 +1,121 @@
+#ifndef _I386_TLBFLUSH_H
+#define _I386_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() do {                                             \
+       xen_tlb_flush();                                                \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global()                                           \
+       do {                                                            \
+               xen_tlb_flush();                                        \
+       } while (0)
+
+extern unsigned long pgkern_mask;
+
+# define __flush_tlb_all()                                             \
+       do {                                                            \
+               if (cpu_has_pge)                                        \
+                       __flush_tlb_global();                           \
+               else                                                    \
+                       __flush_tlb();                                  \
+       } while (0)
+
+#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
+
+#define __flush_tlb_single(addr) do {                                  \
+       xen_invlpg(addr);                                               \
+} while (/* CONSTCOND */0)
+
+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       if (mm == current->active_mm)
+               __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+       unsigned long addr)
+{
+       if (vma->vm_mm == current->active_mm)
+               __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->active_mm)
+               __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+       __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()    flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK    1
+#define TLBSTATE_LAZY  2
+
+struct tlb_state
+{
+       struct mm_struct *active_mm;
+       int state;
+       char __cacheline_padding[L1_CACHE_BYTES-8];
+};
+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+       /* i386 does not keep any page table caches in TLB */
+}
+
+#endif /* _I386_TLBFLUSH_H */
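
The typical caller pattern for these helpers is to update a page-table entry
and then invalidate the stale translation for just that address. A sketch
(illustrative only, not part of the patch; the helper name and the way the
PTE is written are hypothetical):

    static void remap_one_page(struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep, pte_t newval)
    {
            set_pte(ptep, newval);      /* install the new translation */
            flush_tlb_page(vma, addr);  /* drop any stale TLB entry for addr */
    }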
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/vga.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/vga.h
new file mode 100644 (file)
index 0000000..14b8209
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *     Access to VGA videoram
+ *
+ *     (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ *     On the PC, we can just recalculate addresses and then
+ *     access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/xor.h b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/xor.h
new file mode 100644 (file)
index 0000000..79a45de
--- /dev/null
@@ -0,0 +1,884 @@
+/*
+ * include/asm-i386/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for MMX and SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define LD(x,y)                "       movq   8*("#x")(%1), %%mm"#y"   ;\n"
+#define ST(x,y)                "       movq %%mm"#y",   8*("#x")(%1)   ;\n"
+#define XO1(x,y)       "       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
+#define XO2(x,y)       "       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
+#define XO3(x,y)       "       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
+#define XO4(x,y)       "       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
+
+#include <asm/i387.h>
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+       ST(i,0)                                 \
+               XO1(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO1(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO1(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+       ST(i,0)                                 \
+               XO2(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO2(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO2(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+               XO2(i+1,1)                      \
+                       XO2(i+2,2)              \
+                               XO2(i+3,3)      \
+       XO3(i,0)                                \
+       ST(i,0)                                 \
+               XO3(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO3(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO3(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       addl $128, %4         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+               XO2(i+1,1)                      \
+                       XO2(i+2,2)              \
+                               XO2(i+3,3)      \
+       XO3(i,0)                                \
+               XO3(i+1,1)                      \
+                       XO3(i+2,2)              \
+                               XO3(i+3,3)      \
+       XO4(i,0)                                \
+       ST(i,0)                                 \
+               XO4(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO4(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO4(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       addl $128, %4         ;\n"
+       "       addl $128, %5         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5) 
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       kernel_fpu_end();
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32                  ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+       
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       :
+       : "memory" );
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor   (%4), %%mm0   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       pxor  8(%4), %%mm1   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       pxor 16(%4), %%mm2   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 24(%4), %%mm3   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       pxor 32(%4), %%mm4   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       pxor 40(%4), %%mm5   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%4), %%mm6   ;\n"
+       "       pxor 56(%4), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       addl $64, %4         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       pxor   (%4), %%mm0   ;\n"
+       "       pxor  8(%4), %%mm1   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       pxor   (%5), %%mm0   ;\n"
+       "       pxor  8(%5), %%mm1   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor 16(%4), %%mm2   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       pxor 16(%5), %%mm2   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 24(%4), %%mm3   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 24(%5), %%mm3   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%4), %%mm4   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       pxor 32(%5), %%mm4   ;\n"
+       "       pxor 40(%4), %%mm5   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       pxor 40(%5), %%mm5   ;\n"
+       "       pxor 48(%4), %%mm6   ;\n"
+       "       pxor 56(%4), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%5), %%mm6   ;\n"
+       "       pxor 56(%5), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       addl $64, %4         ;\n"
+       "       addl $64, %5         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5)
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+       .name = "pII_mmx",
+       .do_2 = xor_pII_mmx_2,
+       .do_3 = xor_pII_mmx_3,
+       .do_4 = xor_pII_mmx_4,
+       .do_5 = xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+       .name = "p5_mmx",
+       .do_2 = xor_p5_mmx_2,
+       .do_3 = xor_p5_mmx_3,
+       .do_4 = xor_p5_mmx_4,
+       .do_5 = xor_p5_mmx_5,
+};
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+#define XMMS_SAVE do {                         \
+       preempt_disable();                      \
+       if (!(current_thread_info()->status & TS_USEDFPU))      \
+               clts();                         \
+       __asm__ __volatile__ (                  \
+               "movups %%xmm0,(%1)     ;\n\t"  \
+               "movups %%xmm1,0x10(%1) ;\n\t"  \
+               "movups %%xmm2,0x20(%1) ;\n\t"  \
+               "movups %%xmm3,0x30(%1) ;\n\t"  \
+               : "=&r" (cr0)                   \
+               : "r" (xmm_save)                \
+               : "memory");                    \
+} while(0)
+
+#define XMMS_RESTORE do {                      \
+       __asm__ __volatile__ (                  \
+               "sfence                 ;\n\t"  \
+               "movups (%1),%%xmm0     ;\n\t"  \
+               "movups 0x10(%1),%%xmm1 ;\n\t"  \
+               "movups 0x20(%1),%%xmm2 ;\n\t"  \
+               "movups 0x30(%1),%%xmm3 ;\n\t"  \
+               :                               \
+               : "r" (cr0), "r" (xmm_save)     \
+               : "memory");                    \
+       if (!(current_thread_info()->status & TS_USEDFPU))      \
+               stts();                         \
+       preempt_enable();                       \
+} while(0)
+
+#define ALIGN16 __attribute__((aligned(16)))
+
+#define OFFS(x)                "16*("#x")"
+#define PF_OFFS(x)     "256+16*("#x")"
+#define        PF0(x)          "       prefetchnta "PF_OFFS(x)"(%1)            ;\n"
+#define LD(x,y)                "       movaps   "OFFS(x)"(%1), %%xmm"#y"       ;\n"
+#define ST(x,y)                "       movaps %%xmm"#y",   "OFFS(x)"(%1)       ;\n"
+#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%2)            ;\n"
+#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%3)            ;\n"
+#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%4)            ;\n"
+#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%5)            ;\n"
+#define PF5(x)         "       prefetchnta "PF_OFFS(x)"(%6)            ;\n"
+#define XO1(x,y)       "       xorps   "OFFS(x)"(%2), %%xmm"#y"        ;\n"
+#define XO2(x,y)       "       xorps   "OFFS(x)"(%3), %%xmm"#y"        ;\n"
+#define XO3(x,y)       "       xorps   "OFFS(x)"(%4), %%xmm"#y"        ;\n"
+#define XO4(x,y)       "       xorps   "OFFS(x)"(%5), %%xmm"#y"        ;\n"
+#define XO5(x,y)       "       xorps   "OFFS(x)"(%6), %%xmm"#y"        ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+        : "memory");
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r"(p2), "+r"(p3)
+       :
+        : "memory" );
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               PF3(i)                                  \
+                               PF3(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               XO3(i,0)                                \
+                       XO3(i+1,1)                      \
+                               XO3(i+2,2)              \
+                                       XO3(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+        : "memory" );
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               PF3(i)                                  \
+                               PF3(i+2)                \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               PF4(i)                                  \
+                               PF4(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO3(i,0)                                \
+                       XO3(i+1,1)                      \
+                               XO3(i+2,2)              \
+                                       XO3(i+3,3)      \
+               XO4(i,0)                                \
+                       XO4(i+1,1)                      \
+                               XO4(i+2,2)              \
+                                       XO4(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       addl $256, %5           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5)
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_pIII_sse = {
+        .name = "pIII_sse",
+        .do_2 =  xor_sse_2,
+        .do_3 =  xor_sse_3,
+        .do_4 =  xor_sse_4,
+        .do_5 = xor_sse_5,
+};
+
+/* Also try the generic routines.  */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES                              \
+       do {                                            \
+               xor_speed(&xor_block_8regs);            \
+               xor_speed(&xor_block_8regs_p);          \
+               xor_speed(&xor_block_32regs);           \
+               xor_speed(&xor_block_32regs_p);         \
+               if (cpu_has_xmm)                        \
+                       xor_speed(&xor_block_pIII_sse); \
+               if (cpu_has_mmx) {                      \
+                       xor_speed(&xor_block_pII_mmx);  \
+                       xor_speed(&xor_block_p5_mmx);   \
+               }                                       \
+       } while (0)
+
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into the L1 only depending on how the cpu
+   deals with a load to a line that is being prefetched.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+       (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
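
The templates above are exercised by the MD/RAID xor calibration code via
XOR_TRY_TEMPLATES, and the fastest (or SSE-forced) template is then used to
combine blocks. A sketch of how a chosen template XORs one source buffer into
a destination (illustrative only, not part of the patch; the wrapper and its
buffers are hypothetical):

    static void xor_one_block(struct xor_block_template *tmpl,
                              unsigned long *dst, unsigned long *src,
                              unsigned long bytes)
    {
            /* do_2() computes dst[i] ^= src[i] over 'bytes' bytes */
            tmpl->do_2(bytes, dst, src);
    }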
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/ctrl_if.h b/linux-2.6.9-xen-sparse/include/asm-xen/ctrl_if.h
new file mode 100644 (file)
index 0000000..9b01d85
--- /dev/null
@@ -0,0 +1,160 @@
+/******************************************************************************
+ * ctrl_if.h
+ * 
+ * Management functions for special interface to the domain controller.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_XEN__CTRL_IF_H__
+#define __ASM_XEN__CTRL_IF_H__
+
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/queues.h>
+
+typedef control_msg_t ctrl_msg_t;
+
+/*
+ * Callback function type. Called for asynchronous processing of received
+ * request messages, and responses to previously-transmitted request messages.
+ * The parameters are (@msg, @id).
+ *  @msg: Original request/response message (not a copy). The message can be
+ *        modified in-place by the handler (e.g., a response callback can
+ *        turn a request message into a response message in place). The message
+ *        is no longer accessible after the callback handler returns -- if the
+ *        message is required to persist for longer then it must be copied.
+ *  @id:  (Response callbacks only) The 'id' that was specified when the
+ *        original request message was queued for transmission.
+ */
+typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will not block: it will return -EAGAIN if there is no space.
+ * Notes:
+ *  1. The @msg is copied if it is transmitted and so can be freed after this
+ *     function returns.
+ *  2. If @hnd is NULL then no callback is executed.
+ */
+int
+ctrl_if_send_message_noblock(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd,
+    unsigned long id);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will block until the message is sent, or a signal is received
+ * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
+ * Notes:
+ *  1. The @msg is copied if it is transmitted and so can be freed after this
+ *     function returns.
+ *  2. If @hnd is NULL then no callback is executed.
+ */
+int
+ctrl_if_send_message_block(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd, 
+    unsigned long id, 
+    long wait_state);
+
+/*
+ * Send @msg to the domain controller. Block until the response is received,
+ * and then copy it into the provided buffer, @rmsg.
+ */
+int
+ctrl_if_send_message_and_get_response(
+    ctrl_msg_t *msg,
+    ctrl_msg_t *rmsg,
+    long wait_state);
+
+/*
+ * Request a callback when there is /possibly/ space to immediately send a
+ * message to the domain controller. This function returns 0 if there is
+ * already space to transmit a message --- in this case the callback task /may/
+ * still be executed. If this function returns 1 then the callback /will/ be
+ * executed when space becomes available.
+ */
+int
+ctrl_if_enqueue_space_callback(
+    struct tq_struct *task);
+
+/*
+ * Send a response (@msg) to a message from the domain controller. This will 
+ * never block.
+ * Notes:
+ *  1. The @msg is copied and so can be freed after this function returns.
+ *  2. The @msg may be the original request message, modified in-place.
+ */
+void
+ctrl_if_send_response(
+    ctrl_msg_t *msg);
+
+/*
+ * Register a receiver for typed messages from the domain controller. The 
+ * handler (@hnd) is called for every received message of specified @type.
+ * Returns TRUE (non-zero) if the handler was successfully registered.
+ * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
+ * occur in a context in which it is safe to yield (i.e., process context).
+ */
+#define CALLBACK_IN_BLOCKING_CONTEXT 1
+int ctrl_if_register_receiver(
+    u8 type, 
+    ctrl_msg_handler_t hnd,
+    unsigned int flags);
+
+/*
+ * Unregister a receiver for typed messages from the domain controller. The 
+ * handler (@hnd) will not be executed after this function returns.
+ */
+void
+ctrl_if_unregister_receiver(
+    u8 type, ctrl_msg_handler_t hnd);
+
+/* Suspend/resume notifications. */
+void ctrl_if_suspend(void);
+void ctrl_if_resume(void);
+
+/* Start-of-day setup. */
+void ctrl_if_init(void);
+
+/*
+ * Returns TRUE if there are no outstanding message requests at the domain
+ * controller. This can be used to ensure that messages have really flushed
+ * through when it is not possible to use the response-callback interface.
+ * WARNING: If other subsystems are using the control interface then this
+ * function might never return TRUE!
+ */
+int ctrl_if_transmitter_empty(void);  /* !! DANGEROUS FUNCTION !! */
+
+/*
+ * Manually discard response messages from the domain controller. 
+ * WARNING: This is usually done automatically -- this function should only
+ * be called when normal interrupt mechanisms are disabled!
+ */
+void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
+
+#endif /* __ASM_XEN__CTRL_IF_H__ */
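
A typical asynchronous use of this interface is to queue a request with
ctrl_if_send_message_noblock() and let the registered handler consume the
response. A sketch (illustrative only, not part of the patch; the message
type/subtype values and the id are hypothetical, and the field names are
assumed to follow the control_msg_t layout used elsewhere in the tree):

    static void my_response_handler(ctrl_msg_t *msg, unsigned long id)
    {
            /* msg is only valid inside this handler -- copy it if needed */
            printk("ctrl_if: response for request %lu, %d payload bytes\n",
                   id, (int)msg->length);
    }

    static void my_send_request(void)
    {
            ctrl_msg_t msg;

            memset(&msg, 0, sizeof(msg));
            msg.type    = 0;   /* hypothetical message class   */
            msg.subtype = 0;   /* hypothetical message subtype */
            msg.length  = 0;   /* no payload in this example   */

            if (ctrl_if_send_message_noblock(&msg, my_response_handler, 1) != 0)
                    printk("ctrl_if: ring full, try again later\n");
    }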
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/evtchn.h b/linux-2.6.9-xen-sparse/include/asm-xen/evtchn.h
new file mode 100644 (file)
index 0000000..2ad902e
--- /dev/null
@@ -0,0 +1,107 @@
+/******************************************************************************
+ * evtchn.h
+ * 
+ * Communication via Xen event channels.
+ * Also definitions for the device that demuxes notifications to userspace.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_EVTCHN_H__
+#define __ASM_EVTCHN_H__
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+
+/*
+ * LOW-LEVEL DEFINITIONS
+ */
+
+/* Force a proper event-channel callback from Xen. */
+void force_evtchn_callback(void);
+
+/* Entry point for notifications into Linux subsystems. */
+void evtchn_do_upcall(struct pt_regs *regs);
+
+/* Entry point for notifications into the userland character device. */
+void evtchn_device_upcall(int port);
+
+static inline void mask_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+    synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+static inline void unmask_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+
+    synch_clear_bit(port, &s->evtchn_mask[0]);
+
+    /*
+     * The following is basically the equivalent of 'hw_resend_irq'. Just like
+     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
+     */
+    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 1;
+        if ( !s->vcpu_data[0].evtchn_upcall_mask )
+            force_evtchn_callback();
+    }
+}
+
+static inline void clear_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+    synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+static inline void notify_via_evtchn(int port)
+{
+    evtchn_op_t op;
+    op.cmd = EVTCHNOP_send;
+    op.u.send.local_port = port;
+    (void)HYPERVISOR_event_channel_op(&op);
+}
+
+/*
+ * CHARACTER-DEVICE DEFINITIONS
+ */
+
+/* /dev/xen/evtchn resides at device number major=10, minor=201 */
+#define EVTCHN_MINOR 201
+
+/* /dev/xen/evtchn ioctls: */
+/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
+#define EVTCHN_RESET  _IO('E', 1)
+/* EVTCHN_BIND: Bind to the specified event-channel port. */
+#define EVTCHN_BIND   _IO('E', 2)
+/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
+#define EVTCHN_UNBIND _IO('E', 3)
+
+#endif /* __ASM_EVTCHN_H__ */
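A short sketch, using only the inline helpers defined above, of the mask/clear/unmask sequence a kernel subsystem would typically wrap around servicing one port; the function name is an assumption:

static void example_service_port(int port)
{
    mask_evtchn(port);    /* suppress further upcalls for this port */
    clear_evtchn(port);   /* acknowledge the pending notification */
    /* ... perform the work associated with this port ... */
    unmask_evtchn(port);  /* may force a callback if another event arrived */
}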
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/gnttab.h b/linux-2.6.9-xen-sparse/include/asm-xen/gnttab.h
new file mode 100644 (file)
index 0000000..6e52923
--- /dev/null
@@ -0,0 +1,35 @@
+/******************************************************************************
+ * gnttab.h
+ * 
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __ASM_GNTTAB_H__
+#define __ASM_GNTTAB_H__
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/hypervisor-ifs/grant_table.h>
+
+int
+gnttab_grant_foreign_access(
+    domid_t domid, unsigned long frame, int readonly);
+
+void
+gnttab_end_foreign_access(
+    grant_ref_t ref, int readonly);
+
+int
+gnttab_grant_foreign_transfer(
+    domid_t domid);
+
+unsigned long
+gnttab_end_foreign_transfer(
+    grant_ref_t ref);
+
+#endif /* __ASM_GNTTAB_H__ */
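A hedged usage sketch of the sender side of the grant-table API declared above; 'peer' and 'mfn' are assumed to be supplied by higher-level setup code:

static void example_grant_cycle(domid_t peer, unsigned long mfn)
{
    int ref = gnttab_grant_foreign_access(peer, mfn, 1 /* readonly */);
    /* ... hand 'ref' to the peer domain and wait for it to finish ... */
    gnttab_end_foreign_access(ref, 1 /* readonly */);
}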
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h b/linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h
new file mode 100644 (file)
index 0000000..3054b30
--- /dev/null
@@ -0,0 +1,650 @@
+/******************************************************************************
+ * hypervisor.h
+ * 
+ * Linux-specific hypervisor handling.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+#include <asm/hypervisor-ifs/io/domain_controller.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/* arch/xen/i386/kernel/setup.c */
+union xen_start_info_union
+{
+    start_info_t xen_start_info;
+    char padding[512];
+};
+extern union xen_start_info_union xen_start_info_union;
+#define xen_start_info (xen_start_info_union.xen_start_info)
+
+/* arch/xen/kernel/process.c */
+void xen_cpu_idle (void);
+
+/* arch/xen/i386/kernel/hypervisor.c */
+void do_hypervisor_callback(struct pt_regs *regs);
+
+/* arch/xen/i386/mm/init.c */
+/* NOTE: caller must call flush_page_update_queue() */
+#define PROT_ON  1
+#define PROT_OFF 0
+void /* __init */ protect_page(pgd_t *dpgd, void *page, int mode);
+void /* __init */ protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode);
+
+/* arch/xen/i386/kernel/head.S */
+void lgdt_finish(void);
+
+/* arch/xen/i386/mm/hypervisor.c */
+/*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
+extern unsigned int mmu_update_queue_idx;
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
+void queue_pt_switch(unsigned long ptr);
+void queue_tlb_flush(void);
+void queue_invlpg(unsigned long ptr);
+void queue_pgd_pin(unsigned long ptr);
+void queue_pgd_unpin(unsigned long ptr);
+void queue_pte_pin(unsigned long ptr);
+void queue_pte_unpin(unsigned long ptr);
+void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+void queue_machphys_update(unsigned long mfn, unsigned long pfn);
+void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+void xen_l2_entry_update(pmd_t *ptr, unsigned long val);
+void xen_pt_switch(unsigned long ptr);
+void xen_tlb_flush(void);
+void xen_invlpg(unsigned long ptr);
+void xen_pgd_pin(unsigned long ptr);
+void xen_pgd_unpin(unsigned long ptr);
+void xen_pte_pin(unsigned long ptr);
+void xen_pte_unpin(unsigned long ptr);
+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
+void xen_machphys_update(unsigned long mfn, unsigned long pfn);
+#define MMU_UPDATE_DEBUG 0
+
+#if MMU_UPDATE_DEBUG > 0
+typedef struct {
+    void *ptr;
+    unsigned long val, pteval;
+    void *ptep;
+    int line; char *file;
+} page_update_debug_t;
+extern page_update_debug_t update_debug_queue[];
+#define queue_l1_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ queue_l1_entry_update((_p),(_v));                                \
+})
+#define queue_l2_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ queue_l2_entry_update((_p),(_v));                                \
+})
+#endif
+
+#if MMU_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 2
+#undef queue_l1_entry_update
+#define queue_l1_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ printk("L1 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
+        (_p), virt_to_machine(_p), pte_val(*(_p)),                 \
+        (unsigned long)(_v));                                     \
+ queue_l1_entry_update((_p),(_v));                                \
+})
+#endif
+#undef queue_l2_entry_update
+#define queue_l2_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ printk("L2 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
+        (_p), virt_to_machine(_p), pmd_val(*_p),                  \
+        (unsigned long)(_v));                                     \
+ queue_l2_entry_update((_p),(_v));                                \
+})
+#define queue_pt_switch(_p) ({                                    \
+ printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
+ queue_pt_switch(_p);                                             \
+})   
+#define queue_tlb_flush() ({                                      \
+ printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
+ queue_tlb_flush();                                               \
+})   
+#define queue_invlpg(_p) ({                                       \
+ printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
+ queue_invlpg(_p);                                                \
+})   
+#define queue_pgd_pin(_p) ({                                      \
+ printk("PGD PIN %s %d: %08lx/%08lx\n", __FILE__, __LINE__, (_p), \
+       phys_to_machine(_p));                                     \
+ queue_pgd_pin(_p);                                               \
+})   
+#define queue_pgd_unpin(_p) ({                                    \
+ printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
+ queue_pgd_unpin(_p);                                             \
+})   
+#define queue_pte_pin(_p) ({                                      \
+ printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
+ queue_pte_pin(_p);                                               \
+})   
+#define queue_pte_unpin(_p) ({                                    \
+ printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
+ queue_pte_unpin(_p);                                             \
+})   
+#define queue_set_ldt(_p,_l) ({                                        \
+ printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
+ queue_set_ldt((_p), (_l));                                            \
+})   
+#endif
+
+void _flush_page_update_queue(void);
+static inline int flush_page_update_queue(void)
+{
+    unsigned int idx = mmu_update_queue_idx;
+    if ( idx != 0 ) _flush_page_update_queue();
+    return idx;
+}
+#define xen_flush_page_update_queue() (_flush_page_update_queue())
+#define XEN_flush_page_update_queue() (_flush_page_update_queue())
+void MULTICALL_flush_page_update_queue(void);
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+/* Allocate a contiguous empty region of low memory. Return virtual start. */
+unsigned long allocate_empty_lowmem_region(unsigned long pages);
+/* Deallocate a contiguous region of low memory. Return it to the allocator. */
+void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
+#endif
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+static inline int
+HYPERVISOR_set_trap_table(
+    trap_info_t *table)
+{
+    int ret;
+    unsigned long ignore;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ignore)
+       : "0" (__HYPERVISOR_set_trap_table), "1" (table)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+    mmu_update_t *req, int count, int *success_count)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+       : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+         "3" (success_count)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+    unsigned long *frame_list, int entries)
+{
+    int ret;
+    unsigned long ign1, ign2;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+       : "memory" );
+
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+    unsigned long ss, unsigned long esp)
+{
+    int ret;
+    unsigned long ign1, ign2;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+    unsigned long event_selector, unsigned long event_address,
+    unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3, ign4;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+       : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+         "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+    void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+    void)
+{
+    int ret;
+    unsigned long ign;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign)
+       : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+    void)
+{
+    int ret;
+    unsigned long ign1;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1)
+       : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+    void)
+{
+    int ret;
+    unsigned long ign1;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1)
+       : "0" (__HYPERVISOR_sched_op),
+         "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+        : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+    void)
+{
+    int ret;
+    unsigned long ign1;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1)
+       : "0" (__HYPERVISOR_sched_op),
+         "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+        : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+    unsigned long srec)
+{
+    int ret;
+    unsigned long ign1, ign2;
+
+    /* NB. On suspend, control software expects a suspend record in %esi. */
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=S" (ign2)
+       : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
+        "S" (srec) : "memory");
+
+    return ret;
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+    u64 timeout)
+{
+    int ret;
+    unsigned long timeout_hi = (unsigned long)(timeout>>32);
+    unsigned long timeout_lo = (unsigned long)timeout;
+    unsigned long ign1, ign2;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
+       : "memory");
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+    dom0_op_t *dom0_op)
+{
+    int ret;
+    unsigned long ign1;
+
+    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1)
+       : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+       : "memory");
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+    int reg, unsigned long value)
+{
+    int ret;
+    unsigned long ign1, ign2;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+       : "memory" );
+
+    return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+    int reg)
+{
+    unsigned long ret;
+    unsigned long ign;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign)
+       : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+    unsigned long ma, unsigned long word1, unsigned long word2)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+       : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
+         "3" (word2)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_set_fast_trap(
+    int idx)
+{
+    int ret;
+    unsigned long ign;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign)
+       : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+    unsigned int op, unsigned long *extent_list,
+    unsigned long nr_extents, unsigned int extent_order)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3, ign4, ign5;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+         "=D" (ign5)
+       : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+         "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
+        : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_multicall(
+    void *call_list, int nr_calls)
+{
+    int ret;
+    unsigned long ign1, ign2;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+    unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+       : "0" (__HYPERVISOR_update_va_mapping), 
+          "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
+       : "memory" );
+
+    if ( unlikely(ret < 0) )
+    {
+        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
+               page_nr, (new_val).pte_low, flags);
+        BUG();
+    }
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+    void *op)
+{
+    int ret;
+    unsigned long ignore;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ignore)
+       : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_xen_version(
+    int cmd)
+{
+    int ret;
+    unsigned long ignore;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ignore)
+       : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_console_io(
+    int cmd, int count, char *str)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+       : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+    void *physdev_op)
+{
+    int ret;
+    unsigned long ign;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign)
+       : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+    unsigned int cmd, void *uop, unsigned int count)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+       : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
+       : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3, ign4;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+       : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
+          "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+        "memory" );
+    
+    return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+    unsigned int cmd, unsigned int type)
+{
+    int ret;
+    unsigned long ign1, ign2;
+
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+       : "memory" );
+
+    return ret;
+}
+
+#endif /* __HYPERVISOR_H__ */
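A minimal sketch of the queued page-table update interface declared above: batch an L1 entry update and flush it to Xen; 'ptep' and 'pteval' are assumed caller-supplied values:

static void example_set_pte(pte_t *ptep, unsigned long pteval)
{
    queue_l1_entry_update(ptep, pteval);
    flush_page_update_queue();  /* issues the queued updates if any are pending */
}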
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/multicall.h b/linux-2.6.9-xen-sparse/include/asm-xen/multicall.h
new file mode 100644 (file)
index 0000000..ca169b5
--- /dev/null
@@ -0,0 +1,107 @@
+/******************************************************************************
+ * multicall.h
+ * 
+ * Copyright (c) 2003-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __MULTICALL_H__
+#define __MULTICALL_H__
+
+#include <asm-xen/hypervisor.h>
+
+extern multicall_entry_t multicall_list[];
+extern int nr_multicall_ents;
+
+static inline void queue_multicall0(unsigned long op)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall1(unsigned long op, unsigned long arg1)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall2(
+    unsigned long op, unsigned long arg1, unsigned long arg2)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall3(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall4(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3, unsigned long arg4)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    multicall_list[i].args[3] = arg4;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall5(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    multicall_list[i].args[3] = arg4;
+    multicall_list[i].args[4] = arg5;
+    nr_multicall_ents = i+1;
+}
+
+static inline void execute_multicall_list(void)
+{
+    if ( unlikely(nr_multicall_ents == 0) ) return;
+    (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
+    nr_multicall_ents = 0;
+}
+
+#endif /* __MULTICALL_H__ */
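An illustrative sketch of batching two hypercalls through this interface and issuing them with a single trap into Xen; the particular operations chosen here are arbitrary examples:

static void example_batched_calls(unsigned long ss, unsigned long esp)
{
    queue_multicall0(__HYPERVISOR_fpu_taskswitch);
    queue_multicall2(__HYPERVISOR_stack_switch, ss, esp);
    execute_multicall_list();  /* one HYPERVISOR_multicall covers both entries */
}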
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/proc_cmd.h b/linux-2.6.9-xen-sparse/include/asm-xen/proc_cmd.h
new file mode 100644 (file)
index 0000000..4292427
--- /dev/null
@@ -0,0 +1,65 @@
+/******************************************************************************
+ * proc_cmd.h
+ * 
+ * Interface to /proc/cmd and /proc/xen/privcmd.
+ */
+
+#ifndef __PROC_CMD_H__
+#define __PROC_CMD_H__
+
+typedef struct privcmd_hypercall
+{
+    unsigned long op;
+    unsigned long arg[5];
+} privcmd_hypercall_t;
+
+typedef struct privcmd_mmap_entry {
+    unsigned long va;
+    unsigned long mfn;
+    unsigned long npages;
+} privcmd_mmap_entry_t; 
+
+typedef struct privcmd_mmap {
+    int num;
+    domid_t dom; /* target domain */
+    privcmd_mmap_entry_t *entry;
+} privcmd_mmap_t; 
+
+typedef struct privcmd_mmapbatch {
+    int num;     // number of pages to populate
+    domid_t dom; // target domain 
+    unsigned long addr;  // virtual address
+    unsigned long *arr; // array of mfns - top nibble set on err
+} privcmd_mmapbatch_t; 
+
+typedef struct privcmd_blkmsg
+{
+    unsigned long op;
+    void         *buf;
+    int           buf_size;
+} privcmd_blkmsg_t;
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: &privcmd_hypercall_t
+ * Return: Value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL         \
+    _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
+
+/*
+ * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
+ * @arg: n/a
+ * Return: Port associated with domain-controller end of control event channel
+ *         for the initial domain.
+ */
+#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
+    _IOC(_IOC_NONE, 'P', 1, 0)
+#define IOCTL_PRIVCMD_MMAP             \
+    _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
+#define IOCTL_PRIVCMD_MMAPBATCH             \
+    _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
+#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
+    _IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
+
+#endif /* __PROC_CMD_H__ */
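A hypothetical userspace sketch of driving IOCTL_PRIVCMD_HYPERCALL against the privileged-command device; the device path follows the header comment above, and the example assumes the Xen interface headers (for __HYPERVISOR_xen_version) are on the include path:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int example_xen_version(void)
{
    privcmd_hypercall_t call;
    int fd, ret = -1;

    call.op     = __HYPERVISOR_xen_version;  /* hypercall number from hypervisor-if.h */
    call.arg[0] = 0;                         /* 'cmd' argument of the hypercall */

    fd = open("/proc/xen/privcmd", O_RDWR);
    if (fd >= 0) {
        ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        close(fd);
    }
    return ret;
}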
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/queues.h b/linux-2.6.9-xen-sparse/include/asm-xen/queues.h
new file mode 100644 (file)
index 0000000..eb17e33
--- /dev/null
@@ -0,0 +1,81 @@
+
+/*
+ * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
+ * queues. Unfortunately the semantics is not the same. With task queues we 
+ * can defer work until a particular event occurs -- this is not
+ * straightforwardly done with work queues (queued work is performed asap, or
+ * after some fixed timeout). Conversely, work queues are a (slightly) neater
+ * way of deferring work to a process context than using task queues in 2.4.
+ * 
+ * This is a bit of a needless reimplementation -- should have just pulled
+ * the code from 2.4, but I tried leveraging work queues to simplify things.
+ * They didn't help. :-(
+ */
+
+#ifndef __QUEUES_H__
+#define __QUEUES_H__
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+struct tq_struct { 
+    void (*fn)(void *);
+    void *arg;
+    struct list_head list;
+    unsigned long pending;
+};
+#define INIT_TQUEUE(_name, _fn, _arg)               \
+    do {                                            \
+        INIT_LIST_HEAD(&(_name)->list);             \
+        (_name)->pending = 0;                       \
+        (_name)->fn = (_fn); (_name)->arg = (_arg); \
+    } while ( 0 )
+#define DECLARE_TQUEUE(_name, _fn, _arg)            \
+    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
+
+typedef struct {
+    struct list_head list;
+    spinlock_t       lock;
+} task_queue;
+#define DECLARE_TASK_QUEUE(_name) \
+    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
+
+static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
+{
+    unsigned long flags;
+    if ( test_and_set_bit(0, &tqe->pending) )
+        return 0;
+    spin_lock_irqsave(&tql->lock, flags);
+    list_add_tail(&tqe->list, &tql->list);
+    spin_unlock_irqrestore(&tql->lock, flags);
+    return 1;
+}
+
+static inline void run_task_queue(task_queue *tql)
+{
+    struct list_head head, *ent;
+    struct tq_struct *tqe;
+    unsigned long flags;
+    void (*fn)(void *);
+    void *arg;
+
+    spin_lock_irqsave(&tql->lock, flags);
+    list_add(&head, &tql->list);
+    list_del_init(&tql->list);
+    spin_unlock_irqrestore(&tql->lock, flags);
+
+    while ( !list_empty(&head) )
+    {
+        ent = head.next;
+        list_del_init(ent);
+        tqe = list_entry(ent, struct tq_struct, list);
+        fn  = tqe->fn;
+        arg = tqe->arg;
+        wmb();
+        tqe->pending = 0;
+        fn(arg);
+    }
+}
+
+#endif /* __QUEUES_H__ */
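A short sketch of the reimplemented task-queue API above: declare a queue and a work item, queue the item when an event occurs, and run the queue later from a convenient context; all names here are illustrative:

static void example_work_fn(void *arg)
{
    /* ... deferred work goes here ... */
}

static DECLARE_TASK_QUEUE(example_tq);
static DECLARE_TQUEUE(example_work, example_work_fn, NULL);

static void example_event_occurred(void)
{
    queue_task(&example_work, &example_tq);  /* no-op if already pending */
}

static void example_run_deferred(void)
{
    run_task_queue(&example_tq);  /* executes and clears all queued items */
}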
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/suspend.h b/linux-2.6.9-xen-sparse/include/asm-xen/suspend.h
new file mode 100644 (file)
index 0000000..f05e1b2
--- /dev/null
@@ -0,0 +1,43 @@
+/******************************************************************************
+ * suspend.h
+ * 
+ * Copyright (c) 2003-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_XEN_SUSPEND_H__
+#define __ASM_XEN_SUSPEND_H__
+
+typedef struct suspend_record_st {
+    /* To be filled in before resume. */
+    start_info_t resume_info;
+    /*
+     * The machine frame number of a frame that lists, in sequence, the
+     * machine frame numbers of the frames holding PFN -> MFN translation
+     * table data.
+     */
+    unsigned long pfn_to_mfn_frame_list;
+    /* Number of entries in the PFN -> MFN translation table. */
+    unsigned long nr_pfns;
+} suspend_record_t;
+
+#endif /* __ASM_XEN_SUSPEND_H__ */
diff --git a/linux-2.6.9-xen-sparse/include/asm-xen/xen_proc.h b/linux-2.6.9-xen-sparse/include/asm-xen/xen_proc.h
new file mode 100644 (file)
index 0000000..d62791e
--- /dev/null
@@ -0,0 +1,13 @@
+
+#ifndef __ASM_XEN_PROC_H__
+#define __ASM_XEN_PROC_H__
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+extern struct proc_dir_entry *create_xen_proc_entry(
+    const char *name, mode_t mode);
+extern void remove_xen_proc_entry(
+    const char *name);
+
+#endif /* __ASM_XEN_PROC_H__ */
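A brief illustrative sketch of creating and removing an entry via this interface; the entry name and mode are arbitrary:

static struct proc_dir_entry *example_proc;

static void example_proc_setup(void)
{
    example_proc = create_xen_proc_entry("example", 0444);
    /* ... attach read/write handlers to example_proc as required ... */
}

static void example_proc_teardown(void)
{
    remove_xen_proc_entry("example");
}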
diff --git a/linux-2.6.9-xen-sparse/include/linux/bio.h b/linux-2.6.9-xen-sparse/include/linux/bio.h
new file mode 100644 (file)
index 0000000..1f2f504
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * 2.5 block I/O model
+ *
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public Licens
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
+ */
+#ifndef __LINUX_BIO_H
+#define __LINUX_BIO_H
+
+#include <linux/highmem.h>
+#include <linux/mempool.h>
+
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+
+#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
+#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
+#define BIOVEC_VIRT_OVERSIZE(x)        ((x) > BIO_VMERGE_MAX_SIZE)
+#else
+#define BIOVEC_VIRT_START_SIZE(x)      0
+#define BIOVEC_VIRT_OVERSIZE(x)                0
+#endif
+
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY    0
+#endif
+
+#define BIO_DEBUG
+
+#ifdef BIO_DEBUG
+#define BIO_BUG_ON     BUG_ON
+#else
+#define BIO_BUG_ON
+#endif
+
+#define BIO_MAX_PAGES          (256)
+#define BIO_MAX_SIZE           (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SECTORS                (BIO_MAX_SIZE >> 9)
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+       struct page     *bv_page;
+       unsigned int    bv_len;
+       unsigned int    bv_offset;
+};
+
+struct bio;
+typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
+typedef void (bio_destructor_t) (struct bio *);
+
+/*
+ * main unit of I/O for the block layer and lower layers (ie drivers and
+ * stacking drivers)
+ */
+struct bio {
+       sector_t                bi_sector;
+       struct bio              *bi_next;       /* request queue link */
+       struct block_device     *bi_bdev;
+       unsigned long           bi_flags;       /* status, command, etc */
+       unsigned long           bi_rw;          /* bottom bits READ/WRITE,
+                                                * top bits priority
+                                                */
+
+       unsigned short          bi_vcnt;        /* how many bio_vec's */
+       unsigned short          bi_idx;         /* current index into bvl_vec */
+
+       /* Number of segments in this BIO after
+        * physical address coalescing is performed.
+        */
+       unsigned short          bi_phys_segments;
+
+       /* Number of segments after physical and DMA remapping
+        * hardware coalescing is performed.
+        */
+       unsigned short          bi_hw_segments;
+
+       unsigned int            bi_size;        /* residual I/O count */
+
+       /*
+        * To keep track of the max hw size, we account for the
+        * sizes of the first and last virtually mergeable segments
+        * in this bio
+        */
+       unsigned int            bi_hw_front_size;
+       unsigned int            bi_hw_back_size;
+
+       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+
+       struct bio_vec          *bi_io_vec;     /* the actual vec list */
+
+       bio_end_io_t            *bi_end_io;
+       atomic_t                bi_cnt;         /* pin count */
+
+       void                    *bi_private;
+
+       bio_destructor_t        *bi_destructor; /* destructor */
+};
+
+/*
+ * bio flags
+ */
+#define BIO_UPTODATE   0       /* ok after I/O completion */
+#define BIO_RW_BLOCK   1       /* RW_AHEAD set, and read/write would block */
+#define BIO_EOF                2       /* out-of-bounds error */
+#define BIO_SEG_VALID  3       /* nr_hw_seg valid */
+#define BIO_CLONED     4       /* doesn't own data */
+#define BIO_BOUNCED    5       /* bio is a bounce bio */
+#define BIO_USER_MAPPED 6      /* contains user pages */
+#define BIO_EOPNOTSUPP 7       /* not supported */
+#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
+
+/*
+ * top 4 bits of bio flags indicate the pool this bio came from
+ */
+#define BIO_POOL_BITS          (4)
+#define BIO_POOL_OFFSET                (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_MASK          (1UL << BIO_POOL_OFFSET)
+#define BIO_POOL_IDX(bio)      ((bio)->bi_flags >> BIO_POOL_OFFSET)    
+
+/*
+ * bio bi_rw flags
+ *
+ * bit 0 -- read (not set) or write (set)
+ * bit 1 -- rw-ahead when set
+ * bit 2 -- barrier
+ * bit 3 -- fail fast, don't want low level driver retries
+ * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ */
+#define BIO_RW         0
+#define BIO_RW_AHEAD   1
+#define BIO_RW_BARRIER 2
+#define BIO_RW_FAILFAST        3
+#define BIO_RW_SYNC    4
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
+#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
+#define bio_page(bio)          bio_iovec((bio))->bv_page
+#define bio_offset(bio)                bio_iovec((bio))->bv_offset
+#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
+#define bio_sectors(bio)       ((bio)->bi_size >> 9)
+#define bio_cur_sectors(bio)   (bio_iovec(bio)->bv_len >> 9)
+#define bio_data(bio)          (page_address(bio_page((bio))) + bio_offset((bio)))
+#define bio_barrier(bio)       ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
+#define bio_sync(bio)          ((bio)->bi_rw & (1 << BIO_RW_SYNC))
+#define bio_failfast(bio)      ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_rw_ahead(bio)      ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
+
+/*
+ * will die
+ */
+#define bio_to_phys(bio)       (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_phys(bv)       (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+
+/*
+ * queues that have highmem support enabled may still need to revert to
+ * PIO transfers occasionally and thus map high pages temporarily. For
+ * permanent PIO fall back, user is probably better off disabling highmem
+ * I/O completely on that queue (see ide-dma for example)
+ */
+#define __bio_kmap_atomic(bio, idx, kmtype)                            \
+       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +    \
+               bio_iovec_idx((bio), (idx))->bv_offset)
+
+#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
+
+/*
+ * merge helpers etc
+ */
+
+#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
+#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
+/* Platforms may set this to restrict multi-page buffer merging. */
+#ifndef BIOVEC_PHYS_MERGEABLE
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
+       ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#endif
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)      \
+       ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
+#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
+       (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
+#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
+       __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+#define BIO_SEG_BOUNDARY(q, b1, b2) \
+       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
+
+#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
+
+/*
+ * drivers should not use the __ version unless they _really_ want to
+ * run through the entire bio and not just pending pieces
+ */
+#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
+       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
+            i < (bio)->bi_vcnt;                                        \
+            bvl++, i++)
+
+#define bio_for_each_segment(bvl, bio, i)                              \
+       __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+
+/*
+ * get a reference to a bio, so it won't disappear. the intended use is
+ * something like:
+ *
+ * bio_get(bio);
+ * submit_bio(rw, bio);
+ * if (bio->bi_flags ...)
+ *     do_something
+ * bio_put(bio);
+ *
+ * without the bio_get(), the I/O could potentially complete before submit_bio
+ * returns, and the bio could then already be freed memory by the time the
+ * if (bio->bi_flags ...) check runs.
+ */
+#define bio_get(bio)   atomic_inc(&(bio)->bi_cnt)
+
+
+/*
+ * A bio_pair is used when we need to split a bio.
+ * This can only happen for a bio that refers to just one
+ * page of data, and in the unusual situation when the
+ * page crosses a chunk/device boundary
+ *
+ * The address of the master bio is stored in bio1.bi_private
+ * The address of the pool the pair was allocated from is stored
+ *   in bio2.bi_private
+ */
+struct bio_pair {
+       struct bio      bio1, bio2;
+       struct bio_vec  bv1, bv2;
+       atomic_t        cnt;
+       int             error;
+};
+extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
+                                 int first_sectors);
+extern mempool_t *bio_split_pool;
+extern void bio_pair_release(struct bio_pair *dbio);
+
+extern struct bio *bio_alloc(int, int);
+extern void bio_put(struct bio *);
+
+extern void bio_endio(struct bio *, unsigned int, int);
+struct request_queue;
+extern int bio_phys_segments(struct request_queue *, struct bio *);
+extern int bio_hw_segments(struct request_queue *, struct bio *);
+
+extern void __bio_clone(struct bio *, struct bio *);
+extern struct bio *bio_clone(struct bio *, int);
+
+extern void bio_init(struct bio *);
+
+extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+extern int bio_get_nr_vecs(struct block_device *);
+extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
+                               unsigned long, unsigned int, int);
+extern void bio_unmap_user(struct bio *);
+extern void bio_set_pages_dirty(struct bio *bio);
+extern void bio_check_pages_dirty(struct bio *bio);
+extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+extern int bio_uncopy_user(struct bio *);
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * remember to add offset! and never ever reenable interrupts between a
+ * bvec_kmap_irq and bvec_kunmap_irq!!
+ *
+ * This function MUST be inlined - it plays with the CPU interrupt flags.
+ * Hence the `extern inline'.
+ */
+extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+       unsigned long addr;
+
+       /*
+        * might not be a highmem page, but the preempt/irq count
+        * balancing is a lot nicer this way
+        */
+       local_irq_save(*flags);
+       addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
+
+       BUG_ON(addr & ~PAGE_MASK);
+
+       return (char *) addr + bvec->bv_offset;
+}
+
+extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+       unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
+
+       kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
+       local_irq_restore(*flags);
+}
+
+#else
+#define bvec_kmap_irq(bvec, flags)     (page_address((bvec)->bv_page) + (bvec)->bv_offset)
+#define bvec_kunmap_irq(buf, flags)    do { *(flags) = 0; } while (0)
+#endif
+
+extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+                                  unsigned long *flags)
+{
+       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+}
+#define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
+
+#define bio_kmap_irq(bio, flags) \
+       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+#define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
+
+#endif /* __LINUX_BIO_H */
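A small sketch, mirroring the usage pattern described in the comments above, of walking the remaining segments of a bio while holding a reference; the function name is an assumption:

static unsigned int example_count_bytes(struct bio *bio)
{
    struct bio_vec *bvec;
    unsigned int bytes = 0;
    int i;

    bio_get(bio);                 /* keep the bio alive while inspecting it */
    bio_for_each_segment(bvec, bio, i)
        bytes += bvec->bv_len;
    bio_put(bio);

    return bytes;
}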
diff --git a/linux-2.6.9-xen-sparse/include/linux/page-flags.h b/linux-2.6.9-xen-sparse/include/linux/page-flags.h
new file mode 100644 (file)
index 0000000..9e4cd6d
--- /dev/null
@@ -0,0 +1,338 @@
+/*
+ * Macros for manipulating and testing page->flags
+ */
+
+#ifndef PAGE_FLAGS_H
+#define PAGE_FLAGS_H
+
+#include <linux/percpu.h>
+#include <linux/cache.h>
+#include <asm/pgtable.h>
+
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped out. Some
+ * of them might not even exist (eg empty_bad_page)...
+ *
+ * The PG_private bitflag is set if page->private contains a valid value.
+ *
+ * During disk I/O, PG_locked is used. This bit is set before I/O and
+ * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
+ * waiting for the I/O on this page to complete.
+ *
+ * PG_uptodate tells whether the page's contents is valid.  When a read
+ * completes, the page becomes uptodate, unless a disk I/O error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
+ * which is set any time the system accesses that page through the (mapping,
+ * index) hash table.  This referenced bit, together with the referenced bit
+ * in the page tables, is used to manipulate page->age and move the page across
+ * the active, inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the active,
+ * inactive_dirty and inactive_clean lists are protected by the
+ * zone->lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit.  The generic code
+ * guarantees that this bit is cleared for a page when it first is entered into
+ * the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual address
+ * space, they need to be kmapped separately for doing IO on the pages.  The
+ * struct page structures (which hold these flag bits) are always mapped into
+ * kernel address space...
+ */
+
+/*
+ * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
+ * locked- and dirty-page accounting.  The top eight bits of page->flags are
+ * used for page->zone, so putting flag bits there doesn't work.
+ */
+#define PG_locked               0      /* Page is locked. Don't touch. */
+#define PG_error                1
+#define PG_referenced           2
+#define PG_uptodate             3
+
+#define PG_dirty                4
+#define PG_lru                  5
+#define PG_active               6
+#define PG_slab                         7      /* slab debug (Suparna wants this) */
+
+#define PG_highmem              8
+#define PG_checked              9      /* kill me in 2.5.<early>. */
+#define PG_arch_1              10
+#define PG_reserved            11
+
+#define PG_private             12      /* Has something at ->private */
+#define PG_writeback           13      /* Page is under writeback */
+#define PG_nosave              14      /* Used for system suspend/resume */
+#define PG_compound            15      /* Part of a compound page */
+
+#define PG_swapcache           16      /* Swap page: swp_entry_t in private */
+#define PG_mappedtodisk                17      /* Has blocks allocated on-disk */
+#define PG_reclaim             18      /* To be reclaimed asap */
+
+#define PG_foreign             21      /* Page belongs to foreign allocator */
+
+
+/*
+ * Global page accounting.  One instance per CPU.  Only unsigned longs are
+ * allowed.
+ */
+struct page_state {
+       unsigned long nr_dirty;         /* Dirty writeable pages */
+       unsigned long nr_writeback;     /* Pages under writeback */
+       unsigned long nr_unstable;      /* NFS unstable pages */
+       unsigned long nr_page_table_pages;/* Pages used for pagetables */
+       unsigned long nr_mapped;        /* mapped into pagetables */
+       unsigned long nr_slab;          /* In slab */
+#define GET_PAGE_STATE_LAST nr_slab
+
+       /*
+        * The below are zeroed by get_page_state().  Use get_full_page_state()
+        * to add up all these.
+        */
+       unsigned long pgpgin;           /* Disk reads */
+       unsigned long pgpgout;          /* Disk writes */
+       unsigned long pswpin;           /* swap reads */
+       unsigned long pswpout;          /* swap writes */
+       unsigned long pgalloc_high;     /* page allocations */
+
+       unsigned long pgalloc_normal;
+       unsigned long pgalloc_dma;
+       unsigned long pgfree;           /* page freeings */
+       unsigned long pgactivate;       /* pages moved inactive->active */
+       unsigned long pgdeactivate;     /* pages moved active->inactive */
+
+       unsigned long pgfault;          /* faults (major+minor) */
+       unsigned long pgmajfault;       /* faults (major only) */
+       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
+       unsigned long pgrefill_normal;
+       unsigned long pgrefill_dma;
+
+       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
+       unsigned long pgsteal_normal;
+       unsigned long pgsteal_dma;
+       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
+       unsigned long pgscan_kswapd_normal;
+
+       unsigned long pgscan_kswapd_dma;
+       unsigned long pgscan_direct_high;/* total highmem pages scanned */
+       unsigned long pgscan_direct_normal;
+       unsigned long pgscan_direct_dma;
+       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
+
+       unsigned long slabs_scanned;    /* slab objects scanned */
+       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
+       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
+       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
+       unsigned long allocstall;       /* direct reclaim calls */
+
+       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
+};
+
+DECLARE_PER_CPU(struct page_state, page_states);
+
+extern void get_page_state(struct page_state *ret);
+extern void get_full_page_state(struct page_state *ret);
+extern unsigned long __read_page_state(unsigned offset);
+
+#define read_page_state(member) \
+       __read_page_state(offsetof(struct page_state, member))
+
+#define mod_page_state(member, delta)                                  \
+       do {                                                            \
+               unsigned long flags;                                    \
+               local_irq_save(flags);                                  \
+               __get_cpu_var(page_states).member += (delta);           \
+               local_irq_restore(flags);                               \
+       } while (0)
+
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define mod_page_state_zone(zone, member, delta)                       \
+       do {                                                            \
+               unsigned long flags;                                    \
+               local_irq_save(flags);                                  \
+               if (is_highmem(zone))                                   \
+                       __get_cpu_var(page_states).member##_high += (delta);\
+               else if (is_normal(zone))                               \
+                       __get_cpu_var(page_states).member##_normal += (delta);\
+               else                                                    \
+                       __get_cpu_var(page_states).member##_dma += (delta);\
+               local_irq_restore(flags);                               \
+       } while (0)
+
+/*
+ * Manipulation of page state flags
+ */
+#define PageLocked(page)               \
+               test_bit(PG_locked, &(page)->flags)
+#define SetPageLocked(page)            \
+               set_bit(PG_locked, &(page)->flags)
+#define TestSetPageLocked(page)                \
+               test_and_set_bit(PG_locked, &(page)->flags)
+#define ClearPageLocked(page)          \
+               clear_bit(PG_locked, &(page)->flags)
+#define TestClearPageLocked(page)      \
+               test_and_clear_bit(PG_locked, &(page)->flags)
+
+#define PageError(page)                test_bit(PG_error, &(page)->flags)
+#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
+
+#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page)        set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page)      clear_bit(PG_referenced, &(page)->flags)
+#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+
+#define PageUptodate(page)     test_bit(PG_uptodate, &(page)->flags)
+#ifndef SetPageUptodate
+#define SetPageUptodate(page)  set_bit(PG_uptodate, &(page)->flags)
+#endif
+#define ClearPageUptodate(page)        clear_bit(PG_uptodate, &(page)->flags)
+
+#define PageDirty(page)                test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
+#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
+#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
+
+#define SetPageLRU(page)       set_bit(PG_lru, &(page)->flags)
+#define PageLRU(page)          test_bit(PG_lru, &(page)->flags)
+#define TestSetPageLRU(page)   test_and_set_bit(PG_lru, &(page)->flags)
+#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+
+#define PageActive(page)       test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page)    set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page)  clear_bit(PG_active, &(page)->flags)
+#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
+#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+
+#define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
+#define SetPageSlab(page)      set_bit(PG_slab, &(page)->flags)
+#define ClearPageSlab(page)    clear_bit(PG_slab, &(page)->flags)
+#define TestClearPageSlab(page)        test_and_clear_bit(PG_slab, &(page)->flags)
+#define TestSetPageSlab(page)  test_and_set_bit(PG_slab, &(page)->flags)
+
+#ifdef CONFIG_HIGHMEM
+#define PageHighMem(page)      test_bit(PG_highmem, &(page)->flags)
+#else
+#define PageHighMem(page)      0 /* needed to optimize away at compile time */
+#endif
+
+#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+
+#define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
+#define SetPageReserved(page)  set_bit(PG_reserved, &(page)->flags)
+#define ClearPageReserved(page)        clear_bit(PG_reserved, &(page)->flags)
+#define __ClearPageReserved(page)      __clear_bit(PG_reserved, &(page)->flags)
+
+#define SetPagePrivate(page)   set_bit(PG_private, &(page)->flags)
+#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
+#define PagePrivate(page)      test_bit(PG_private, &(page)->flags)
+
+#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
+#define SetPageWriteback(page)                                         \
+       do {                                                            \
+               if (!test_and_set_bit(PG_writeback,                     \
+                               &(page)->flags))                        \
+                       inc_page_state(nr_writeback);                   \
+       } while (0)
+#define TestSetPageWriteback(page)                                     \
+       ({                                                              \
+               int ret;                                                \
+               ret = test_and_set_bit(PG_writeback,                    \
+                                       &(page)->flags);                \
+               if (!ret)                                               \
+                       inc_page_state(nr_writeback);                   \
+               ret;                                                    \
+       })
+#define ClearPageWriteback(page)                                       \
+       do {                                                            \
+               if (test_and_clear_bit(PG_writeback,                    \
+                               &(page)->flags))                        \
+                       dec_page_state(nr_writeback);                   \
+       } while (0)
+#define TestClearPageWriteback(page)                                   \
+       ({                                                              \
+               int ret;                                                \
+               ret = test_and_clear_bit(PG_writeback,                  \
+                               &(page)->flags);                        \
+               if (ret)                                                \
+                       dec_page_state(nr_writeback);                   \
+               ret;                                                    \
+       })
+
+#define PageNosave(page)       test_bit(PG_nosave, &(page)->flags)
+#define SetPageNosave(page)    set_bit(PG_nosave, &(page)->flags)
+#define TestSetPageNosave(page)        test_and_set_bit(PG_nosave, &(page)->flags)
+#define ClearPageNosave(page)          clear_bit(PG_nosave, &(page)->flags)
+#define TestClearPageNosave(page)      test_and_clear_bit(PG_nosave, &(page)->flags)
+
+#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
+#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
+#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
+
+#define PageReclaim(page)      test_bit(PG_reclaim, &(page)->flags)
+#define SetPageReclaim(page)   set_bit(PG_reclaim, &(page)->flags)
+#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
+#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+
+#define PageCompound(page)     test_bit(PG_compound, &(page)->flags)
+#define SetPageCompound(page)  set_bit(PG_compound, &(page)->flags)
+#define ClearPageCompound(page)        clear_bit(PG_compound, &(page)->flags)
+
+/* A foreign page uses a custom destructor rather than the buddy allocator. */
+#ifdef CONFIG_FOREIGN_PAGES
+#define PageForeign(page)      test_bit(PG_foreign, &(page)->flags)
+#define SetPageForeign(page, dtor) do {                \
+       set_bit(PG_foreign, &(page)->flags);    \
+       (page)->mapping = (void *)dtor;         \
+} while (0)
+#define ClearPageForeign(page) do {            \
+       clear_bit(PG_foreign, &(page)->flags);  \
+       (page)->mapping = NULL;                 \
+} while (0)
+#define PageForeignDestructor(page)    \
+       ( (void (*) (struct page *)) (page)->mapping )
+#else
+#define PageForeign(page)      0
+#define PageForeignDestructor(page)    void
+#endif
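/*
 * Editorial sketch, not part of the patch: how a backend driver could use
 * the foreign-page hooks above.  Both function names are hypothetical; per
 * the comment above, the registered destructor is called instead of the
 * page being returned to the buddy allocator.
 */
static void example_foreign_release(struct page *page)
{
        ClearPageForeign(page);
        /* return the page to the driver's private pool here */
}

static void example_make_foreign(struct page *page)
{
        SetPageForeign(page, example_foreign_release);
}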
+
+#ifdef CONFIG_SWAP
+#define PageSwapCache(page)    test_bit(PG_swapcache, &(page)->flags)
+#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
+#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
+#else
+#define PageSwapCache(page)    0
+#endif
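/*
 * Editorial sketch, not part of the patch: a minimal illustration of why
 * the TestSet/TestClear variants above exist.  They return the old bit
 * value atomically, so the caller can act exactly once on a state
 * transition.  The function name is hypothetical.
 */
static inline void example_mark_dirty_once(struct page *page)
{
        /* accounting runs only on the clean -> dirty transition */
        if (!TestSetPageDirty(page))
                inc_page_state(nr_dirty);
}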
+
+struct page;   /* forward declaration */
+
+int test_clear_page_dirty(struct page *page);
+int __clear_page_dirty(struct page *page);
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
+
+static inline void clear_page_dirty(struct page *page)
+{
+       test_clear_page_dirty(page);
+}
+
+static inline void set_page_writeback(struct page *page)
+{
+       test_set_page_writeback(page);
+}
+
+#endif /* PAGE_FLAGS_H */
diff --git a/linux-2.6.9-xen-sparse/include/linux/skbuff.h b/linux-2.6.9-xen-sparse/include/linux/skbuff.h
new file mode 100644 (file)
index 0000000..57a2843
--- /dev/null
@@ -0,0 +1,1189 @@
+/*
+ *     Definitions for the 'struct sk_buff' memory handlers.
+ *
+ *     Authors:
+ *             Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *             Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/cache.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/poll.h>
+#include <linux/net.h>
+#include <net/checksum.h>
+
+#define HAVE_ALLOC_SKB         /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB     /* Ditto 8)                */
+#define SLAB_SKB               /* Slabified skbuffs       */
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+#define SKB_DATA_ALIGN(X)      (((X) + (SMP_CACHE_BYTES - 1)) & \
+                                ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER)        (((PAGE_SIZE << (ORDER)) - (X) - \
+                                 sizeof(struct skb_shared_info)) & \
+                                 ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X)                (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC          (SKB_MAX_ORDER(0, 2))
+
+/* A. Checksumming of received packets by device.
+ *
+ *     NONE: device failed to checksum this packet.
+ *             skb->csum is undefined.
+ *
+ *     UNNECESSARY: the device parsed the packet and verified the checksum.
+ *             skb->csum is undefined.
+ *           This is a bad option, but, unfortunately, many vendors do it,
+ *           apparently with the secret goal of selling you a new device
+ *           when you add a new protocol to your host. E.g. IPv6. 8)
+ *
+ *     HW: the most generic way. The device supplied the checksum of the
+ *         _whole_ packet, as seen by netif_rx, in skb->csum.
+ *         NOTE: even if a device supports only some protocols but is
+ *         able to produce some skb->csum, it MUST use HW,
+ *         not UNNECESSARY.
+ *
+ * B. Checksumming on output.
+ *
+ *     NONE: skb is checksummed by protocol or csum is not required.
+ *
+ *     HW: device is required to csum packet as seen by hard_start_xmit
+ *     from skb->h.raw to the end and to record the checksum
+ *     at skb->h.raw+skb->csum.
+ *
+ *     Device must show its capabilities in dev->features, set
+ *     at device setup time.
+ *     NETIF_F_HW_CSUM - a clever device; it is able to checksum
+ *                       everything.
+ *     NETIF_F_NO_CSUM - loopback or reliable single hop media.
+ *     NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
+ *                       TCP/UDP over IPv4. Sigh. Vendors like it this
+ *                       way for some unknown reason. Though, see the comment
+ *                       above about CHECKSUM_UNNECESSARY. 8)
+ *
+ *     Any questions? No questions, good.              --ANK
+ */
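/*
 * Editorial sketch, not part of the patch: how a receive path might apply
 * the rules above.  'hw_ok' and 'hw_csum' stand in for whatever a
 * hypothetical device reports for the frame.
 */
static void example_rx_set_csum(struct sk_buff *skb, int hw_ok,
                                unsigned int hw_csum)
{
        if (hw_ok) {
                /* checksum of the whole packet, as seen by netif_rx() */
                skb->ip_summed = CHECKSUM_HW;
                skb->csum = hw_csum;
        } else {
                /* let the protocol layers verify the checksum themselves */
                skb->ip_summed = CHECKSUM_NONE;
        }
}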
+
+#ifdef __i386__
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
+#else
+#define NET_CALLER(arg) __builtin_return_address(0)
+#endif
+
+struct net_device;
+
+#ifdef CONFIG_NETFILTER
+struct nf_conntrack {
+       atomic_t use;
+       void (*destroy)(struct nf_conntrack *);
+};
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+struct nf_bridge_info {
+       atomic_t use;
+       struct net_device *physindev;
+       struct net_device *physoutdev;
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+       struct net_device *netoutdev;
+#endif
+       unsigned int mask;
+       unsigned long data[32 / sizeof(unsigned long)];
+};
+#endif
+
+#endif
+
+struct sk_buff_head {
+       /* These two members must be first. */
+       struct sk_buff  *next;
+       struct sk_buff  *prev;
+
+       __u32           qlen;
+       spinlock_t      lock;
+};
+
+struct sk_buff;
+
+/* To allow 64K frame to be packed as single skb without frag_list */
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct {
+       struct page *page;
+       __u16 page_offset;
+       __u16 size;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, i.e. at skb->end.
+ */
+struct skb_shared_info {
+       atomic_t        dataref;
+       unsigned int    nr_frags;
+       unsigned short  tso_size;
+       unsigned short  tso_segs;
+       struct sk_buff  *frag_list;
+       skb_frag_t      frags[MAX_SKB_FRAGS];
+};
+
+/** 
+ *     struct sk_buff - socket buffer
+ *     @next: Next buffer in list
+ *     @prev: Previous buffer in list
+ *     @list: List we are on
+ *     @sk: Socket we are owned by
+ *     @stamp: Time we arrived
+ *     @dev: Device we arrived on/are leaving by
+ *     @input_dev: Device we arrived on
+ *      @real_dev: The real device we are using
+ *     @h: Transport layer header
+ *     @nh: Network layer header
+ *     @mac: Link layer header
+ *     @dst: FIXME: Describe this field
+ *     @cb: Control buffer. Free for use by every layer. Put private vars here
+ *     @len: Length of actual data
+ *     @data_len: Data length
+ *     @mac_len: Length of link layer header
+ *     @csum: Checksum
+ *     @__unused: Dead field, may be reused
+ *     @cloned: Head may be cloned (check refcnt to be sure)
+ *     @pkt_type: Packet class
+ *     @ip_summed: Driver fed us an IP checksum
+ *     @priority: Packet queueing priority
+ *     @users: User count - see {datagram,tcp}.c
+ *     @protocol: Packet protocol from driver
+ *     @security: Security level of packet
+ *     @truesize: Buffer size 
+ *     @head: Head of buffer
+ *     @data: Data head pointer
+ *     @tail: Tail pointer
+ *     @end: End pointer
+ *     @destructor: Destruct function
+ *     @nfmark: Can be used for communication between hooks
+ *     @nfcache: Cache info
+ *     @nfct: Associated connection, if any
+ *     @nfctinfo: Relationship of this skb to the connection
+ *     @nf_debug: Netfilter debugging
+ *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ *      @private: Data which is private to the HIPPI implementation
+ *     @tc_index: Traffic control index
+ */
+
+struct sk_buff {
+       /* These two members must be first. */
+       struct sk_buff          *next;
+       struct sk_buff          *prev;
+
+       struct sk_buff_head     *list;
+       struct sock             *sk;
+       struct timeval          stamp;
+       struct net_device       *dev;
+       struct net_device       *input_dev;
+       struct net_device       *real_dev;
+
+       union {
+               struct tcphdr   *th;
+               struct udphdr   *uh;
+               struct icmphdr  *icmph;
+               struct igmphdr  *igmph;
+               struct iphdr    *ipiph;
+               struct ipv6hdr  *ipv6h;
+               unsigned char   *raw;
+       } h;
+
+       union {
+               struct iphdr    *iph;
+               struct ipv6hdr  *ipv6h;
+               struct arphdr   *arph;
+               unsigned char   *raw;
+       } nh;
+
+       union {
+               unsigned char   *raw;
+       } mac;
+
+       struct  dst_entry       *dst;
+       struct  sec_path        *sp;
+
+       /*
+        * This is the control buffer. It is free to use for every
+        * layer. Please put your private variables there. If you
+        * want to keep them across layers you have to do a skb_clone()
+        * first. This is owned by whoever has the skb queued ATM.
+        */
+       char                    cb[40];
+
+       unsigned int            len,
+                               data_len,
+                               mac_len,
+                               csum;
+       unsigned char           local_df,
+                               cloned,
+                               pkt_type,
+                               ip_summed;
+       __u32                   priority;
+       unsigned short          protocol,
+                               security;
+
+       void                    (*destructor)(struct sk_buff *skb);
+#ifdef CONFIG_NETFILTER
+        unsigned long          nfmark;
+       __u32                   nfcache;
+       __u32                   nfctinfo;
+       struct nf_conntrack     *nfct;
+#ifdef CONFIG_NETFILTER_DEBUG
+        unsigned int           nf_debug;
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       struct nf_bridge_info   *nf_bridge;
+#endif
+#endif /* CONFIG_NETFILTER */
+#if defined(CONFIG_HIPPI)
+       union {
+               __u32           ifield;
+       } private;
+#endif
+#ifdef CONFIG_NET_SCHED
+       __u32                   tc_index;        /* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+       __u32           tc_verd;               /* traffic control verdict */
+       __u32           tc_classid;            /* traffic control classid */
+#endif
+
+#endif
+
+
+       /* These elements must be at the end, see alloc_skb() for details.  */
+       unsigned int            truesize;
+       atomic_t                users;
+       unsigned char           *head,
+                               *data,
+                               *tail,
+                               *end;
+};
+
+#ifdef __KERNEL__
+/*
+ *     Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+extern void           __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                           unsigned int size, int priority);
+extern void           kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int            pskb_expand_head(struct sk_buff *skb,
+                                       int nhead, int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+                                           unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+                                      int newheadroom, int newtailroom,
+                                      int priority);
+extern struct sk_buff *                skb_pad(struct sk_buff *skb, int pad);
+#define dev_kfree_skb(a)       kfree_skb(a)
+extern void          skb_over_panic(struct sk_buff *skb, int len,
+                                    void *here);
+extern void          skb_under_panic(struct sk_buff *skb, int len,
+                                     void *here);
+
+/* Internal */
+#define skb_shinfo(SKB)                ((struct skb_shared_info *)((SKB)->end))
+
+/**
+ *     skb_queue_empty - check if a queue is empty
+ *     @list: queue head
+ *
+ *     Returns true if the queue is empty, false otherwise.
+ */
+static inline int skb_queue_empty(const struct sk_buff_head *list)
+{
+       return list->next == (struct sk_buff *)list;
+}
+
+/**
+ *     skb_get - reference buffer
+ *     @skb: buffer to reference
+ *
+ *     Makes another reference to a socket buffer and returns a pointer
+ *     to the buffer.
+ */
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+       atomic_inc(&skb->users);
+       return skb;
+}
+
+/*
+ * If users == 1, we are the only owner and can avoid a redundant
+ * atomic change.
+ */
+
+/**
+ *     kfree_skb - free an sk_buff
+ *     @skb: buffer to free
+ *
+ *     Drop a reference to the buffer and free it if the usage count has
+ *     hit zero.
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+       if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+               __kfree_skb(skb);
+}
+
+/* Use this if you didn't touch the skb state [for fast switching] */
+static inline void kfree_skb_fast(struct sk_buff *skb)
+{
+       if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+               kfree_skbmem(skb);
+}
+
+/**
+ *     skb_cloned - is the buffer a clone
+ *     @skb: buffer to check
+ *
+ *     Returns true if the buffer was generated with skb_clone() and is
+ *     one of multiple shared copies of the buffer. Cloned buffers are
+ *     shared data so must not be written to under normal circumstances.
+ */
+static inline int skb_cloned(const struct sk_buff *skb)
+{
+       return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
+}
+
+/**
+ *     skb_shared - is the buffer shared
+ *     @skb: buffer to check
+ *
+ *     Returns true if more than one person has a reference to this
+ *     buffer.
+ */
+static inline int skb_shared(const struct sk_buff *skb)
+{
+       return atomic_read(&skb->users) != 1;
+}
+
+/**
+ *     skb_share_check - check if buffer is shared and if so clone it
+ *     @skb: buffer to check
+ *     @pri: priority for memory allocation
+ *
+ *     If the buffer is shared the buffer is cloned and the old copy
+ *     drops a reference. A new clone with a single reference is returned.
+ *     If the buffer is not shared the original buffer is returned. When
+ *     called from interrupt context or with spinlocks held, @pri must
+ *     be %GFP_ATOMIC.
+ *
+ *     NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+{
+       might_sleep_if(pri & __GFP_WAIT);
+       if (skb_shared(skb)) {
+               struct sk_buff *nskb = skb_clone(skb, pri);
+               kfree_skb(skb);
+               skb = nskb;
+       }
+       return skb;
+}
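/*
 * Editorial sketch, not part of the patch: typical use of skb_share_check()
 * in a receive handler that wants to modify buffer metadata.  The handler
 * name and return convention are illustrative only.
 */
static int example_rcv(struct sk_buff *skb)
{
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;         /* original reference already dropped */
        /* skb is no longer shared; metadata may be modified safely */
        kfree_skb(skb);
        return 0;
}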
+
+/*
+ *     Copy shared buffers into a new sk_buff. We effectively do COW on
+ *     packets to handle cases where we have a local reader and a forwarding
+ *     path, and a couple of other messy ones. The normal one is tcpdumping
+ *     a packet that's being forwarded.
+ */
+
+/**
+ *     skb_unshare - make a copy of a shared buffer
+ *     @skb: buffer to check
+ *     @pri: priority for memory allocation
+ *
+ *     If the socket buffer is a clone then this function creates a new
+ *     copy of the data, drops a reference count on the old copy and returns
+ *     the new copy with the reference count at 1. If the buffer is not a clone
+ *     the original buffer is returned. When called with a spinlock held or
+ *     from interrupt context, @pri must be %GFP_ATOMIC.
+ *
+ *     %NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+{
+       might_sleep_if(pri & __GFP_WAIT);
+       if (skb_cloned(skb)) {
+               struct sk_buff *nskb = skb_copy(skb, pri);
+               kfree_skb(skb); /* Free our shared copy */
+               skb = nskb;
+       }
+       return skb;
+}
+
+/**
+ *     skb_peek
+ *     @list_: list to peek at
+ *
+ *     Peek an &sk_buff. Unlike most other operations you _MUST_
+ *     be careful with this one. A peek leaves the buffer on the
+ *     list and someone else may run off with it. You must hold
+ *     the appropriate locks or have a private queue to do this.
+ *
+ *     Returns %NULL for an empty list or a pointer to the head element.
+ *     The reference count is not incremented and the reference is therefore
+ *     volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+       struct sk_buff *list = ((struct sk_buff *)list_)->next;
+       if (list == (struct sk_buff *)list_)
+               list = NULL;
+       return list;
+}
+
+/**
+ *     skb_peek_tail
+ *     @list_: list to peek at
+ *
+ *     Peek an &sk_buff. Unlike most other operations you _MUST_
+ *     be careful with this one. A peek leaves the buffer on the
+ *     list and someone else may run off with it. You must hold
+ *     the appropriate locks or have a private queue to do this.
+ *
+ *     Returns %NULL for an empty list or a pointer to the tail element.
+ *     The reference count is not incremented and the reference is therefore
+ *     volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+{
+       struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+       if (list == (struct sk_buff *)list_)
+               list = NULL;
+       return list;
+}
+
+/**
+ *     skb_queue_len   - get queue length
+ *     @list_: list to measure
+ *
+ *     Return the length of an &sk_buff queue.
+ */
+static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+{
+       return list_->qlen;
+}
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+       spin_lock_init(&list->lock);
+       list->prev = list->next = (struct sk_buff *)list;
+       list->qlen = 0;
+}
+
+/*
+ *     Insert an sk_buff at the start of a list.
+ *
+ *     The "__skb_xxxx()" functions are the non-atomic ones that
+ *     can only be called with interrupts disabled.
+ */
+
+/**
+ *     __skb_queue_head - queue a buffer at the list head
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the start of a list. This function takes no locks
+ *     and you must therefore hold required locks before calling it.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+                                   struct sk_buff *newsk)
+{
+       struct sk_buff *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       prev = (struct sk_buff *)list;
+       next = prev->next;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+/**
+ *     __skb_queue_tail - queue a buffer at the list tail
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the end of a list. This function takes no locks
+ *     and you must therefore hold required locks before calling it.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+                                  struct sk_buff *newsk)
+{
+       struct sk_buff *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       next = (struct sk_buff *)list;
+       prev = next->prev;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+
+/**
+ *     __skb_dequeue - remove from the head of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the head of the list. This function does not take any locks
+ *     so must be used with appropriate locks held only. The head item is
+ *     returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+       struct sk_buff *next, *prev, *result;
+
+       prev = (struct sk_buff *) list;
+       next = prev->next;
+       result = NULL;
+       if (next != prev) {
+               result       = next;
+               next         = next->next;
+               list->qlen--;
+               next->prev   = prev;
+               prev->next   = next;
+               result->next = result->prev = NULL;
+               result->list = NULL;
+       }
+       return result;
+}
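/*
 * Editorial sketch, not part of the patch: a trivial FIFO built on the
 * locking queue helpers declared above (skb_queue_tail()/skb_dequeue()
 * take the queue's own lock, unlike the __skb_* variants).  All names
 * are hypothetical.
 */
static struct sk_buff_head example_fifo;

static void example_fifo_init(void)
{
        skb_queue_head_init(&example_fifo);
}

static void example_fifo_put(struct sk_buff *skb)
{
        skb_queue_tail(&example_fifo, skb);
}

static struct sk_buff *example_fifo_get(void)
{
        return skb_dequeue(&example_fifo);      /* NULL when the queue is empty */
}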
+
+
+/*
+ *     Insert a packet on a list.
+ */
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_insert(struct sk_buff *newsk,
+                               struct sk_buff *prev, struct sk_buff *next,
+                               struct sk_buff_head *list)
+{
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+       newsk->list = list;
+       list->qlen++;
+}
+
+/*
+ *     Place a packet after a given packet in a list.
+ */
+extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+       __skb_insert(newsk, old, old->next, old->list);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern void       skb_unlink(struct sk_buff *skb);
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+       struct sk_buff *next, *prev;
+
+       list->qlen--;
+       next       = skb->next;
+       prev       = skb->prev;
+       skb->next  = skb->prev = NULL;
+       skb->list  = NULL;
+       next->prev = prev;
+       prev->next = next;
+}
+
+
+/* XXX: more streamlined implementation */
+
+/**
+ *     __skb_dequeue_tail - remove from the tail of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the tail of the list. This function does not take any locks
+ *     so must be used with appropriate locks held only. The tail item is
+ *     returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+       struct sk_buff *skb = skb_peek_tail(list);
+       if (skb)
+               __skb_unlink(skb, list);
+       return skb;
+}
+
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+       return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+       return skb->len - skb->data_len;
+}
+
+static inline int skb_pagelen(const struct sk_buff *skb)
+{
+       int i, len = 0;
+
+       for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+               len += skb_shinfo(skb)->frags[i].size;
+       return len + skb_headlen(skb);
+}
+
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+                                     struct page *page, int off, int size)
+{
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+       frag->page                = page;
+       frag->page_offset         = off;
+       frag->size                = size;
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+#define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
+#define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->frag_list)
+#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
+
+/*
+ *     Add data to an sk_buff
+ */
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb->tail;
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       return tmp;
+}
+
+/**
+ *     skb_put - add data to a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer. If this would
+ *     exceed the total buffer size the kernel will panic. A pointer to the
+ *     first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb->tail;
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       if (unlikely(skb->tail>skb->end))
+               skb_over_panic(skb, len, current_text_addr());
+       return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+       skb->data -= len;
+       skb->len  += len;
+       return skb->data;
+}
+
+/**
+ *     skb_push - add data to the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer at the buffer
+ *     start. If this would exceed the total buffer headroom the kernel will
+ *     panic. A pointer to the first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+       skb->data -= len;
+       skb->len  += len;
+       if (unlikely(skb->data<skb->head))
+               skb_under_panic(skb, len, current_text_addr());
+       return skb->data;
+}
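/*
 * Editorial sketch, not part of the patch: prepending a link-layer header
 * with skb_push().  The 14-byte length is the Ethernet header size; the
 * function name is hypothetical.
 */
static void example_push_eth_header(struct sk_buff *skb, const void *hdr)
{
        memcpy(skb_push(skb, 14), hdr, 14);
        skb->mac.raw = skb->data;
}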
+
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       skb->len -= len;
+       BUG_ON(skb->len < skb->data_len);
+       return skb->data += len;
+}
+
+/**
+ *     skb_pull - remove data from the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to remove
+ *
+ *     This function removes data from the start of a buffer, returning
+ *     the memory to the headroom. A pointer to the next data in the buffer
+ *     is returned. Once the data has been pulled future pushes will overwrite
+ *     the old data.
+ */
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
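/*
 * Editorial sketch, not part of the patch: stripping a 14-byte Ethernet
 * header on receive with skb_pull() and recording the header pointers.
 * The function name is hypothetical.
 */
static void example_strip_eth_header(struct sk_buff *skb)
{
        skb->mac.raw = skb->data;
        if (skb_pull(skb, 14) != NULL)
                skb->nh.raw = skb->data;        /* network header now at the front */
}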
+
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+       if (len > skb_headlen(skb) &&
+           !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+               return NULL;
+       skb->len -= len;
+       return skb->data += len;
+}
+
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+       if (likely(len <= skb_headlen(skb)))
+               return 1;
+       if (unlikely(len > skb->len))
+               return 0;
+       return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+}
+
+/**
+ *     skb_headroom - bytes at buffer head
+ *     @skb: buffer to check
+ *
+ *     Return the number of bytes of free space at the head of an &sk_buff.
+ */
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+       return skb->data - skb->head;
+}
+
+/**
+ *     skb_tailroom - bytes at buffer end
+ *     @skb: buffer to check
+ *
+ *     Return the number of bytes of free space at the tail of an sk_buff
+ */
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+       return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
+}
+
+/**
+ *     skb_reserve - adjust headroom
+ *     @skb: buffer to alter
+ *     @len: bytes to move
+ *
+ *     Increase the headroom of an empty &sk_buff by reducing the tail
+ *     room. This is only allowed for an empty buffer.
+ */
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+       skb->data += len;
+       skb->tail += len;
+}
+
+/*
+ * CPUs often take a performance hit when accessing unaligned memory
+ * locations. The actual performance hit varies, it can be small if the
+ * hardware handles it or large if we have to take an exception and fix it
+ * in software.
+ *
+ * Since an Ethernet header is 14 bytes, network drivers often end up with
+ * the IP header at an unaligned offset. The IP header can be aligned by
+ * shifting the start of the packet by 2 bytes. Drivers should do this
+ * with:
+ *
+ * skb_reserve(skb, NET_IP_ALIGN);
+ *
+ * The downside to this alignment of the IP header is that the DMA is now
+ * unaligned. On some architectures the cost of an unaligned DMA is high
+ * and this cost outweighs the gains made by aligning the IP header.
+ * 
+ * Since this trade off varies between architectures, we allow NET_IP_ALIGN
+ * to be overridden.
+ */
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN   2
+#endif
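/*
 * Editorial sketch, not part of the patch: the allocation pattern the
 * comment above describes, with the skb_reserve() argument spelled out.
 * 'len' is the expected frame length; all names are illustrative.
 */
static struct sk_buff *example_rx_alloc(unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

        if (skb != NULL)
                skb_reserve(skb, NET_IP_ALIGN); /* IP header now lands aligned */
        return skb;
}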
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (!skb->data_len) {
+               skb->len  = len;
+               skb->tail = skb->data + len;
+       } else
+               ___pskb_trim(skb, len, 0);
+}
+
+/**
+ *     skb_trim - remove end from a buffer
+ *     @skb: buffer to alter
+ *     @len: new length
+ *
+ *     Cut the length of a buffer down by removing data from the tail. If
+ *     the buffer is already under the length specified it is not modified.
+ */
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (skb->len > len)
+               __skb_trim(skb, len);
+}
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (!skb->data_len) {
+               skb->len  = len;
+               skb->tail = skb->data+len;
+               return 0;
+       }
+       return ___pskb_trim(skb, len, 1);
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+       return (len < skb->len) ? __pskb_trim(skb, len) : 0;
+}
+
+/**
+ *     skb_orphan - orphan a buffer
+ *     @skb: buffer to orphan
+ *
+ *     If a buffer currently has an owner then we call the owner's
+ *     destructor function and make the @skb unowned. The buffer continues
+ *     to exist but is no longer charged to its former owner.
+ */
+static inline void skb_orphan(struct sk_buff *skb)
+{
+       if (skb->destructor)
+               skb->destructor(skb);
+       skb->destructor = NULL;
+       skb->sk         = NULL;
+}
+
+/**
+ *     __skb_queue_purge - empty a list
+ *     @list: list to empty
+ *
+ *     Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *     the list and one reference dropped. This function does not take the
+ *     list lock and the caller must hold the relevant locks to use it.
+ */
+extern void skb_queue_purge(struct sk_buff_head *list);
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+       while ((skb = __skb_dequeue(list)) != NULL)
+               kfree_skb(skb);
+}
+
+/**
+ *     __dev_alloc_skb - allocate an skbuff for receiving
+ *     @length: length to allocate
+ *     @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory.
+ */
+#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+                                             int gfp_mask)
+{
+       struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
+       if (likely(skb))
+               skb_reserve(skb, 16);
+       return skb;
+}
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
+
+/**
+ *     dev_alloc_skb - allocate an skbuff for receiving
+ *     @length: length to allocate
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory. Although this function
+ *     allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+       return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ *     skb_cow - copy header of skb when it is required
+ *     @skb: buffer to cow
+ *     @headroom: needed headroom
+ *
+ *     If the skb passed lacks sufficient headroom or its data part
+ *     is shared, data is reallocated. If reallocation fails, an error
+ *     is returned and original skb is not changed.
+ *
+ *     The result is skb with writable area skb->head...skb->tail
+ *     and at least @headroom of space at head.
+ */
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+       int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+       if (delta < 0)
+               delta = 0;
+
+       if (delta || skb_cloned(skb))
+               return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+       return 0;
+}
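/*
 * Editorial sketch, not part of the patch: making a buffer privately
 * writable before editing headers in place, as a forwarding path might.
 * The 16-byte headroom request and function name are illustrative.
 */
static int example_make_writable(struct sk_buff *skb)
{
        if (skb_cow(skb, 16))
                return -ENOMEM;
        /* skb->head ... skb->tail is now private and may be modified */
        return 0;
}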
+
+/**
+ *     skb_padto       - pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Returns the buffer, which may be a replacement
+ *     for the original, or NULL for out of memory - in which case
+ *     the original buffer is still freed.
+ */
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+       if (likely(size >= len))
+               return skb;
+       return skb_pad(skb, len-size);
+}
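/*
 * Editorial sketch, not part of the patch: the usual transmit-path use of
 * skb_padto() for hardware that cannot pad short Ethernet frames itself
 * (60 bytes is the minimum frame length excluding the CRC).  Names are
 * illustrative.
 */
static int example_xmit(struct sk_buff *skb)
{
        if (skb->len < 60) {
                skb = skb_padto(skb, 60);
                if (skb == NULL)
                        return 0;       /* original buffer already freed */
        }
        /* queue the (now padded) skb to the hardware here */
        return 0;
}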
+
+static inline int skb_add_data(struct sk_buff *skb,
+                              char __user *from, int copy)
+{
+       const int off = skb->len;
+
+       if (skb->ip_summed == CHECKSUM_NONE) {
+               int err = 0;
+               unsigned int csum = csum_and_copy_from_user(from,
+                                                           skb_put(skb, copy),
+                                                           copy, 0, &err);
+               if (!err) {
+                       skb->csum = csum_block_add(skb->csum, csum, off);
+                       return 0;
+               }
+       } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+               return 0;
+
+       __skb_trim(skb, off);
+       return -EFAULT;
+}
+
+static inline int skb_can_coalesce(struct sk_buff *skb, int i,
+                                  struct page *page, int off)
+{
+       if (i) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+               return page == frag->page &&
+                      off == frag->page_offset + frag->size;
+       }
+       return 0;
+}
+
+/**
+ *     skb_linearize - convert paged skb to linear one
+ *     @skb: buffer to linearize
+ *     @gfp: allocation mode
+ *
+ *     If there is no free memory -ENOMEM is returned, otherwise zero
+ *     is returned and the old skb data released.
+ */
+extern int __skb_linearize(struct sk_buff *skb, int gfp);
+static inline int skb_linearize(struct sk_buff *skb, int gfp)
+{
+       return __skb_linearize(skb, gfp);
+}
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+       BUG_ON(in_irq());
+
+       local_bh_disable();
+#endif
+       return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+       kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+       local_bh_enable();
+#endif
+}
+
+#define skb_queue_walk(queue, skb) \
+               for (skb = (queue)->next, prefetch(skb->next);  \
+                    (skb != (struct sk_buff *)(queue));        \
+                    skb = skb->next, prefetch(skb->next))
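/*
 * Editorial sketch, not part of the patch: walking a queue under its lock
 * with the skb_queue_walk() macro above.  The function name is
 * hypothetical.
 */
static unsigned int example_queue_bytes(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned int bytes = 0;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk(q, skb)
                bytes += skb->len;
        spin_unlock_irqrestore(&q->lock, flags);

        return bytes;
}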
+
+
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+                                        int noblock, int *err);
+extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
+                                    struct poll_table_struct *wait);
+extern int            skb_copy_datagram(const struct sk_buff *from,
+                                        int offset, char __user *to, int size);
+extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
+                                              int offset, struct iovec *to,
+                                              int size);
+extern int            skb_copy_and_csum_datagram(const struct sk_buff *skb,
+                                                 int offset, u8 __user *to,
+                                                 int len, unsigned int *csump);
+extern int            skb_copy_and_csum_datagram_iovec(const
+                                                       struct sk_buff *skb,
+                                                       int hlen,
+                                                       struct iovec *iov);
+extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
+                                   int len, unsigned int csum);
+extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
+                                    void *to, int len);
+extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
+                                             int offset, u8 *to, int len,
+                                             unsigned int csum);
+extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+extern void           skb_split(struct sk_buff *skb,
+                                struct sk_buff *skb1, const u32 len);
+
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+                                      int len, void *buffer)
+{
+       int hlen = skb_headlen(skb);
+
+       if (offset + len <= hlen)
+               return skb->data + offset;
+
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
+               return NULL;
+
+       return buffer;
+}
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+struct skb_iter {
+       /* Iteration functions set these */
+       unsigned char *data;
+       unsigned int len;
+
+       /* Private to iteration */
+       unsigned int nextfrag;
+       struct sk_buff *fraglist;
+};
+
+/* Keep iterating until skb_iter_next returns false. */
+extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
+extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
+/* Call this if aborting loop before !skb_iter_next */
+extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
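/*
 * Editorial sketch, not part of the patch: one way the iterator above
 * might be used, assuming skb_iter_first() exposes the first chunk of
 * data and skb_iter_next() advances and returns false after the last
 * chunk, as the comments suggest.  The function name is hypothetical.
 */
static unsigned int example_sum_bytes(const struct sk_buff *skb)
{
        struct skb_iter it;
        unsigned int sum = 0, i;

        skb_iter_first(skb, &it);
        do {
                for (i = 0; i < it.len; i++)
                        sum += it.data[i];
        } while (skb_iter_next(skb, &it));

        return sum;
}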
+
+#ifdef CONFIG_NETFILTER
+static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+       if (nfct && atomic_dec_and_test(&nfct->use))
+               nfct->destroy(nfct);
+}
+static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+{
+       if (nfct)
+               atomic_inc(&nfct->use);
+}
+static inline void nf_reset(struct sk_buff *skb)
+{
+       nf_conntrack_put(skb->nfct);
+       skb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+       skb->nf_debug = 0;
+#endif
+}
+static inline void nf_reset_debug(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETFILTER_DEBUG
+       skb->nf_debug = 0;
+#endif
+}
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+{
+       if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
+               kfree(nf_bridge);
+}
+static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+{
+       if (nf_bridge)
+               atomic_inc(&nf_bridge->use);
+}
+#endif /* CONFIG_BRIDGE_NETFILTER */
+#else /* CONFIG_NETFILTER */
+static inline void nf_reset(struct sk_buff *skb) {}
+#endif /* CONFIG_NETFILTER */
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux-2.6.9-xen-sparse/mkbuildtree b/linux-2.6.9-xen-sparse/mkbuildtree
new file mode 100755 (executable)
index 0000000..f932b92
--- /dev/null
@@ -0,0 +1,111 @@
+#!/bin/sh
+
+# mkbuildtree <build tree>
+#
+# Creates symbolic links in <build tree> for the sparse tree
+# in the current directory.
+
+# Script to determine the relative path between two directories.
+# Copyright (c) D. J. Hawkey Jr. 2002
+# Fixed for Xen project by K. Fraser in 2003.  
+abs_to_rel ()
+{
+       local CWD SRCPATH
+                
+       if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
+               SRCPATH=${1%?}
+       else
+               SRCPATH=$1
+       fi
+       if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
+               DESTPATH=${2%?}
+       else
+               DESTPATH=$2
+       fi
+
+       CWD=$PWD
+       [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
+       [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
+       [ "$CWD" != "$PWD" ] && cd $CWD
+
+       BASEPATH=$SRCPATH
+
+       [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
+       [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
+
+       while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
+          BASEPATH=${BASEPATH%/*}
+       done
+
+       SRCPATH=${SRCPATH#$BASEPATH}
+        DESTPATH=${DESTPATH#$BASEPATH}
+        DESTPATH=${DESTPATH#?}
+       while [ -n "$SRCPATH" ]; do
+               SRCPATH=${SRCPATH%/*}
+               DESTPATH="../$DESTPATH"
+       done
+
+       [ -z "$BASEPATH" ] && BASEPATH="/"
+       [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
+}
+
+# relative_lndir <target_dir>
+# Creates a tree of symlinks in the current working directory that mirror
+# real files in <target_dir>. <target_dir> should be relative to the current
+# working directory. Symlinks in <target_dir> are ignored. Source-control files
+# are ignored.
+relative_lndir ()
+{
+  local SYMLINK_DIR REAL_DIR pref i j
+  SYMLINK_DIR=$PWD
+  REAL_DIR=$1
+  (
+  cd $REAL_DIR
+  for i in `find . -type d | grep -v SCCS`; do
+    [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
+    (
+    cd $i
+    pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
+    for j in `find . -type f -o -type l -maxdepth 1`; do
+      ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
+    done
+    )
+  done
+  )
+}
+
+[ "$1" = "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
+
+# Get absolute path to the destination directory
+pushd . >/dev/null
+cd ${1}
+AD=$PWD
+popd >/dev/null
+  
+# Get absolute path to the source directory
+AS=`pwd`
+
+# Get path to source, relative to destination
+abs_to_rel ${AD} ${AS}
+RS=$DESTPATH
+
+# Remove old copies of files and directories at the destination
+for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
+
+# We now work from the destination directory
+cd ${AD}
+
+# Remove old symlinks
+for i in `find . -type l`; do rm -f $i; done
+
+# Create symlinks of files and directories which exist in the sparse source
+relative_lndir ${RS}
+rm -f mkbuildtree
+
+
+# Create links to the shared definitions of the hypervisor interface
+rm -rf ${AD}/include/asm-xen/hypervisor-ifs
+mkdir  ${AD}/include/asm-xen/hypervisor-ifs
+cd     ${AD}/include/asm-xen/hypervisor-ifs
+relative_lndir ../../../${RS}/../xen/include/hypervisor-ifs
+
diff --git a/linux-2.6.9-xen-sparse/mm/memory.c b/linux-2.6.9-xen-sparse/mm/memory.c
new file mode 100644 (file)
index 0000000..ffd40c5
--- /dev/null
@@ -0,0 +1,1825 @@
+/*
+ *  linux/mm/memory.c
+ *
+ *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ */
+
+/*
+ * demand-loading started 01.12.91 - seems it is high on the list of
+ * things wanted, and it should be easy to implement. - Linus
+ */
+
+/*
+ * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
+ * pages started 02.12.91, seems to work. - Linus.
+ *
+ * Tested sharing by executing about 30 /bin/sh: under the old kernel it
+ * would have taken more than the 6M I have free, but it worked well as
+ * far as I could see.
+ *
+ * Also corrected some "invalidate()"s - I wasn't doing enough of them.
+ */
+
+/*
+ * Real VM (paging to/from disk) started 18.12.91. Much more work and
+ * thought has to go into this. Oh, well..
+ * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
+ *             Found it. Everything seems to work now.
+ * 20.12.91  -  Ok, making the swap-device changeable like the root.
+ */
+
+/*
+ * 05.04.94  -  Multi-page memory management added for v1.1.
+ *             Idea by Alex Bligh (alex@cconcepts.co.uk)
+ *
+ * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
+ *             (Gerhard.Wichert@pdb.siemens.de)
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+
+#include <linux/swapops.h>
+#include <linux/elf.h>
+
+#ifndef CONFIG_DISCONTIGMEM
+/* use the per-pgdat data instead for discontigmem - mbligh */
+unsigned long max_mapnr;
+struct page *mem_map;
+
+EXPORT_SYMBOL(max_mapnr);
+EXPORT_SYMBOL(mem_map);
+#endif
+
+unsigned long num_physpages;
+/*
+ * A number of key systems in x86 including ioremap() rely on the assumption
+ * that high_memory defines the upper bound on direct map memory, then end
+ * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
+ * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
+ * and ZONE_HIGHMEM.
+ */
+void * high_memory;
+struct page *highmem_start_page;
+unsigned long vmalloc_earlyreserve;
+
+EXPORT_SYMBOL(num_physpages);
+EXPORT_SYMBOL(highmem_start_page);
+EXPORT_SYMBOL(high_memory);
+EXPORT_SYMBOL(vmalloc_earlyreserve);
+
+/*
+ * We special-case the C-O-W ZERO_PAGE, because it's such
+ * a common occurrence (no need to read the page to know
+ * that it's zero - better for the cache and memory subsystem).
+ */
+static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
+{
+       if (from == ZERO_PAGE(address)) {
+               clear_user_highpage(to, address);
+               return;
+       }
+       copy_user_highpage(to, from, address);
+}
+
+/*
+ * Note: this doesn't free the actual pages themselves. That
+ * has been handled earlier when unmapping all the memory regions.
+ */
+static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
+{
+       struct page *page;
+
+       if (pmd_none(*dir))
+               return;
+       if (unlikely(pmd_bad(*dir))) {
+               pmd_ERROR(*dir);
+               pmd_clear(dir);
+               return;
+       }
+       page = pmd_page(*dir);
+       pmd_clear(dir);
+       dec_page_state(nr_page_table_pages);
+       pte_free_tlb(tlb, page);
+}
+
+static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
+{
+       int j;
+       pmd_t * pmd;
+
+       if (pgd_none(*dir))
+               return;
+       if (unlikely(pgd_bad(*dir))) {
+               pgd_ERROR(*dir);
+               pgd_clear(dir);
+               return;
+       }
+       pmd = pmd_offset(dir, 0);
+       pgd_clear(dir);
+       for (j = 0; j < PTRS_PER_PMD ; j++)
+               free_one_pmd(tlb, pmd+j);
+       pmd_free_tlb(tlb, pmd);
+}
+
+/*
+ * This function clears all user-level page tables of a process - this
+ * is needed by execve(), so that old pages aren't in the way.
+ *
+ * Must be called with pagetable lock held.
+ */
+void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
+{
+       pgd_t * page_dir = tlb->mm->pgd;
+
+       page_dir += first;
+       do {
+               free_one_pgd(tlb, page_dir);
+               page_dir++;
+       } while (--nr);
+}
+
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+       if (!pmd_present(*pmd)) {
+               struct page *new;
+
+               spin_unlock(&mm->page_table_lock);
+               new = pte_alloc_one(mm, address);
+               spin_lock(&mm->page_table_lock);
+               if (!new)
+                       return NULL;
+
+               /*
+                * Because we dropped the lock, we should re-check the
+                * entry, as somebody else could have populated it..
+                */
+               if (pmd_present(*pmd)) {
+                       pte_free(new);
+                       goto out;
+               }
+               inc_page_state(nr_page_table_pages);
+               pmd_populate(mm, pmd, new);
+       }
+out:
+       return pte_offset_map(pmd, address);
+}
+
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+       if (!pmd_present(*pmd)) {
+               pte_t *new;
+
+               spin_unlock(&mm->page_table_lock);
+               new = pte_alloc_one_kernel(mm, address);
+               spin_lock(&mm->page_table_lock);
+               if (!new)
+                       return NULL;
+
+               /*
+                * Because we dropped the lock, we should re-check the
+                * entry, as somebody else could have populated it..
+                */
+               if (pmd_present(*pmd)) {
+                       pte_free_kernel(new);
+                       goto out;
+               }
+               pmd_populate_kernel(mm, pmd, new);
+       }
+out:
+       return pte_offset_kernel(pmd, address);
+}
+#define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
+#define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
+
+/*
+ * copy one vm_area from one task to the other. Assumes the page tables
+ * already present in the new task to be cleared in the whole range
+ * covered by this vma.
+ *
+ * 08Jan98 Merged into one routine from several inline routines to reduce
+ *         variable count and make things faster. -jj
+ *
+ * dst->page_table_lock is held on entry and exit,
+ * but may be dropped within pmd_alloc() and pte_alloc_map().
+ */
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+                       struct vm_area_struct *vma)
+{
+       pgd_t * src_pgd, * dst_pgd;
+       unsigned long address = vma->vm_start;
+       unsigned long end = vma->vm_end;
+       unsigned long cow;
+
+       if (is_vm_hugetlb_page(vma))
+               return copy_hugetlb_page_range(dst, src, vma);
+
+       cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+       src_pgd = pgd_offset(src, address)-1;
+       dst_pgd = pgd_offset(dst, address)-1;
+
+       for (;;) {
+               pmd_t * src_pmd, * dst_pmd;
+
+               src_pgd++; dst_pgd++;
+               
+               /* copy_pmd_range */
+               
+               if (pgd_none(*src_pgd))
+                       goto skip_copy_pmd_range;
+               if (unlikely(pgd_bad(*src_pgd))) {
+                       pgd_ERROR(*src_pgd);
+                       pgd_clear(src_pgd);
+skip_copy_pmd_range:   address = (address + PGDIR_SIZE) & PGDIR_MASK;
+                       if (!address || (address >= end))
+                               goto out;
+                       continue;
+               }
+
+               src_pmd = pmd_offset(src_pgd, address);
+               dst_pmd = pmd_alloc(dst, dst_pgd, address);
+               if (!dst_pmd)
+                       goto nomem;
+
+               do {
+                       pte_t * src_pte, * dst_pte;
+               
+                       /* copy_pte_range */
+               
+                       if (pmd_none(*src_pmd))
+                               goto skip_copy_pte_range;
+                       if (unlikely(pmd_bad(*src_pmd))) {
+                               pmd_ERROR(*src_pmd);
+                               pmd_clear(src_pmd);
+skip_copy_pte_range:
+                               address = (address + PMD_SIZE) & PMD_MASK;
+                               if (address >= end)
+                                       goto out;
+                               goto cont_copy_pmd_range;
+                       }
+
+                       dst_pte = pte_alloc_map(dst, dst_pmd, address);
+                       if (!dst_pte)
+                               goto nomem;
+                       spin_lock(&src->page_table_lock);       
+                       src_pte = pte_offset_map_nested(src_pmd, address);
+                       do {
+                               pte_t pte = *src_pte;
+                               struct page *page;
+                               unsigned long pfn;
+
+                               /* copy_one_pte */
+
+                               if (pte_none(pte))
+                                       goto cont_copy_pte_range_noset;
+                               /* pte contains position in swap, so copy. */
+                               if (!pte_present(pte)) {
+                                       if (!pte_file(pte))
+                                               swap_duplicate(pte_to_swp_entry(pte));
+                                       set_pte(dst_pte, pte);
+                                       goto cont_copy_pte_range_noset;
+                               }
+                               pfn = pte_pfn(pte);
+                               /* the pte points outside of valid memory, the
+                                * mapping is assumed to be good, meaningful
+                                * and not mapped via rmap - duplicate the
+                                * mapping as is.
+                                */
+                               page = NULL;
+                               if (pfn_valid(pfn)) 
+                                       page = pfn_to_page(pfn); 
+
+                               if (!page || PageReserved(page)) {
+                                       set_pte(dst_pte, pte);
+                                       goto cont_copy_pte_range_noset;
+                               }
+
+                               /*
+                                * If it's a COW mapping, write protect it both
+                                * in the parent and the child
+                                */
+                               if (cow) {
+                                       ptep_set_wrprotect(src_pte);
+                                       pte = *src_pte;
+                               }
+
+                               /*
+                                * If it's a shared mapping, mark it clean in
+                                * the child
+                                */
+                               if (vma->vm_flags & VM_SHARED)
+                                       pte = pte_mkclean(pte);
+                               pte = pte_mkold(pte);
+                               get_page(page);
+                               dst->rss++;
+                               set_pte(dst_pte, pte);
+                               page_dup_rmap(page);
+cont_copy_pte_range_noset:
+                               address += PAGE_SIZE;
+                               if (address >= end) {
+                                       pte_unmap_nested(src_pte);
+                                       pte_unmap(dst_pte);
+                                       goto out_unlock;
+                               }
+                               src_pte++;
+                               dst_pte++;
+                       } while ((unsigned long)src_pte & PTE_TABLE_MASK);
+                       pte_unmap_nested(src_pte-1);
+                       pte_unmap(dst_pte-1);
+                       spin_unlock(&src->page_table_lock);
+                       cond_resched_lock(&dst->page_table_lock);
+cont_copy_pmd_range:
+                       src_pmd++;
+                       dst_pmd++;
+               } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
+       }
+out_unlock:
+       spin_unlock(&src->page_table_lock);
+out:
+       return 0;
+nomem:
+       return -ENOMEM;
+}
+
+static void zap_pte_range(struct mmu_gather *tlb,
+               pmd_t *pmd, unsigned long address,
+               unsigned long size, struct zap_details *details)
+{
+       unsigned long offset;
+       pte_t *ptep;
+
+       if (pmd_none(*pmd))
+               return;
+       if (unlikely(pmd_bad(*pmd))) {
+               pmd_ERROR(*pmd);
+               pmd_clear(pmd);
+               return;
+       }
+       ptep = pte_offset_map(pmd, address);
+       offset = address & ~PMD_MASK;
+       if (offset + size > PMD_SIZE)
+               size = PMD_SIZE - offset;
+       size &= PAGE_MASK;
+       if (details && !details->check_mapping && !details->nonlinear_vma)
+               details = NULL;
+       for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+               pte_t pte = *ptep;
+               if (pte_none(pte))
+                       continue;
+               if (pte_present(pte)) {
+                       struct page *page = NULL;
+                       unsigned long pfn = pte_pfn(pte);
+                       if (pfn_valid(pfn)) {
+                               page = pfn_to_page(pfn);
+                               if (PageReserved(page))
+                                       page = NULL;
+                       }
+                       if (unlikely(details) && page) {
+                               /*
+                                * unmap_shared_mapping_pages() wants to
+                                * invalidate cache without truncating:
+                                * unmap shared but keep private pages.
+                                */
+                               if (details->check_mapping &&
+                                   details->check_mapping != page->mapping)
+                                       continue;
+                               /*
+                                * Each page->index must be checked when
+                                * invalidating or truncating nonlinear.
+                                */
+                               if (details->nonlinear_vma &&
+                                   (page->index < details->first_index ||
+                                    page->index > details->last_index))
+                                       continue;
+                       }
+                       pte = ptep_get_and_clear(ptep);
+                       tlb_remove_tlb_entry(tlb, ptep, address+offset);
+                       if (unlikely(!page))
+                               continue;
+                       if (unlikely(details) && details->nonlinear_vma
+                           && linear_page_index(details->nonlinear_vma,
+                                       address+offset) != page->index)
+                               set_pte(ptep, pgoff_to_pte(page->index));
+                       if (pte_dirty(pte))
+                               set_page_dirty(page);
+                       if (pte_young(pte) && !PageAnon(page))
+                               mark_page_accessed(page);
+                       tlb->freed++;
+                       page_remove_rmap(page);
+                       tlb_remove_page(tlb, page);
+                       continue;
+               }
+               /*
+                * If details->check_mapping, we leave swap entries;
+                * if details->nonlinear_vma, we leave file entries.
+                */
+               if (unlikely(details))
+                       continue;
+               if (!pte_file(pte))
+                       free_swap_and_cache(pte_to_swp_entry(pte));
+               pte_clear(ptep);
+       }
+       pte_unmap(ptep-1);
+}
+
+static void zap_pmd_range(struct mmu_gather *tlb,
+               pgd_t * dir, unsigned long address,
+               unsigned long size, struct zap_details *details)
+{
+       pmd_t * pmd;
+       unsigned long end;
+
+       if (pgd_none(*dir))
+               return;
+       if (unlikely(pgd_bad(*dir))) {
+               pgd_ERROR(*dir);
+               pgd_clear(dir);
+               return;
+       }
+       pmd = pmd_offset(dir, address);
+       end = address + size;
+       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+       do {
+               zap_pte_range(tlb, pmd, address, end - address, details);
+               address = (address + PMD_SIZE) & PMD_MASK; 
+               pmd++;
+       } while (address && (address < end));
+}
+
+static void unmap_page_range(struct mmu_gather *tlb,
+               struct vm_area_struct *vma, unsigned long address,
+               unsigned long end, struct zap_details *details)
+{
+       pgd_t * dir;
+
+       BUG_ON(address >= end);
+       dir = pgd_offset(vma->vm_mm, address);
+       tlb_start_vma(tlb, vma);
+       do {
+               zap_pmd_range(tlb, dir, address, end - address, details);
+               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (address && (address < end));
+       tlb_end_vma(tlb, vma);
+}
+
+/* Dispose of an entire struct mmu_gather per rescheduling point */
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
+#endif
+
+/* For UP, 256 pages at a time gives nice low latency */
+#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+#define ZAP_BLOCK_SIZE (256 * PAGE_SIZE)
+#endif
+
+/* No preempt: go for improved straight-line efficiency */
+#if !defined(CONFIG_PREEMPT)
+#define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
+#endif
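
For scale, assuming the usual 4 KiB PAGE_SIZE, the two fixed sizes above come to 256 x 4 KiB = 1 MiB per batch on preemptible UP and 1024 x 4 KiB = 4 MiB when preemption is off; the SMP-plus-preempt case is instead bounded by FREE_PTE_NR pages, i.e. however many pages one struct mmu_gather can hold before it has to be flushed.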
+
+/**
+ * unmap_vmas - unmap a range of memory covered by a list of vma's
+ * @tlbp: address of the caller's struct mmu_gather
+ * @mm: the controlling mm_struct
+ * @vma: the starting vma
+ * @start_addr: virtual address at which to start unmapping
+ * @end_addr: virtual address at which to end unmapping
+ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Returns the number of vma's which were covered by the unmapping.
+ *
+ * Unmap all pages in the vma list.  Called under page_table_lock.
+ *
+ * We aim to not hold page_table_lock for too long (for scheduling latency
+ * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
+ * return the ending mmu_gather to the caller.
+ *
+ * Only addresses between `start' and `end' will be unmapped.
+ *
+ * The VMA list must be sorted in ascending virtual address order.
+ *
+ * unmap_vmas() assumes that the caller will flush the whole unmapped address
+ * range after unmap_vmas() returns.  So the only responsibility here is to
+ * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
+ * drops the lock and schedules.
+ */
+int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long start_addr,
+               unsigned long end_addr, unsigned long *nr_accounted,
+               struct zap_details *details)
+{
+       unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+       unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
+       int tlb_start_valid = 0;
+       int ret = 0;
+       int atomic = details && details->atomic;
+
+       for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
+               unsigned long start;
+               unsigned long end;
+
+               start = max(vma->vm_start, start_addr);
+               if (start >= vma->vm_end)
+                       continue;
+               end = min(vma->vm_end, end_addr);
+               if (end <= vma->vm_start)
+                       continue;
+
+               if (vma->vm_flags & VM_ACCOUNT)
+                       *nr_accounted += (end - start) >> PAGE_SHIFT;
+
+               ret++;
+               while (start != end) {
+                       unsigned long block;
+
+                       if (!tlb_start_valid) {
+                               tlb_start = start;
+                               tlb_start_valid = 1;
+                       }
+
+                       if (is_vm_hugetlb_page(vma)) {
+                               block = end - start;
+                               unmap_hugepage_range(vma, start, end);
+                       } else {
+                               block = min(zap_bytes, end - start);
+                               unmap_page_range(*tlbp, vma, start,
+                                               start + block, details);
+                       }
+
+                       start += block;
+                       zap_bytes -= block;
+                       if ((long)zap_bytes > 0)
+                               continue;
+                       if (!atomic && need_resched()) {
+                               int fullmm = tlb_is_full_mm(*tlbp);
+                               tlb_finish_mmu(*tlbp, tlb_start, start);
+                               cond_resched_lock(&mm->page_table_lock);
+                               *tlbp = tlb_gather_mmu(mm, fullmm);
+                               tlb_start_valid = 0;
+                       }
+                       zap_bytes = ZAP_BLOCK_SIZE;
+               }
+       }
+       return ret;
+}
+
+/**
+ * zap_page_range - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ */
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size, struct zap_details *details)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct mmu_gather *tlb;
+       unsigned long end = address + size;
+       unsigned long nr_accounted = 0;
+
+       if (is_vm_hugetlb_page(vma)) {
+               zap_hugepage_range(vma, address, size);
+               return;
+       }
+
+       lru_add_drain();
+       spin_lock(&mm->page_table_lock);
+       tlb = tlb_gather_mmu(mm, 0);
+       unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+       tlb_finish_mmu(tlb, address, end);
+       spin_unlock(&mm->page_table_lock);
+}
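
As a usage sketch (not part of this patch), a caller that only wants to drop every user page backing part of a VMA, MADV_DONTNEED-style, passes a NULL details pointer. discard_range below is a hypothetical helper:

        #include <linux/mm.h>

        /* Minimal sketch: throw away all pages mapped in [start, end) of a vma. */
        static void discard_range(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end)
        {
                /* NULL details: plain zap, no truncation or nonlinear bookkeeping */
                zap_page_range(vma, start, end - start, NULL);
        }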
+
+/*
+ * Do a quick page-table lookup for a single page.
+ * mm->page_table_lock must be held.
+ */
+struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write) 
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       unsigned long pfn;
+       struct page *page;
+
+       page = follow_huge_addr(mm, address, write);
+       if (! IS_ERR(page))
+               return page;
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto out;
+
+       pmd = pmd_offset(pgd, address);
+       if (pmd_none(*pmd))
+               goto out;
+       if (pmd_huge(*pmd))
+               return follow_huge_pmd(mm, address, pmd, write);
+       if (unlikely(pmd_bad(*pmd)))
+               goto out;
+
+       ptep = pte_offset_map(pmd, address);
+       if (!ptep)
+               goto out;
+
+       pte = *ptep;
+       pte_unmap(ptep);
+       if (pte_present(pte)) {
+               if (write && !pte_write(pte))
+                       goto out;
+               pfn = pte_pfn(pte);
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+                       if (write && !pte_dirty(pte) && !PageDirty(page))
+                               set_page_dirty(page);
+                       mark_page_accessed(page);
+                       return page;
+               }
+       }
+
+out:
+       return NULL;
+}
+
+/* 
+ * Given a physical address, is there a useful struct page pointing to
+ * it?  This may become more complex in the future if we start dealing
+ * with IO-aperture pages for direct-IO.
+ */
+
+static inline struct page *get_page_map(struct page *page)
+{
+       if (!pfn_valid(page_to_pfn(page)))
+               return NULL;
+       return page;
+}
+
+
+static inline int
+untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
+                        unsigned long address)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+
+       /* Check if the vma is for an anonymous mapping. */
+       if (vma->vm_ops && vma->vm_ops->nopage)
+               return 0;
+
+       /* Check if page directory entry exists. */
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               return 1;
+
+       /* Check if page middle directory entry exists. */
+       pmd = pmd_offset(pgd, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               return 1;
+
+       /* There is a pte slot for 'address' in 'mm'. */
+       return 0;
+}
+
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, int len, int write, int force,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       int i;
+       unsigned int flags;
+
+       /* 
+        * Require read or write permissions.
+        * If 'force' is set, we only require the "MAY" flags.
+        */
+       flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       i = 0;
+
+       do {
+               struct vm_area_struct * vma;
+
+               vma = find_extend_vma(mm, start);
+               if (!vma && in_gate_area(tsk, start)) {
+                       unsigned long pg = start & PAGE_MASK;
+                       struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+                       pgd_t *pgd;
+                       pmd_t *pmd;
+                       pte_t *pte;
+                       if (write) /* user gate pages are read-only */
+                               return i ? : -EFAULT;
+                       pgd = pgd_offset_gate(mm, pg);
+                       if (!pgd)
+                               return i ? : -EFAULT;
+                       pmd = pmd_offset(pgd, pg);
+                       if (!pmd)
+                               return i ? : -EFAULT;
+                       pte = pte_offset_map(pmd, pg);
+                       if (!pte)
+                               return i ? : -EFAULT;
+                       if (!pte_present(*pte)) {
+                               pte_unmap(pte);
+                               return i ? : -EFAULT;
+                       }
+                       if (pages) {
+                               pages[i] = pte_page(*pte);
+                               get_page(pages[i]);
+                       }
+                       pte_unmap(pte);
+                       if (vmas)
+                               vmas[i] = gate_vma;
+                       i++;
+                       start += PAGE_SIZE;
+                       len--;
+                       continue;
+               }
+
+               if (!vma || (pages && (vma->vm_flags & VM_IO))
+                               || !(flags & vma->vm_flags))
+                       return i ? : -EFAULT;
+
+               if (is_vm_hugetlb_page(vma)) {
+                       i = follow_hugetlb_page(mm, vma, pages, vmas,
+                                               &start, &len, i);
+                       continue;
+               }
+               spin_lock(&mm->page_table_lock);
+               do {
+                       struct page *map;
+                       int lookup_write = write;
+                       while (!(map = follow_page(mm, start, lookup_write))) {
+                               /*
+                                * Shortcut for anonymous pages. We don't want
+                                * to force the creation of page tables for
+                                * insanely big anonymously mapped areas that
+                                * nobody touched so far. This is important
+                                * for doing a core dump for these mappings.
+                                */
+                               if (!lookup_write &&
+                                   untouched_anonymous_page(mm,vma,start)) {
+                                       map = ZERO_PAGE(start);
+                                       break;
+                               }
+                               spin_unlock(&mm->page_table_lock);
+                               switch (handle_mm_fault(mm,vma,start,write)) {
+                               case VM_FAULT_MINOR:
+                                       tsk->min_flt++;
+                                       break;
+                               case VM_FAULT_MAJOR:
+                                       tsk->maj_flt++;
+                                       break;
+                               case VM_FAULT_SIGBUS:
+                                       return i ? i : -EFAULT;
+                               case VM_FAULT_OOM:
+                                       return i ? i : -ENOMEM;
+                               default:
+                                       BUG();
+                               }
+                               /*
+                                * Now that we have performed a write fault
+                                * and surely no longer have a shared page we
+                                * shouldn't write, we shouldn't ignore an
+                                * unwritable page in the page table if
+                                * we are forcing write access.
+                                */
+                               lookup_write = write && !force;
+                               spin_lock(&mm->page_table_lock);
+                       }
+                       if (pages) {
+                               pages[i] = get_page_map(map);
+                               if (!pages[i]) {
+                                       spin_unlock(&mm->page_table_lock);
+                                       while (i--)
+                                               page_cache_release(pages[i]);
+                                       i = -EFAULT;
+                                       goto out;
+                               }
+                               flush_dcache_page(pages[i]);
+                               if (!PageReserved(pages[i]))
+                                       page_cache_get(pages[i]);
+                       }
+                       if (vmas)
+                               vmas[i] = vma;
+                       i++;
+                       start += PAGE_SIZE;
+                       len--;
+               } while(len && start < vma->vm_end);
+               spin_unlock(&mm->page_table_lock);
+       } while(len);
+out:
+       return i;
+}
+
+EXPORT_SYMBOL(get_user_pages);
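
Because get_user_pages() is exported, drivers of this generation use it to pin a user buffer before doing DMA to it. The sketch below assumes uaddr and nr_pages were already validated and that pages is a caller-supplied array; the call must be made with mmap_sem held for read, and every pinned page released again once the I/O completes:

        #include <linux/mm.h>
        #include <linux/sched.h>
        #include <linux/pagemap.h>

        /* Sketch: pin nr_pages user pages starting at uaddr for writing. */
        static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
        {
                int ret;

                down_read(&current->mm->mmap_sem);
                ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                                     nr_pages, 1 /* write */, 0 /* force */,
                                     pages, NULL);
                up_read(&current->mm->mmap_sem);
                return ret;     /* number of pages actually pinned, or -errno */
        }

        /* Sketch: drop the references taken by get_user_pages(). */
        static void unpin_user_buffer(struct page **pages, int nr_pages)
        {
                int i;

                for (i = 0; i < nr_pages; i++)
                        page_cache_release(pages[i]);
        }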
+
+static void zeromap_pte_range(pte_t * pte, unsigned long address,
+                                     unsigned long size, pgprot_t prot)
+{
+       unsigned long end;
+
+       address &= ~PMD_MASK;
+       end = address + size;
+       if (end > PMD_SIZE)
+               end = PMD_SIZE;
+       do {
+               pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
+               BUG_ON(!pte_none(*pte));
+               set_pte(pte, zero_pte);
+               address += PAGE_SIZE;
+               pte++;
+       } while (address && (address < end));
+}
+
+static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
+                                    unsigned long size, pgprot_t prot)
+{
+       unsigned long base, end;
+
+       base = address & PGDIR_MASK;
+       address &= ~PGDIR_MASK;
+       end = address + size;
+       if (end > PGDIR_SIZE)
+               end = PGDIR_SIZE;
+       do {
+               pte_t * pte = pte_alloc_map(mm, pmd, base + address);
+               if (!pte)
+                       return -ENOMEM;
+               zeromap_pte_range(pte, base + address, end - address, prot);
+               pte_unmap(pte);
+               address = (address + PMD_SIZE) & PMD_MASK;
+               pmd++;
+       } while (address && (address < end));
+       return 0;
+}
+
+int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
+{
+       int error = 0;
+       pgd_t * dir;
+       unsigned long beg = address;
+       unsigned long end = address + size;
+       struct mm_struct *mm = vma->vm_mm;
+
+       dir = pgd_offset(mm, address);
+       flush_cache_range(vma, beg, end);
+       if (address >= end)
+               BUG();
+
+       spin_lock(&mm->page_table_lock);
+       do {
+               pmd_t *pmd = pmd_alloc(mm, dir, address);
+               error = -ENOMEM;
+               if (!pmd)
+                       break;
+               error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
+               if (error)
+                       break;
+               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (address && (address < end));
+       /*
+        * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
+        */
+       flush_tlb_range(vma, beg, end);
+       spin_unlock(&mm->page_table_lock);
+       return error;
+}
+
+/*
+ * Maps a range of physical memory into the requested pages. The old
+ * mappings are removed. Any references to nonexistent pages result
+ * in null mappings (currently treated as "copy-on-access").
+ */
+static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+       unsigned long phys_addr, pgprot_t prot)
+{
+       unsigned long end;
+       unsigned long pfn;
+
+       address &= ~PMD_MASK;
+       end = address + size;
+       if (end > PMD_SIZE)
+               end = PMD_SIZE;
+       pfn = phys_addr >> PAGE_SHIFT;
+       do {
+               BUG_ON(!pte_none(*pte));
+               if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
+                       set_pte(pte, pfn_pte(pfn, prot));
+               address += PAGE_SIZE;
+               pfn++;
+               pte++;
+       } while (address && (address < end));
+}
+
+static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+       unsigned long phys_addr, pgprot_t prot)
+{
+       unsigned long base, end;
+
+       base = address & PGDIR_MASK;
+       address &= ~PGDIR_MASK;
+       end = address + size;
+       if (end > PGDIR_SIZE)
+               end = PGDIR_SIZE;
+       phys_addr -= address;
+       do {
+               pte_t * pte = pte_alloc_map(mm, pmd, base + address);
+               if (!pte)
+                       return -ENOMEM;
+               remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
+               pte_unmap(pte);
+               address = (address + PMD_SIZE) & PMD_MASK;
+               pmd++;
+       } while (address && (address < end));
+       return 0;
+}
+
+/*  Note: this is only safe if the mm semaphore is held when called. */
+int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+       int error = 0;
+       pgd_t * dir;
+       unsigned long beg = from;
+       unsigned long end = from + size;
+       struct mm_struct *mm = vma->vm_mm;
+
+       phys_addr -= from;
+       dir = pgd_offset(mm, from);
+       flush_cache_range(vma, beg, end);
+       if (from >= end)
+               BUG();
+
+       spin_lock(&mm->page_table_lock);
+       do {
+               pmd_t *pmd = pmd_alloc(mm, dir, from);
+               error = -ENOMEM;
+               if (!pmd)
+                       break;
+               error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+               if (error)
+                       break;
+               from = (from + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (from && (from < end));
+       /*
+        * Why flush? remap_pte_range has a BUG_ON for !pte_none()
+        */
+       flush_tlb_range(vma, beg, end);
+       spin_unlock(&mm->page_table_lock);
+       return error;
+}
+
+EXPORT_SYMBOL(remap_page_range);
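
remap_page_range() is what a 2.6.9-era character driver calls from its mmap file operation to map device memory into user space; as the note above says, do_mmap() already holds the mm semaphore for us. A minimal sketch, with mydev_phys_base standing in for the device's (hypothetical) physical buffer address:

        #include <linux/fs.h>
        #include <linux/mm.h>

        static unsigned long mydev_phys_base;   /* hypothetical device buffer base */

        /* Sketch: a driver's mmap method built on remap_page_range(). */
        static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
        {
                unsigned long size = vma->vm_end - vma->vm_start;
                unsigned long phys = mydev_phys_base + (vma->vm_pgoff << PAGE_SHIFT);

                if (remap_page_range(vma, vma->vm_start, phys, size,
                                     vma->vm_page_prot))
                        return -EAGAIN;
                return 0;
        }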
+
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+ * servicing faults for write access.  In the normal case we always want
+ * pte_mkwrite.  But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+       if (likely(vma->vm_flags & VM_WRITE))
+               pte = pte_mkwrite(pte);
+       return pte;
+}
+
+/*
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+ */
+static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
+               pte_t *page_table)
+{
+       pte_t entry;
+
+       flush_cache_page(vma, address);
+       entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
+                             vma);
+       ptep_establish(vma, address, page_table, entry);
+       update_mmu_cache(vma, address, entry);
+}
+
+/*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+ * and decrementing the shared-page counter for the old page.
+ *
+ * Goto-purists beware: the only reason for goto's here is that it results
+ * in better assembly code.. The "default" path will see no jumps at all.
+ *
+ * Note that this routine assumes that the protection checks have been
+ * done by the caller (the low-level page fault routine in most cases).
+ * Thus we can safely just mark it writable once we've done any necessary
+ * COW.
+ *
+ * We also mark the page dirty at this point even though the page will
+ * change only once the write actually happens. This avoids a few races,
+ * and potentially makes it more efficient.
+ *
+ * We hold the mm semaphore and the page_table_lock on entry and exit
+ * with the page_table_lock released.
+ */
+static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
+       unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
+{
+       struct page *old_page, *new_page;
+       unsigned long pfn = pte_pfn(pte);
+       pte_t entry;
+
+       if (unlikely(!pfn_valid(pfn))) {
+               /*
+                * This should really halt the system so it can be debugged or
+                * at least the kernel stops what it's doing before it corrupts
+                * data, but for the moment just pretend this is OOM.
+                */
+               pte_unmap(page_table);
+               printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
+                               address);
+               spin_unlock(&mm->page_table_lock);
+               return VM_FAULT_OOM;
+       }
+       old_page = pfn_to_page(pfn);
+
+       if (!TestSetPageLocked(old_page)) {
+               int reuse = can_share_swap_page(old_page);
+               unlock_page(old_page);
+               if (reuse) {
+                       flush_cache_page(vma, address);
+                       entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
+                                             vma);
+                       ptep_set_access_flags(vma, address, page_table, entry, 1);
+                       update_mmu_cache(vma, address, entry);
+                       pte_unmap(page_table);
+                       spin_unlock(&mm->page_table_lock);
+                       return VM_FAULT_MINOR;
+               }
+       }
+       pte_unmap(page_table);
+
+       /*
+        * Ok, we need to copy. Oh, well..
+        */
+       if (!PageReserved(old_page))
+               page_cache_get(old_page);
+       spin_unlock(&mm->page_table_lock);
+
+       if (unlikely(anon_vma_prepare(vma)))
+               goto no_new_page;
+       new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+       if (!new_page)
+               goto no_new_page;
+       copy_cow_page(old_page,new_page,address);
+
+       /*
+        * Re-check the pte - we dropped the lock
+        */
+       spin_lock(&mm->page_table_lock);
+       page_table = pte_offset_map(pmd, address);
+       if (likely(pte_same(*page_table, pte))) {
+               if (PageReserved(old_page))
+                       ++mm->rss;
+               else
+                       page_remove_rmap(old_page);
+               break_cow(vma, new_page, address, page_table);
+               lru_cache_add_active(new_page);
+               page_add_anon_rmap(new_page, vma, address);
+
+               /* Free the old page.. */
+               new_page = old_page;
+       }
+       pte_unmap(page_table);
+       page_cache_release(new_page);
+       page_cache_release(old_page);
+       spin_unlock(&mm->page_table_lock);
+       return VM_FAULT_MINOR;
+
+no_new_page:
+       page_cache_release(old_page);
+       return VM_FAULT_OOM;
+}
+
+/*
+ * Helper function for unmap_mapping_range().
+ */
+static inline void unmap_mapping_range_list(struct prio_tree_root *root,
+                                           struct zap_details *details)
+{
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       pgoff_t vba, vea, zba, zea;
+
+       vma_prio_tree_foreach(vma, &iter, root,
+                       details->first_index, details->last_index) {
+               vba = vma->vm_pgoff;
+               vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+               /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
+               zba = details->first_index;
+               if (zba < vba)
+                       zba = vba;
+               zea = details->last_index;
+               if (zea > vea)
+                       zea = vea;
+               zap_page_range(vma,
+                       ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
+                       (zea - zba + 1) << PAGE_SHIFT, details);
+       }
+}
+
+/**
+ * unmap_mapping_range - unmap the portion of all mmaps
+ * in the specified address_space corresponding to the specified
+ * page range in the underlying file.
+ * @address_space: the address space containing mmaps to be unmapped.
+ * @holebegin: byte in first page to unmap, relative to the start of
+ * the underlying file.  This will be rounded down to a PAGE_SIZE
+ * boundary.  Note that this is different from vmtruncate(), which
+ * must keep the partial page.  In contrast, we must get rid of
+ * partial pages.
+ * @holelen: size of prospective hole in bytes.  This will be rounded
+ * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
+ * end of the file.
+ * @even_cows: 1 when truncating a file, unmap even private COWed pages;
+ * but 0 when invalidating pagecache, don't throw away private data.
+ */
+void unmap_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen, int even_cows)
+{
+       struct zap_details details;
+       pgoff_t hba = holebegin >> PAGE_SHIFT;
+       pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       /* Check for overflow. */
+       if (sizeof(holelen) > sizeof(hlen)) {
+               long long holeend =
+                       (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               if (holeend & ~(long long)ULONG_MAX)
+                       hlen = ULONG_MAX - hba + 1;
+       }
+
+       details.check_mapping = even_cows? NULL: mapping;
+       details.nonlinear_vma = NULL;
+       details.first_index = hba;
+       details.last_index = hba + hlen - 1;
+       details.atomic = 1;     /* A spinlock is held */
+       if (details.last_index < details.first_index)
+               details.last_index = ULONG_MAX;
+
+       spin_lock(&mapping->i_mmap_lock);
+       /* Protect against page fault */
+       atomic_inc(&mapping->truncate_count);
+
+       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
+               unmap_mapping_range_list(&mapping->i_mmap, &details);
+
+       /*
+        * In nonlinear VMAs there is no correspondence between virtual address
+        * offset and file offset.  So we must perform an exhaustive search
+        * across *all* the pages in each nonlinear VMA, not just the pages
+        * whose virtual address lies outside the file truncation point.
+        */
+       if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
+               struct vm_area_struct *vma;
+               list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+                                               shared.vm_set.list) {
+                       details.nonlinear_vma = vma;
+                       zap_page_range(vma, vma->vm_start,
+                               vma->vm_end - vma->vm_start, &details);
+               }
+       }
+       spin_unlock(&mapping->i_mmap_lock);
+}
+EXPORT_SYMBOL(unmap_mapping_range);
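
vmtruncate() below shows the truncation case (even_cows == 1). For the other case the comment describes, invalidating pagecache while keeping private COWed copies, a caller passes even_cows == 0; a minimal sketch with a hypothetical helper:

        #include <linux/fs.h>
        #include <linux/mm.h>

        /* Sketch: drop shared user mappings of [pos, pos + len) of an inode's file
         * before invalidating its pagecache; private COWed pages are left alone. */
        static void invalidate_file_range(struct inode *inode, loff_t pos, loff_t len)
        {
                unmap_mapping_range(inode->i_mapping, pos, len, 0);
        }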
+
+/*
+ * Handle all mappings that got truncated by a "truncate()"
+ * system call.
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page.  Ugly, but necessary.
+ */
+int vmtruncate(struct inode * inode, loff_t offset)
+{
+       struct address_space *mapping = inode->i_mapping;
+       unsigned long limit;
+
+       if (inode->i_size < offset)
+               goto do_expand;
+       /*
+        * truncation of in-use swapfiles is disallowed - it would cause
+        * subsequent swapout to scribble on the now-freed blocks.
+        */
+       if (IS_SWAPFILE(inode))
+               goto out_busy;
+       i_size_write(inode, offset);
+       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+       truncate_inode_pages(mapping, offset);
+       goto out_truncate;
+
+do_expand:
+       limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+       if (limit != RLIM_INFINITY && offset > limit)
+               goto out_sig;
+       if (offset > inode->i_sb->s_maxbytes)
+               goto out_big;
+       i_size_write(inode, offset);
+
+out_truncate:
+       if (inode->i_op && inode->i_op->truncate)
+               inode->i_op->truncate(inode);
+       return 0;
+out_sig:
+       send_sig(SIGXFSZ, current, 0);
+out_big:
+       return -EFBIG;
+out_busy:
+       return -ETXTBSY;
+}
+
+EXPORT_SYMBOL(vmtruncate);
+
+/* 
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time.  We also make sure to queue
+ * the 'original' request together with the readahead ones...  
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
+{
+#ifdef CONFIG_NUMA
+       struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
+#endif
+       int i, num;
+       struct page *new_page;
+       unsigned long offset;
+
+       /*
+        * Get the number of handles we should do readahead io to.
+        */
+       num = valid_swaphandles(entry, &offset);
+       for (i = 0; i < num; offset++, i++) {
+               /* Ok, do the async read-ahead now */
+               new_page = read_swap_cache_async(swp_entry(swp_type(entry),
+                                                          offset), vma, addr);
+               if (!new_page)
+                       break;
+               page_cache_release(new_page);
+#ifdef CONFIG_NUMA
+               /*
+                * Find the next applicable VMA for the NUMA policy.
+                */
+               addr += PAGE_SIZE;
+               if (addr == 0)
+                       vma = NULL;
+               if (vma) {
+                       if (addr >= vma->vm_end) {
+                               vma = next_vma;
+                               next_vma = vma ? vma->vm_next : NULL;
+                       }
+                       if (vma && addr < vma->vm_start)
+                               vma = NULL;
+               } else {
+                       if (next_vma && addr >= next_vma->vm_start) {
+                               vma = next_vma;
+                               next_vma = vma->vm_next;
+                       }
+               }
+#endif
+       }
+       lru_add_drain();        /* Push any new pages onto the LRU now */
+}
+
+/*
+ * We hold the mm semaphore and the page_table_lock on entry and
+ * should release the pagetable lock on exit..
+ */
+static int do_swap_page(struct mm_struct * mm,
+       struct vm_area_struct * vma, unsigned long address,
+       pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
+{
+       struct page *page;
+       swp_entry_t entry = pte_to_swp_entry(orig_pte);
+       pte_t pte;
+       int ret = VM_FAULT_MINOR;
+
+       pte_unmap(page_table);
+       spin_unlock(&mm->page_table_lock);
+       page = lookup_swap_cache(entry);
+       if (!page) {
+               swapin_readahead(entry, address, vma);
+               page = read_swap_cache_async(entry, vma, address);
+               if (!page) {
+                       /*
+                        * Back out if somebody else faulted in this pte while
+                        * we released the page table lock.
+                        */
+                       spin_lock(&mm->page_table_lock);
+                       page_table = pte_offset_map(pmd, address);
+                       if (likely(pte_same(*page_table, orig_pte)))
+                               ret = VM_FAULT_OOM;
+                       else
+                               ret = VM_FAULT_MINOR;
+                       pte_unmap(page_table);
+                       spin_unlock(&mm->page_table_lock);
+                       goto out;
+               }
+
+               /* Had to read the page from swap area: Major fault */
+               ret = VM_FAULT_MAJOR;
+               inc_page_state(pgmajfault);
+               grab_swap_token();
+       }
+
+       mark_page_accessed(page);
+       lock_page(page);
+
+       /*
+        * Back out if somebody else faulted in this pte while we
+        * released the page table lock.
+        */
+       spin_lock(&mm->page_table_lock);
+       page_table = pte_offset_map(pmd, address);
+       if (unlikely(!pte_same(*page_table, orig_pte))) {
+               pte_unmap(page_table);
+               spin_unlock(&mm->page_table_lock);
+               unlock_page(page);
+               page_cache_release(page);
+               ret = VM_FAULT_MINOR;
+               goto out;
+       }
+
+       /* The page isn't present yet, go ahead with the fault. */
+               
+       swap_free(entry);
+       if (vm_swap_full())
+               remove_exclusive_swap_page(page);
+
+       mm->rss++;
+       pte = mk_pte(page, vma->vm_page_prot);
+       if (write_access && can_share_swap_page(page)) {
+               pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+               write_access = 0;
+       }
+       unlock_page(page);
+
+       flush_icache_page(vma, page);
+       set_pte(page_table, pte);
+       page_add_anon_rmap(page, vma, address);
+
+       if (write_access) {
+               if (do_wp_page(mm, vma, address,
+                               page_table, pmd, pte) == VM_FAULT_OOM)
+                       ret = VM_FAULT_OOM;
+               goto out;
+       }
+
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, address, pte);
+       pte_unmap(page_table);
+       spin_unlock(&mm->page_table_lock);
+out:
+       return ret;
+}
+
+/*
+ * We are called with the MM semaphore and page_table_lock
+ * spinlock held to protect against concurrent faults in
+ * multithreaded programs. 
+ */
+static int
+do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+               pte_t *page_table, pmd_t *pmd, int write_access,
+               unsigned long addr)
+{
+       pte_t entry;
+       struct page * page = ZERO_PAGE(addr);
+
+       /* Read-only mapping of ZERO_PAGE. */
+       entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+
+       /* ..except if it's a write access */
+       if (write_access) {
+               /* Allocate our own private page. */
+               pte_unmap(page_table);
+               spin_unlock(&mm->page_table_lock);
+
+               if (unlikely(anon_vma_prepare(vma)))
+                       goto no_mem;
+               page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+               if (!page)
+                       goto no_mem;
+               clear_user_highpage(page, addr);
+
+               spin_lock(&mm->page_table_lock);
+               page_table = pte_offset_map(pmd, addr);
+
+               if (!pte_none(*page_table)) {
+                       pte_unmap(page_table);
+                       page_cache_release(page);
+                       spin_unlock(&mm->page_table_lock);
+                       goto out;
+               }
+               mm->rss++;
+               entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
+                                                        vma->vm_page_prot)),
+                                     vma);
+               lru_cache_add_active(page);
+               mark_page_accessed(page);
+               page_add_anon_rmap(page, vma, addr);
+       }
+
+       ptep_establish_new(vma, addr, page_table, entry);
+       pte_unmap(page_table);
+
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, addr, entry);
+       spin_unlock(&mm->page_table_lock);
+out:
+       return VM_FAULT_MINOR;
+no_mem:
+       return VM_FAULT_OOM;
+}
+
+/*
+ * do_no_page() tries to create a new page mapping. It aggressively
+ * tries to share with existing pages, but makes a separate copy if
+ * the "write_access" parameter is true in order to avoid the next
+ * page fault.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * This is called with the MM semaphore held and the page table
+ * spinlock held. Exit with the spinlock released.
+ */
+static int
+do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
+{
+       struct page * new_page;
+       struct address_space *mapping = NULL;
+       pte_t entry;
+       int sequence = 0;
+       int ret = VM_FAULT_MINOR;
+       int anon = 0;
+
+       if (!vma->vm_ops || !vma->vm_ops->nopage)
+               return do_anonymous_page(mm, vma, page_table,
+                                       pmd, write_access, address);
+       pte_unmap(page_table);
+       spin_unlock(&mm->page_table_lock);
+
+       if (vma->vm_file) {
+               mapping = vma->vm_file->f_mapping;
+               sequence = atomic_read(&mapping->truncate_count);
+       }
+       smp_rmb();  /* Prevent CPU from reordering lock-free ->nopage() */
+retry:
+       new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
+
+       /* no page was available -- either SIGBUS or OOM */
+       if (new_page == NOPAGE_SIGBUS)
+               return VM_FAULT_SIGBUS;
+       if (new_page == NOPAGE_OOM)
+               return VM_FAULT_OOM;
+
+       /*
+        * Should we do an early C-O-W break?
+        */
+       if (write_access && !(vma->vm_flags & VM_SHARED)) {
+               struct page *page;
+
+               if (unlikely(anon_vma_prepare(vma)))
+                       goto oom;
+               page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+               if (!page)
+                       goto oom;
+               copy_user_highpage(page, new_page, address);
+               page_cache_release(new_page);
+               new_page = page;
+               anon = 1;
+       }
+
+       spin_lock(&mm->page_table_lock);
+       /*
+        * For a file-backed vma, someone could have truncated or otherwise
+        * invalidated this page.  If unmap_mapping_range got called,
+        * retry getting the page.
+        */
+       if (mapping &&
+             (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
+               sequence = atomic_read(&mapping->truncate_count);
+               spin_unlock(&mm->page_table_lock);
+               page_cache_release(new_page);
+               goto retry;
+       }
+       page_table = pte_offset_map(pmd, address);
+
+       /*
+        * This silly early PAGE_DIRTY setting removes a race
+        * due to the bad i386 page protection. But it's valid
+        * for other architectures too.
+        *
+        * Note that if write_access is true, we either now have
+        * an exclusive copy of the page, or this is a shared mapping,
+        * so we can make it writable and dirty to avoid having to
+        * handle that later.
+        */
+       /* Only go through if we didn't race with anybody else... */
+       if (pte_none(*page_table)) {
+               if (!PageReserved(new_page))
+                       ++mm->rss;
+               flush_icache_page(vma, new_page);
+               entry = mk_pte(new_page, vma->vm_page_prot);
+               if (write_access)
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               ptep_establish_new(vma, address, page_table, entry);
+               if (anon) {
+                       lru_cache_add_active(new_page);
+                       page_add_anon_rmap(new_page, vma, address);
+               } else
+                       page_add_file_rmap(new_page);
+               pte_unmap(page_table);
+       } else {
+               /* One of our sibling threads was faster, back out. */
+               pte_unmap(page_table);
+               page_cache_release(new_page);
+               spin_unlock(&mm->page_table_lock);
+               goto out;
+       }
+
+       /* no need to invalidate: a not-present page shouldn't be cached */
+       update_mmu_cache(vma, address, entry);
+       spin_unlock(&mm->page_table_lock);
+out:
+       return ret;
+oom:
+       page_cache_release(new_page);
+       ret = VM_FAULT_OOM;
+       goto out;
+}
+
+/*
+ * Fault of a previously existing named mapping. Repopulate the pte
+ * from the encoded file_pte if possible. This enables swappable
+ * nonlinear vmas.
+ */
+static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
+       unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
+{
+       unsigned long pgoff;
+       int err;
+
+       BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
+       /*
+        * Fall back to the linear mapping if the fs does not support
+        * ->populate:
+        */
+       if (!vma->vm_ops || !vma->vm_ops->populate || 
+                       (write_access && !(vma->vm_flags & VM_SHARED))) {
+               pte_clear(pte);
+               return do_no_page(mm, vma, address, write_access, pte, pmd);
+       }
+
+       pgoff = pte_to_pgoff(*pte);
+
+       pte_unmap(pte);
+       spin_unlock(&mm->page_table_lock);
+
+       err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
+       if (err == -ENOMEM)
+               return VM_FAULT_OOM;
+       if (err)
+               return VM_FAULT_SIGBUS;
+       return VM_FAULT_MAJOR;
+}
+
+/*
+ * These routines also need to handle stuff like marking pages dirty
+ * and/or accessed for architectures that don't do it in hardware (most
+ * RISC architectures).  The early dirtying is also good on the i386.
+ *
+ * There is also a hook called "update_mmu_cache()" that architectures
+ * with external mmu caches can use to update those (ie the Sparc or
+ * PowerPC hashed page tables that act as extended TLBs).
+ *
+ * Note the "page_table_lock". It is to protect against kswapd removing
+ * pages from under us. Note that kswapd only ever _removes_ pages, never
+ * adds them. As such, once we have noticed that the page is not present,
+ * we can drop the lock early.
+ *
+ * The adding of pages is protected by the MM semaphore (which we hold),
+ * so we don't need to worry about a page suddenly being added into
+ * our VM.
+ *
+ * We enter with the pagetable spinlock held, we are supposed to
+ * release it when done.
+ */
+static inline int handle_pte_fault(struct mm_struct *mm,
+       struct vm_area_struct * vma, unsigned long address,
+       int write_access, pte_t *pte, pmd_t *pmd)
+{
+       pte_t entry;
+
+       entry = *pte;
+       if (!pte_present(entry)) {
+               /*
+                * If it truly wasn't present, we know that kswapd
+                * and the PTE updates will not touch it later. So
+                * drop the lock.
+                */
+               if (pte_none(entry))
+                       return do_no_page(mm, vma, address, write_access, pte, pmd);
+               if (pte_file(entry))
+                       return do_file_page(mm, vma, address, write_access, pte, pmd);
+               return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
+       }
+
+       if (write_access) {
+               if (!pte_write(entry))
+                       return do_wp_page(mm, vma, address, pte, pmd, entry);
+
+               entry = pte_mkdirty(entry);
+       }
+       entry = pte_mkyoung(entry);
+       ptep_set_access_flags(vma, address, pte, entry, write_access);
+       update_mmu_cache(vma, address, entry);
+       pte_unmap(pte);
+       spin_unlock(&mm->page_table_lock);
+       return VM_FAULT_MINOR;
+}
+
+/*
+ * By the time we get here, we already hold the mm semaphore
+ */
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+       unsigned long address, int write_access)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+
+       __set_current_state(TASK_RUNNING);
+       pgd = pgd_offset(mm, address);
+
+       inc_page_state(pgfault);
+
+       if (is_vm_hugetlb_page(vma))
+               return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+
+       /*
+        * We need the page table lock to synchronize with kswapd
+        * and the SMP-safe atomic PTE updates.
+        */
+       spin_lock(&mm->page_table_lock);
+       pmd = pmd_alloc(mm, pgd, address);
+
+       if (pmd) {
+               pte_t * pte = pte_alloc_map(mm, pmd, address);
+               if (pte)
+                       return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+       }
+       spin_unlock(&mm->page_table_lock);
+       return VM_FAULT_OOM;
+}
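
handle_mm_fault() is the entry point an architecture's page-fault handler reaches once it has found the VMA, with mmap_sem held for read; the return value is folded into the task's fault counters or turned into an error, exactly as the switch inside get_user_pages() above does. A sketch of that consumer side (the error codes here are illustrative; a real arch handler raises SIGBUS or enters its OOM path instead):

        #include <linux/mm.h>
        #include <linux/sched.h>
        #include <linux/errno.h>

        /* Sketch: fault in one page and account it, mmap_sem held for read. */
        static int fault_in_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unsigned long address, int write)
        {
                switch (handle_mm_fault(mm, vma, address, write)) {
                case VM_FAULT_MINOR:
                        current->min_flt++;
                        return 0;
                case VM_FAULT_MAJOR:
                        current->maj_flt++;
                        return 0;
                case VM_FAULT_SIGBUS:
                        return -EFAULT;
                case VM_FAULT_OOM:
                        return -ENOMEM;
                default:
                        BUG();
                }
                return 0;
        }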
+
+/*
+ * Allocate page middle directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level page table, this ends up actually being entirely
+ * optimized away.
+ */
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+       pmd_t *new;
+
+       spin_unlock(&mm->page_table_lock);
+       new = pmd_alloc_one(mm, address);
+       spin_lock(&mm->page_table_lock);
+       if (!new)
+               return NULL;
+
+       /*
+        * Because we dropped the lock, we should re-check the
+        * entry, as somebody else could have populated it..
+        */
+       if (pgd_present(*pgd)) {
+               pmd_free(new);
+               goto out;
+       }
+       pgd_populate(mm, pgd, new);
+out:
+       return pmd_offset(pgd, address);
+}
+
+int make_pages_present(unsigned long addr, unsigned long end)
+{
+       int ret, len, write;
+       struct vm_area_struct * vma;
+
+       vma = find_vma(current->mm, addr);
+       if (!vma)
+               return -1;
+       write = (vma->vm_flags & VM_WRITE) != 0;
+       if (addr >= end)
+               BUG();
+       if (end > vma->vm_end)
+               BUG();
+       len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
+       ret = get_user_pages(current, current->mm, addr,
+                       len, write, 0, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       return ret == len ? 0 : -1;
+}
+
+/* 
+ * Map a vmalloc()-space virtual address to the physical page.
+ */
+struct page * vmalloc_to_page(void * vmalloc_addr)
+{
+       unsigned long addr = (unsigned long) vmalloc_addr;
+       struct page *page = NULL;
+       pgd_t *pgd = pgd_offset_k(addr);
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+  
+       if (!pgd_none(*pgd)) {
+               pmd = pmd_offset(pgd, addr);
+               if (!pmd_none(*pmd)) {
+                       preempt_disable();
+                       ptep = pte_offset_map(pmd, addr);
+                       pte = *ptep;
+                       if (pte_present(pte))
+                               page = pte_page(pte);
+                       pte_unmap(ptep);
+                       preempt_enable();
+               }
+       }
+       return page;
+}
+
+EXPORT_SYMBOL(vmalloc_to_page);
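
One common consumer of vmalloc_to_page() in this kernel generation is a driver that mmaps a vmalloc()ed buffer and resolves faults one page at a time from its ->nopage handler, using the same three-argument signature that do_no_page() invokes above. A sketch, with mybuf a hypothetical buffer the driver allocated with vmalloc() and mapped from offset zero:

        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        static char *mybuf;     /* hypothetical buffer, allocated with vmalloc() */

        /* Sketch: back one faulting page of an mmap'ed vmalloc() buffer. */
        static struct page *mybuf_nopage(struct vm_area_struct *vma,
                                         unsigned long address, int *type)
        {
                unsigned long offset = address - vma->vm_start;
                struct page *page = vmalloc_to_page(mybuf + offset);

                if (!page)
                        return NOPAGE_SIGBUS;
                get_page(page);         /* the fault path expects a held reference */
                if (type)
                        *type = VM_FAULT_MINOR;
                return page;
        }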
+
+#if !defined(CONFIG_ARCH_GATE_AREA)
+
+#if defined(AT_SYSINFO_EHDR)
+struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+       gate_vma.vm_mm = NULL;
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_page_prot = PAGE_READONLY;
+       gate_vma.vm_flags = 0;
+       return 0;
+}
+__initcall(gate_vma_init);
+#endif
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+#ifdef AT_SYSINFO_EHDR
+       return &gate_vma;
+#else
+       return NULL;
+#endif
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+#ifdef AT_SYSINFO_EHDR
+       if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
+               return 1;
+#endif
+       return 0;
+}
+
+#endif
diff --git a/linux-2.6.9-xen-sparse/mm/page_alloc.c b/linux-2.6.9-xen-sparse/mm/page_alloc.c
new file mode 100644 (file)
index 0000000..9d560ae
--- /dev/null
@@ -0,0 +1,2074 @@
+/*
+ *  linux/mm/page_alloc.c
+ *
+ *  Manages the free list; the system allocates free pages here.
+ *  Note that kmalloc() lives in slab.c
+ *
+ *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *  Swap reorganised 29.12.95, Stephen Tweedie
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
+ *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
+ *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/pagevec.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/topology.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+
+#include <asm/tlbflush.h>
+
+DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
+struct pglist_data *pgdat_list;
+unsigned long totalram_pages;
+unsigned long totalhigh_pages;
+long nr_swap_pages;
+int numnodes = 1;
+int sysctl_lower_zone_protection = 0;
+
+EXPORT_SYMBOL(totalram_pages);
+EXPORT_SYMBOL(nr_swap_pages);
+
+/*
+ * Used by page_zone() to look up the address of the struct zone whose
+ * id is encoded in the upper bits of page->flags
+ */
+struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+int min_free_kbytes = 1024;
+
+unsigned long __initdata nr_kernel_pages;
+unsigned long __initdata nr_all_pages;
+
+/*
+ * Temporary debugging check for pages not lying within a given zone.
+ */
+static int bad_range(struct zone *zone, struct page *page)
+{
+       if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
+               return 1;
+       if (page_to_pfn(page) < zone->zone_start_pfn)
+               return 1;
+       if (zone != page_zone(page))
+               return 1;
+       return 0;
+}
+
+static void bad_page(const char *function, struct page *page)
+{
+       printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
+               function, current->comm, page);
+       printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
+               (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
+               page->mapping, page_mapcount(page), page_count(page));
+       printk(KERN_EMERG "Backtrace:\n");
+       dump_stack();
+       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
+       page->flags &= ~(1 << PG_private        |
+                       1 << PG_locked  |
+                       1 << PG_lru     |
+                       1 << PG_active  |
+                       1 << PG_dirty   |
+                       1 << PG_swapcache |
+                       1 << PG_writeback);
+       set_page_count(page, 0);
+       reset_page_mapcount(page);
+       page->mapping = NULL;
+}
+
+#ifndef CONFIG_HUGETLB_PAGE
+#define prep_compound_page(page, order) do { } while (0)
+#define destroy_compound_page(page, order) do { } while (0)
+#else
+/*
+ * Higher-order pages are called "compound pages".  They are structured thusly:
+ *
+ * The first PAGE_SIZE page is called the "head page".
+ *
+ * The remaining PAGE_SIZE pages are called "tail pages".
+ *
+ * All pages have PG_compound set.  All pages have their ->private pointing at
+ * the head page (even the head page has this).
+ *
+ * The first tail page's ->mapping, if non-zero, holds the address of the
+ * compound page's put_page() function.
+ *
+ * The order of the allocation is stored in the first tail page's ->index
+ * This is only for debug at present.  This usage means that zero-order pages
+ * may not be compound.
+ */
+static void prep_compound_page(struct page *page, unsigned long order)
+{
+       int i;
+       int nr_pages = 1 << order;
+
+       page[1].mapping = NULL;
+       page[1].index = order;
+       for (i = 0; i < nr_pages; i++) {
+               struct page *p = page + i;
+
+               SetPageCompound(p);
+               p->private = (unsigned long)page;
+       }
+}
+
+static void destroy_compound_page(struct page *page, unsigned long order)
+{
+       int i;
+       int nr_pages = 1 << order;
+
+       if (!PageCompound(page))
+               return;
+
+       if (page[1].index != order)
+               bad_page(__FUNCTION__, page);
+
+       for (i = 0; i < nr_pages; i++) {
+               struct page *p = page + i;
+
+               if (!PageCompound(p))
+                       bad_page(__FUNCTION__, page);
+               if (p->private != (unsigned long)page)
+                       bad_page(__FUNCTION__, page);
+               ClearPageCompound(p);
+       }
+}
+#endif         /* CONFIG_HUGETLB_PAGE */
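
The layout described above can be exercised in a few lines of ordinary user-space C. This is a hedged illustration only: struct fake_page below is a stand-in for the two struct page fields that prep_compound_page() touches, not the kernel structure, and the numbers are invented.

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in for the two struct page fields prep_compound_page() touches. */
    struct fake_page {
            struct fake_page *private;   /* every sub-page points at the head */
            unsigned long index;         /* the order, kept on the first tail */
    };

    int main(void)
    {
            enum { ORDER = 2, NR = 1 << ORDER };
            struct fake_page pages[NR];

            pages[1].index = ORDER;
            for (int i = 0; i < NR; i++)
                    pages[i].private = &pages[0];

            /* Any tail page leads back to the head, and head[1] holds the order. */
            struct fake_page *head = pages[3].private;
            assert(head == &pages[0]);
            printf("compound order recovered from tail: %lu\n", head[1].index);
            return 0;
    }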
+
+/*
+ * Freeing function for a buddy system allocator.
+ *
+ * The concept of a buddy system is to maintain direct-mapped table
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ * At each level, we keep one bit for each pair of blocks, which
+ * is set to 1 iff only one of the pair is allocated.  So when we
+ * are allocating or freeing one, we can derive the state of the
+ * other.  That is, if we allocate a small block, and both were   
+ * free, the remainder of the region must be split into blocks.   
+ * If a block is freed, and its buddy is also free, then this
+ * triggers coalescing into a block of larger size.            
+ *
+ * -- wli
+ */
+
+static inline void __free_pages_bulk (struct page *page, struct page *base,
+               struct zone *zone, struct free_area *area, unsigned int order)
+{
+       unsigned long page_idx, index, mask;
+
+       if (order)
+               destroy_compound_page(page, order);
+       mask = (~0UL) << order;
+       page_idx = page - base;
+       if (page_idx & ~mask)
+               BUG();
+       index = page_idx >> (1 + order);
+
+       zone->free_pages += 1 << order;
+       while (order < MAX_ORDER-1) {
+               struct page *buddy1, *buddy2;
+
+               BUG_ON(area >= zone->free_area + MAX_ORDER);
+               if (!__test_and_change_bit(index, area->map))
+                       /*
+                        * the buddy page is still allocated.
+                        */
+                       break;
+
+               /* Move the buddy up one level. */
+               buddy1 = base + (page_idx ^ (1 << order));
+               buddy2 = base + page_idx;
+               BUG_ON(bad_range(zone, buddy1));
+               BUG_ON(bad_range(zone, buddy2));
+               list_del(&buddy1->lru);
+               mask <<= 1;
+               order++;
+               area++;
+               index >>= 1;
+               page_idx &= mask;
+       }
+       list_add(&(base + page_idx)->lru, &area->free_list);
+}
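
A buddy's position follows from flipping a single bit of the block index, which is what the page_idx ^ (1 << order) step above relies on. The user-space sketch below walks a hypothetical page index 12 up a few orders to show which neighbour would be tested for coalescing at each level; it is arithmetic only, not allocator code.

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_idx = 12;   /* hypothetical page offset in the zone */

            for (unsigned int order = 0; order < 4; order++) {
                    unsigned long block = page_idx & ~((1UL << order) - 1);
                    printf("order %u: buddy of block %lu is block %lu\n",
                           order, block, block ^ (1UL << order));
            }
            return 0;
    }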
+
+static inline void free_pages_check(const char *function, struct page *page)
+{
+       if (    page_mapped(page) ||
+               page->mapping != NULL ||
+               page_count(page) != 0 ||
+               (page->flags & (
+                       1 << PG_lru     |
+                       1 << PG_private |
+                       1 << PG_locked  |
+                       1 << PG_active  |
+                       1 << PG_reclaim |
+                       1 << PG_slab    |
+                       1 << PG_swapcache |
+                       1 << PG_writeback )))
+               bad_page(function, page);
+       if (PageDirty(page))
+               ClearPageDirty(page);
+}
+
+/*
+ * Frees a list of pages. 
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free, or 0 for all on the list.
+ *
+ * If the zone was previously in an "all pages pinned" state then look to
+ * see if this freeing clears that state.
+ *
+ * And clear the zone's pages_scanned counter, to hold off the "all pages are
+ * pinned" detection logic.
+ */
+static int
+free_pages_bulk(struct zone *zone, int count,
+               struct list_head *list, unsigned int order)
+{
+       unsigned long flags;
+       struct free_area *area;
+       struct page *base, *page = NULL;
+       int ret = 0;
+
+       base = zone->zone_mem_map;
+       area = zone->free_area + order;
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->all_unreclaimable = 0;
+       zone->pages_scanned = 0;
+       while (!list_empty(list) && count--) {
+               page = list_entry(list->prev, struct page, lru);
+               /* have to delete it as __free_pages_bulk list manipulates */
+               list_del(&page->lru);
+               __free_pages_bulk(page, base, zone, area, order);
+               ret++;
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+       return ret;
+}
+
+void __free_pages_ok(struct page *page, unsigned int order)
+{
+       LIST_HEAD(list);
+       int i;
+
+       arch_free_page(page, order);
+
+       mod_page_state(pgfree, 1 << order);
+       for (i = 0 ; i < (1 << order) ; ++i)
+               free_pages_check(__FUNCTION__, page + i);
+       list_add(&page->lru, &list);
+       kernel_map_pages(page, 1<<order, 0);
+       free_pages_bulk(page_zone(page), 1, &list, order);
+}
+
+#define MARK_USED(index, order, area) \
+       __change_bit((index) >> (1+(order)), (area)->map)
+
+/*
+ * The order of subdivision here is critical for the IO subsystem.
+ * Please do not alter this order without good reasons and regression
+ * testing. Specifically, as large blocks of memory are subdivided,
+ * the order in which smaller blocks are delivered depends on the order
+ * they're subdivided in this function. This is the primary factor
+ * influencing the order in which pages are delivered to the IO
+ * subsystem according to empirical testing, and this is also justified
+ * by considering the behavior of a buddy system containing a single
+ * large block of memory acted on by a series of small allocations.
+ * This behavior is a critical factor in sglist merging's success.
+ *
+ * -- wli
+ */
+static inline struct page *
+expand(struct zone *zone, struct page *page,
+        unsigned long index, int low, int high, struct free_area *area)
+{
+       unsigned long size = 1 << high;
+
+       while (high > low) {
+               area--;
+               high--;
+               size >>= 1;
+               BUG_ON(bad_range(zone, &page[size]));
+               list_add(&page[size].lru, &area->free_list);
+               MARK_USED(index + size, high, area);
+       }
+       return page;
+}
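
The splitting loop in expand() can be traced with plain arithmetic. The sketch below mirrors it in user space for a made-up request of one page out of a 16-page block: each iteration hands one half back to a lower-order free list and keeps the lowest piece for the caller, which is why small blocks come back out in a predictable order.

    #include <stdio.h>

    int main(void)
    {
            int low = 0, high = 4;          /* want 1 page out of a 16-page block */
            unsigned long size = 1UL << high;

            while (high > low) {
                    high--;
                    size >>= 1;
                    printf("free half of %lu pages left at offset %lu (order %d)\n",
                           size, size, high);
            }
            printf("offset 0 is handed to the caller\n");
            return 0;
    }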
+
+static inline void set_page_refs(struct page *page, int order)
+{
+#ifdef CONFIG_MMU
+       set_page_count(page, 1);
+#else
+       int i;
+
+       /*
+        * We need to reference all the pages for this order, otherwise if
+        * anyone accesses one of the pages with (get/put) it will be freed.
+        */
+       for (i = 0; i < (1 << order); i++)
+               set_page_count(page+i, 1);
+#endif /* CONFIG_MMU */
+}
+
+/*
+ * This page is about to be returned from the page allocator
+ */
+static void prep_new_page(struct page *page, int order)
+{
+       if (page->mapping || page_mapped(page) ||
+           (page->flags & (
+                       1 << PG_private |
+                       1 << PG_locked  |
+                       1 << PG_lru     |
+                       1 << PG_active  |
+                       1 << PG_dirty   |
+                       1 << PG_reclaim |
+                       1 << PG_swapcache |
+                       1 << PG_writeback )))
+               bad_page(__FUNCTION__, page);
+
+       page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+                       1 << PG_referenced | 1 << PG_arch_1 |
+                       1 << PG_checked | 1 << PG_mappedtodisk);
+       page->private = 0;
+       set_page_refs(page, order);
+}
+
+/* 
+ * Do the hard work of removing an element from the buddy allocator.
+ * Call me with the zone->lock already held.
+ */
+static struct page *__rmqueue(struct zone *zone, unsigned int order)
+{
+       struct free_area * area;
+       unsigned int current_order;
+       struct page *page;
+       unsigned int index;
+
+       for (current_order = order; current_order < MAX_ORDER; ++current_order) {
+               area = zone->free_area + current_order;
+               if (list_empty(&area->free_list))
+                       continue;
+
+               page = list_entry(area->free_list.next, struct page, lru);
+               list_del(&page->lru);
+               index = page - zone->zone_mem_map;
+               if (current_order != MAX_ORDER-1)
+                       MARK_USED(index, current_order, area);
+               zone->free_pages -= 1UL << order;
+               return expand(zone, page, index, order, current_order, area);
+       }
+
+       return NULL;
+}
+
+/* 
+ * Obtain a specified number of elements from the buddy allocator, all under
+ * a single hold of the lock, for efficiency.  Add them to the supplied list.
+ * Returns the number of new pages which were placed at *list.
+ */
+static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+                       unsigned long count, struct list_head *list)
+{
+       unsigned long flags;
+       int i;
+       int allocated = 0;
+       struct page *page;
+       
+       spin_lock_irqsave(&zone->lock, flags);
+       for (i = 0; i < count; ++i) {
+               page = __rmqueue(zone, order);
+               if (page == NULL)
+                       break;
+               allocated++;
+               list_add_tail(&page->lru, list);
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+       return allocated;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
+static void __drain_pages(unsigned int cpu)
+{
+       struct zone *zone;
+       int i;
+
+       for_each_zone(zone) {
+               struct per_cpu_pageset *pset;
+
+               pset = &zone->pageset[cpu];
+               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
+                       struct per_cpu_pages *pcp;
+
+                       pcp = &pset->pcp[i];
+                       pcp->count -= free_pages_bulk(zone, pcp->count,
+                                               &pcp->list, 0);
+               }
+       }
+}
+#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PM
+int is_head_of_free_region(struct page *page)
+{
+        struct zone *zone = page_zone(page);
+        unsigned long flags;
+       int order;
+       struct list_head *curr;
+
+       /*
+        * Should not matter as we need quiescent system for
+        * suspend anyway, but...
+        */
+       spin_lock_irqsave(&zone->lock, flags);
+       for (order = MAX_ORDER - 1; order >= 0; --order)
+               list_for_each(curr, &zone->free_area[order].free_list)
+                       if (page == list_entry(curr, struct page, lru)) {
+                               spin_unlock_irqrestore(&zone->lock, flags);
+                               return 1 << order;
+                       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+        return 0;
+}
+
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);  
+       __drain_pages(smp_processor_id());
+       local_irq_restore(flags);       
+}
+#endif /* CONFIG_PM */
+
+static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+{
+#ifdef CONFIG_NUMA
+       unsigned long flags;
+       int cpu;
+       pg_data_t *pg = z->zone_pgdat;
+       pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
+       struct per_cpu_pageset *p;
+
+       local_irq_save(flags);
+       cpu = smp_processor_id();
+       p = &z->pageset[cpu];
+       if (pg == orig) {
+               z->pageset[cpu].numa_hit++;
+       } else {
+               p->numa_miss++;
+               zonelist->zones[0]->pageset[cpu].numa_foreign++;
+       }
+       if (pg == NODE_DATA(numa_node_id()))
+               p->local_node++;
+       else
+               p->other_node++;
+       local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Free a 0-order page
+ */
+static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
+static void fastcall free_hot_cold_page(struct page *page, int cold)
+{
+       struct zone *zone = page_zone(page);
+       struct per_cpu_pages *pcp;
+       unsigned long flags;
+
+       if (PageForeign(page))
+               return (PageForeignDestructor(page))(page);
+
+       arch_free_page(page, 0);
+
+       kernel_map_pages(page, 1, 0);
+       inc_page_state(pgfree);
+       if (PageAnon(page))
+               page->mapping = NULL;
+       free_pages_check(__FUNCTION__, page);
+       pcp = &zone->pageset[get_cpu()].pcp[cold];
+       local_irq_save(flags);
+       if (pcp->count >= pcp->high)
+               pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+       list_add(&page->lru, &pcp->list);
+       pcp->count++;
+       local_irq_restore(flags);
+       put_cpu();
+}
+
+void fastcall free_hot_page(struct page *page)
+{
+       free_hot_cold_page(page, 0);
+}
+       
+void fastcall free_cold_page(struct page *page)
+{
+       free_hot_cold_page(page, 1);
+}
+
+/*
+ * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
+ * we cheat by calling it from here, in the order > 0 path.  Saves a branch
+ * or two.
+ */
+
+static struct page *
+buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
+{
+       unsigned long flags;
+       struct page *page = NULL;
+       int cold = !!(gfp_flags & __GFP_COLD);
+
+       if (order == 0) {
+               struct per_cpu_pages *pcp;
+
+               pcp = &zone->pageset[get_cpu()].pcp[cold];
+               local_irq_save(flags);
+               if (pcp->count <= pcp->low)
+                       pcp->count += rmqueue_bulk(zone, 0,
+                                               pcp->batch, &pcp->list);
+               if (pcp->count) {
+                       page = list_entry(pcp->list.next, struct page, lru);
+                       list_del(&page->lru);
+                       pcp->count--;
+               }
+               local_irq_restore(flags);
+               put_cpu();
+       }
+
+       if (page == NULL) {
+               spin_lock_irqsave(&zone->lock, flags);
+               page = __rmqueue(zone, order);
+               spin_unlock_irqrestore(&zone->lock, flags);
+       }
+
+       if (page != NULL) {
+               BUG_ON(bad_range(zone, page));
+               mod_page_state_zone(zone, pgalloc, 1 << order);
+               prep_new_page(page, order);
+               if (order && (gfp_flags & __GFP_COMP))
+                       prep_compound_page(page, order);
+       }
+       return page;
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ *
+ * Herein lies the mysterious "incremental min".  That's the
+ *
+ *     local_low = z->pages_low;
+ *     min += local_low;
+ *
+ * thing.  The intent here is to provide additional protection to low zones for
+ * allocation requests which _could_ use higher zones.  So a GFP_HIGHMEM
+ * request is not allowed to dip as deeply into the normal zone as a GFP_KERNEL
+ * request.  This preserves additional space in those lower zones for requests
+ * which really do need memory from those zones.  It means that on a decent
+ * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
+ * zone untouched.
+ */
+struct page * fastcall
+__alloc_pages(unsigned int gfp_mask, unsigned int order,
+               struct zonelist *zonelist)
+{
+       const int wait = gfp_mask & __GFP_WAIT;
+       unsigned long min;
+       struct zone **zones, *z;
+       struct page *page;
+       struct reclaim_state reclaim_state;
+       struct task_struct *p = current;
+       int i;
+       int alloc_type;
+       int do_retry;
+       int can_try_harder;
+
+       might_sleep_if(wait);
+
+       /*
+        * The caller may dip into page reserves a bit more if the caller
+        * cannot run direct reclaim, or if the caller has a realtime scheduling
+        * policy.
+        */
+       can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;
+
+       zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
+
+       if (unlikely(zones[0] == NULL)) {
+               /* Should this ever happen?? */
+               return NULL;
+       }
+
+       alloc_type = zone_idx(zones[0]);
+
+       /* Go through the zonelist once, looking for a zone with enough free */
+       for (i = 0; (z = zones[i]) != NULL; i++) {
+               min = z->pages_low + (1<<order) + z->protection[alloc_type];
+
+               if (z->free_pages < min)
+                       continue;
+
+               page = buffered_rmqueue(z, order, gfp_mask);
+               if (page)
+                       goto got_pg;
+       }
+
+       for (i = 0; (z = zones[i]) != NULL; i++)
+               wakeup_kswapd(z);
+
+       /*
+        * Go through the zonelist again. Let __GFP_HIGH and allocations
+        * coming from realtime tasks go deeper into the reserves.
+        */
+       for (i = 0; (z = zones[i]) != NULL; i++) {
+               min = z->pages_min;
+               if (gfp_mask & __GFP_HIGH)
+                       min /= 2;
+               if (can_try_harder)
+                       min -= min / 4;
+               min += (1<<order) + z->protection[alloc_type];
+
+               if (z->free_pages < min)
+                       continue;
+
+               page = buffered_rmqueue(z, order, gfp_mask);
+               if (page)
+                       goto got_pg;
+       }
+
+       /* This allocation should allow future memory freeing. */
+       if ((p->flags & (PF_MEMALLOC | PF_MEMDIE)) && !in_interrupt()) {
+               /* go through the zonelist yet again, ignoring mins */
+               for (i = 0; (z = zones[i]) != NULL; i++) {
+                       page = buffered_rmqueue(z, order, gfp_mask);
+                       if (page)
+                               goto got_pg;
+               }
+               goto nopage;
+       }
+
+       /* Atomic allocations - we can't balance anything */
+       if (!wait)
+               goto nopage;
+
+rebalance:
+       /* We now go into synchronous reclaim */
+       p->flags |= PF_MEMALLOC;
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
+
+       try_to_free_pages(zones, gfp_mask, order);
+
+       p->reclaim_state = NULL;
+       p->flags &= ~PF_MEMALLOC;
+
+       /* go through the zonelist yet one more time */
+       for (i = 0; (z = zones[i]) != NULL; i++) {
+               min = z->pages_min;
+               if (gfp_mask & __GFP_HIGH)
+                       min /= 2;
+               if (can_try_harder)
+                       min -= min / 4;
+               min += (1<<order) + z->protection[alloc_type];
+
+               if (z->free_pages < min)
+                       continue;
+
+               page = buffered_rmqueue(z, order, gfp_mask);
+               if (page)
+                       goto got_pg;
+       }
+
+       /*
+        * Don't let big-order allocations loop unless the caller explicitly
+        * requests that.  Wait for some write requests to complete then retry.
+        *
+        * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
+        * <= 3, but that may not be true in other implementations.
+        */
+       do_retry = 0;
+       if (!(gfp_mask & __GFP_NORETRY)) {
+               if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+                       do_retry = 1;
+               if (gfp_mask & __GFP_NOFAIL)
+                       do_retry = 1;
+       }
+       if (do_retry) {
+               blk_congestion_wait(WRITE, HZ/50);
+               goto rebalance;
+       }
+
+nopage:
+       if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
+               printk(KERN_WARNING "%s: page allocation failure."
+                       " order:%d, mode:0x%x\n",
+                       p->comm, order, gfp_mask);
+               dump_stack();
+       }
+       return NULL;
+got_pg:
+       zone_statistics(zonelist, z);
+       kernel_map_pages(page, 1 << order, 1);
+       return page;
+}
+
+EXPORT_SYMBOL(__alloc_pages);
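
The first-pass zone test above reduces to a single comparison: skip the zone when free_pages < pages_low + (1 << order) + protection[alloc_type]. The snippet below works through it with invented numbers; it is not kernel code, just the arithmetic.

    #include <stdio.h>

    int main(void)
    {
            unsigned long pages_low = 1000, protection = 256;   /* hypothetical */
            unsigned long free_pages = 1200;
            unsigned int order = 3;

            unsigned long min = pages_low + (1UL << order) + protection;
            printf("min = %lu, free = %lu -> %s\n", min, free_pages,
                   free_pages < min ? "skip this zone" : "try buffered_rmqueue()");
            return 0;
    }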
+
+/*
+ * Common helper functions.
+ */
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+{
+       struct page * page;
+       page = alloc_pages(gfp_mask, order);
+       if (!page)
+               return 0;
+       return (unsigned long) page_address(page);
+}
+
+EXPORT_SYMBOL(__get_free_pages);
+
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
+{
+       struct page * page;
+
+       /*
+        * get_zeroed_page() returns a 32-bit address, which cannot represent
+        * a highmem page
+        */
+       BUG_ON(gfp_mask & __GFP_HIGHMEM);
+
+       page = alloc_pages(gfp_mask, 0);
+       if (page) {
+               void *address = page_address(page);
+               clear_page(address);
+               return (unsigned long) address;
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(get_zeroed_page);
+
+void __pagevec_free(struct pagevec *pvec)
+{
+       int i = pagevec_count(pvec);
+
+       while (--i >= 0)
+               free_hot_cold_page(pvec->pages[i], pvec->cold);
+}
+
+fastcall void __free_pages(struct page *page, unsigned int order)
+{
+       if (!PageReserved(page) && put_page_testzero(page)) {
+               if (order == 0)
+                       free_hot_page(page);
+               else
+                       __free_pages_ok(page, order);
+       }
+}
+
+EXPORT_SYMBOL(__free_pages);
+
+fastcall void free_pages(unsigned long addr, unsigned int order)
+{
+       if (addr != 0) {
+               BUG_ON(!virt_addr_valid((void *)addr));
+               __free_pages(virt_to_page((void *)addr), order);
+       }
+}
+
+EXPORT_SYMBOL(free_pages);
+
+/*
+ * Total amount of free (allocatable) RAM:
+ */
+unsigned int nr_free_pages(void)
+{
+       unsigned int sum = 0;
+       struct zone *zone;
+
+       for_each_zone(zone)
+               sum += zone->free_pages;
+
+       return sum;
+}
+
+EXPORT_SYMBOL(nr_free_pages);
+
+#ifdef CONFIG_NUMA
+unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
+{
+       unsigned int i, sum = 0;
+
+       for (i = 0; i < MAX_NR_ZONES; i++)
+               sum += pgdat->node_zones[i].free_pages;
+
+       return sum;
+}
+#endif
+
+static unsigned int nr_free_zone_pages(int offset)
+{
+       pg_data_t *pgdat;
+       unsigned int sum = 0;
+
+       for_each_pgdat(pgdat) {
+               struct zonelist *zonelist = pgdat->node_zonelists + offset;
+               struct zone **zonep = zonelist->zones;
+               struct zone *zone;
+
+               for (zone = *zonep++; zone; zone = *zonep++) {
+                       unsigned long size = zone->present_pages;
+                       unsigned long high = zone->pages_high;
+                       if (size > high)
+                               sum += size - high;
+               }
+       }
+
+       return sum;
+}
+
+/*
+ * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
+ */
+unsigned int nr_free_buffer_pages(void)
+{
+       return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+}
+
+/*
+ * Amount of free RAM allocatable within all zones
+ */
+unsigned int nr_free_pagecache_pages(void)
+{
+       return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+}
+
+#ifdef CONFIG_HIGHMEM
+unsigned int nr_free_highpages (void)
+{
+       pg_data_t *pgdat;
+       unsigned int pages = 0;
+
+       for_each_pgdat(pgdat)
+               pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+
+       return pages;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static void show_node(struct zone *zone)
+{
+       printk("Node %d ", zone->zone_pgdat->node_id);
+}
+#else
+#define show_node(zone)        do { } while (0)
+#endif
+
+/*
+ * Accumulate the page_state information across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+ */
+DEFINE_PER_CPU(struct page_state, page_states) = {0};
+EXPORT_PER_CPU_SYMBOL(page_states);
+
+atomic_t nr_pagecache = ATOMIC_INIT(0);
+EXPORT_SYMBOL(nr_pagecache);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
+#endif
+
+void __get_page_state(struct page_state *ret, int nr)
+{
+       int cpu = 0;
+
+       memset(ret, 0, sizeof(*ret));
+       while (cpu < NR_CPUS) {
+               unsigned long *in, *out, off;
+
+               if (!cpu_possible(cpu)) {
+                       cpu++;
+                       continue;
+               }
+
+               in = (unsigned long *)&per_cpu(page_states, cpu);
+               cpu++;
+               if (cpu < NR_CPUS && cpu_possible(cpu))
+                       prefetch(&per_cpu(page_states, cpu));
+               out = (unsigned long *)ret;
+               for (off = 0; off < nr; off++)
+                       *out++ += *in++;
+       }
+}
+
+void get_page_state(struct page_state *ret)
+{
+       int nr;
+
+       nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+       nr /= sizeof(unsigned long);
+
+       __get_page_state(ret, nr + 1);
+}
+
+void get_full_page_state(struct page_state *ret)
+{
+       __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+}
+
+unsigned long __read_page_state(unsigned offset)
+{
+       unsigned long ret = 0;
+       int cpu;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               unsigned long in;
+
+               if (!cpu_possible(cpu))
+                       continue;
+
+               in = (unsigned long)&per_cpu(page_states, cpu) + offset;
+               ret += *((unsigned long *)in);
+       }
+       return ret;
+}
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+                       unsigned long *free, struct pglist_data *pgdat)
+{
+       struct zone *zones = pgdat->node_zones;
+       int i;
+
+       *active = 0;
+       *inactive = 0;
+       *free = 0;
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               *active += zones[i].nr_active;
+               *inactive += zones[i].nr_inactive;
+               *free += zones[i].free_pages;
+       }
+}
+
+void get_zone_counts(unsigned long *active,
+               unsigned long *inactive, unsigned long *free)
+{
+       struct pglist_data *pgdat;
+
+       *active = 0;
+       *inactive = 0;
+       *free = 0;
+       for_each_pgdat(pgdat) {
+               unsigned long l, m, n;
+               __get_zone_counts(&l, &m, &n, pgdat);
+               *active += l;
+               *inactive += m;
+               *free += n;
+       }
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+       val->totalram = totalram_pages;
+       val->sharedram = 0;
+       val->freeram = nr_free_pages();
+       val->bufferram = nr_blockdev_pages();
+#ifdef CONFIG_HIGHMEM
+       val->totalhigh = totalhigh_pages;
+       val->freehigh = nr_free_highpages();
+#else
+       val->totalhigh = 0;
+       val->freehigh = 0;
+#endif
+       val->mem_unit = PAGE_SIZE;
+}
+
+EXPORT_SYMBOL(si_meminfo);
+
+#ifdef CONFIG_NUMA
+void si_meminfo_node(struct sysinfo *val, int nid)
+{
+       pg_data_t *pgdat = NODE_DATA(nid);
+
+       val->totalram = pgdat->node_present_pages;
+       val->freeram = nr_free_pages_pgdat(pgdat);
+       val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+       val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+       val->mem_unit = PAGE_SIZE;
+}
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas(void)
+{
+       struct page_state ps;
+       int cpu, temperature;
+       unsigned long active;
+       unsigned long inactive;
+       unsigned long free;
+       struct zone *zone;
+
+       for_each_zone(zone) {
+               show_node(zone);
+               printk("%s per-cpu:", zone->name);
+
+               if (!zone->present_pages) {
+                       printk(" empty\n");
+                       continue;
+               } else
+                       printk("\n");
+
+               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+                       struct per_cpu_pageset *pageset;
+
+                       if (!cpu_possible(cpu))
+                               continue;
+
+                       pageset = zone->pageset + cpu;
+
+                       for (temperature = 0; temperature < 2; temperature++)
+                               printk("cpu %d %s: low %d, high %d, batch %d\n",
+                                       cpu,
+                                       temperature ? "cold" : "hot",
+                                       pageset->pcp[temperature].low,
+                                       pageset->pcp[temperature].high,
+                                       pageset->pcp[temperature].batch);
+               }
+       }
+
+       get_page_state(&ps);
+       get_zone_counts(&active, &inactive, &free);
+
+       printk("\nFree pages: %11ukB (%ukB HighMem)\n",
+               K(nr_free_pages()),
+               K(nr_free_highpages()));
+
+       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
+               "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+               active,
+               inactive,
+               ps.nr_dirty,
+               ps.nr_writeback,
+               ps.nr_unstable,
+               nr_free_pages(),
+               ps.nr_slab,
+               ps.nr_mapped,
+               ps.nr_page_table_pages);
+
+       for_each_zone(zone) {
+               int i;
+
+               show_node(zone);
+               printk("%s"
+                       " free:%lukB"
+                       " min:%lukB"
+                       " low:%lukB"
+                       " high:%lukB"
+                       " active:%lukB"
+                       " inactive:%lukB"
+                       " present:%lukB"
+                       "\n",
+                       zone->name,
+                       K(zone->free_pages),
+                       K(zone->pages_min),
+                       K(zone->pages_low),
+                       K(zone->pages_high),
+                       K(zone->nr_active),
+                       K(zone->nr_inactive),
+                       K(zone->present_pages)
+                       );
+               printk("protections[]:");
+               for (i = 0; i < MAX_NR_ZONES; i++)
+                       printk(" %lu", zone->protection[i]);
+               printk("\n");
+       }
+
+       for_each_zone(zone) {
+               struct list_head *elem;
+               unsigned long nr, flags, order, total = 0;
+
+               show_node(zone);
+               printk("%s: ", zone->name);
+               if (!zone->present_pages) {
+                       printk("empty\n");
+                       continue;
+               }
+
+               spin_lock_irqsave(&zone->lock, flags);
+               for (order = 0; order < MAX_ORDER; order++) {
+                       nr = 0;
+                       list_for_each(elem, &zone->free_area[order].free_list)
+                               ++nr;
+                       total += nr << order;
+                       printk("%lu*%lukB ", nr, K(1UL) << order);
+               }
+               spin_unlock_irqrestore(&zone->lock, flags);
+               printk("= %lukB\n", K(total));
+       }
+
+       show_swap_cache_info();
+}
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
+{
+       switch (k) {
+               struct zone *zone;
+       default:
+               BUG();
+       case ZONE_HIGHMEM:
+               zone = pgdat->node_zones + ZONE_HIGHMEM;
+               if (zone->present_pages) {
+#ifndef CONFIG_HIGHMEM
+                       BUG();
+#endif
+                       zonelist->zones[j++] = zone;
+               }
+       case ZONE_NORMAL:
+               zone = pgdat->node_zones + ZONE_NORMAL;
+               if (zone->present_pages)
+                       zonelist->zones[j++] = zone;
+       case ZONE_DMA:
+               zone = pgdat->node_zones + ZONE_DMA;
+               if (zone->present_pages)
+                       zonelist->zones[j++] = zone;
+       }
+
+       return j;
+}
+
+#ifdef CONFIG_NUMA
+#define MAX_NODE_LOAD (numnodes)
+static int __initdata node_load[MAX_NUMNODES];
+/**
+ * find_next_best_node - find the next node that should appear in a given
+ *    node's fallback list
+ * @node: node whose fallback list we're appending
+ * @used_node_mask: pointer to the bitmap of already used nodes
+ *
+ * We use a number of factors to determine which is the next node that should
+ * appear on a given node's fallback list.  The node should not have appeared
+ * already in @node's fallback list, and it should be the next closest node
+ * according to the distance array (which contains arbitrary distance values
+ * from each node to each node in the system), and should also prefer nodes
+ * with no CPUs, since presumably they'll have very little allocation pressure
+ * on them otherwise.
+ * It returns -1 if no node is found.
+ */
+static int __init find_next_best_node(int node, void *used_node_mask)
+{
+       int i, n, val;
+       int min_val = INT_MAX;
+       int best_node = -1;
+
+       for (i = 0; i < numnodes; i++) {
+               cpumask_t tmp;
+
+               /* Start from local node */
+               n = (node+i)%numnodes;
+
+               /* Don't want a node to appear more than once */
+               if (test_bit(n, used_node_mask))
+                       continue;
+
+               /* Use the distance array to find the distance */
+               val = node_distance(node, n);
+
+               /* Give preference to headless and unused nodes */
+               tmp = node_to_cpumask(n);
+               if (!cpus_empty(tmp))
+                       val += PENALTY_FOR_NODE_WITH_CPUS;
+
+               /* Slight preference for less loaded node */
+               val *= (MAX_NODE_LOAD*MAX_NUMNODES);
+               val += node_load[n];
+
+               if (val < min_val) {
+                       min_val = val;
+                       best_node = n;
+               }
+       }
+
+       if (best_node >= 0)
+               set_bit(best_node, used_node_mask);
+
+       return best_node;
+}
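
The scoring is easiest to see with toy numbers. The sketch below mimics the shape of the val computation above (distance, then a penalty for nodes that own CPUs, then a small load bias) for three hypothetical nodes; lower score wins. All values are made up.

    #include <limits.h>
    #include <stdio.h>

    #define NODES 3
    #define PENALTY_FOR_NODE_WITH_CPUS 1

    int main(void)
    {
            int distance[NODES] = { 10, 20, 20 };   /* node_distance(local, n)  */
            int has_cpus[NODES] = { 1, 0, 1 };      /* headless nodes preferred */
            int load[NODES]     = { 0, 2, 1 };      /* slight round-robin bias  */
            int best = -1, min_val = INT_MAX;

            for (int n = 0; n < NODES; n++) {
                    int val = distance[n] + (has_cpus[n] ? PENALTY_FOR_NODE_WITH_CPUS : 0);
                    val = val * (NODES * NODES) + load[n];
                    if (val < min_val) {
                            min_val = val;
                            best = n;
                    }
            }
            printf("next best node: %d\n", best);
            return 0;
    }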
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+       int i, j, k, node, local_node;
+       int prev_node, load;
+       struct zonelist *zonelist;
+       DECLARE_BITMAP(used_mask, MAX_NUMNODES);
+
+       /* initialize zonelists */
+       for (i = 0; i < GFP_ZONETYPES; i++) {
+               zonelist = pgdat->node_zonelists + i;
+               memset(zonelist, 0, sizeof(*zonelist));
+               zonelist->zones[0] = NULL;
+       }
+
+       /* NUMA-aware ordering of nodes */
+       local_node = pgdat->node_id;
+       load = numnodes;
+       prev_node = local_node;
+       bitmap_zero(used_mask, MAX_NUMNODES);
+       while ((node = find_next_best_node(local_node, used_mask)) >= 0) {
+               /*
+                * We don't want to pressure a particular node.
+                * So adding penalty to the first node in same
+                * distance group to make it round-robin.
+                */
+               if (node_distance(local_node, node) !=
+                               node_distance(local_node, prev_node))
+                       node_load[node] += load;
+               prev_node = node;
+               load--;
+               for (i = 0; i < GFP_ZONETYPES; i++) {
+                       zonelist = pgdat->node_zonelists + i;
+                       for (j = 0; zonelist->zones[j] != NULL; j++);
+
+                       k = ZONE_NORMAL;
+                       if (i & __GFP_HIGHMEM)
+                               k = ZONE_HIGHMEM;
+                       if (i & __GFP_DMA)
+                               k = ZONE_DMA;
+
+                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+                       zonelist->zones[j] = NULL;
+               }
+       }
+}
+
+#else  /* CONFIG_NUMA */
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+       int i, j, k, node, local_node;
+
+       local_node = pgdat->node_id;
+       for (i = 0; i < GFP_ZONETYPES; i++) {
+               struct zonelist *zonelist;
+
+               zonelist = pgdat->node_zonelists + i;
+               memset(zonelist, 0, sizeof(*zonelist));
+
+               j = 0;
+               k = ZONE_NORMAL;
+               if (i & __GFP_HIGHMEM)
+                       k = ZONE_HIGHMEM;
+               if (i & __GFP_DMA)
+                       k = ZONE_DMA;
+
+               j = build_zonelists_node(pgdat, zonelist, j, k);
+               /*
+                * Now we build the zonelist so that it contains the zones
+                * of all the other nodes.
+                * We don't want to pressure a particular node, so when
+                * building the zones for node N, we make sure that the
+                * zones coming right after the local ones are those from
+                * node N+1 (modulo N)
+                */
+               for (node = local_node + 1; node < numnodes; node++)
+                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+               for (node = 0; node < local_node; node++)
+                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+               zonelist->zones[j] = NULL;
+       }
+}
+
+#endif /* CONFIG_NUMA */
+
+void __init build_all_zonelists(void)
+{
+       int i;
+
+       for(i = 0 ; i < numnodes ; i++)
+               build_zonelists(NODE_DATA(i));
+       printk("Built %i zonelists\n", numnodes);
+}
+
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE    256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+       unsigned long size = 1;
+
+       pages /= PAGES_PER_WAITQUEUE;
+
+       while (size < pages)
+               size <<= 1;
+
+       /*
+        * Once we have dozens or even hundreds of threads sleeping
+        * on IO we've got bigger problems than wait queue collision.
+        * Limit the size of the wait table to a reasonable size.
+        */
+       size = min(size, 4096UL);
+
+       return max(size, 4UL);
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+       return ffz(~size);
+}
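
With the default PAGES_PER_WAITQUEUE of 256, a hypothetical 1 GiB zone of 4 KiB pages (262144 pages) gets a 1024-entry table and 10 index bits. The user-space restatement below reproduces that calculation; ffz(~size) is replaced by an explicit loop, which gives the same answer because size is always a power of two here.

    #include <stdio.h>

    static unsigned long table_size(unsigned long pages)
    {
            unsigned long size = 1;

            pages /= 256;                    /* PAGES_PER_WAITQUEUE */
            while (size < pages)
                    size <<= 1;
            if (size > 4096UL)
                    size = 4096UL;
            return size < 4UL ? 4UL : size;
    }

    int main(void)
    {
            unsigned long size = table_size(262144);   /* hypothetical zone */
            unsigned int bits = 0;

            while ((1UL << bits) < size)
                    bits++;
            printf("%lu hash buckets, %u index bits\n", size, bits);
            return 0;
    }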
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
+               unsigned long *zones_size, unsigned long *zholes_size)
+{
+       unsigned long realtotalpages, totalpages = 0;
+       int i;
+
+       for (i = 0; i < MAX_NR_ZONES; i++)
+               totalpages += zones_size[i];
+       pgdat->node_spanned_pages = totalpages;
+
+       realtotalpages = totalpages;
+       if (zholes_size)
+               for (i = 0; i < MAX_NR_ZONES; i++)
+                       realtotalpages -= zholes_size[i];
+       pgdat->node_present_pages = realtotalpages;
+       printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
+}
+
+
+/*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+               unsigned long start_pfn)
+{
+       struct page *start = pfn_to_page(start_pfn);
+       struct page *page;
+
+       for (page = start; page < (start + size); page++) {
+               set_page_zone(page, NODEZONE(nid, zone));
+               set_page_count(page, 0);
+               reset_page_mapcount(page);
+               SetPageReserved(page);
+               INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+               /* The shift won't overflow because ZONE_NORMAL is below 4G. */
+               if (!is_highmem_idx(zone))
+                       set_page_address(page, __va(start_pfn << PAGE_SHIFT));
+#endif
+               start_pfn++;
+       }
+}
+
+/*
+ * Page buddy system uses "index >> (i+1)", where "index" is
+ * at most "size-1".
+ *
+ * The extra "+3" is to round down to byte size (8 bits per byte
+ * assumption). Thus we get "(size-1) >> (i+4)" as the last byte
+ * we can access.
+ *
+ * The "+1" is because we want to round the byte allocation up
+ * rather than down. So we should have had a "+7" before we shifted
+ * down by three. Also, we have to add one as we actually _use_ the
+ * last bit (it's [0,n] inclusive, not [0,n[).
+ *
+ * So we actually had +7+1 before we shift down by 3. But
+ * (n+8) >> 3 == (n >> 3) + 1 (modulo overflows, which we do not have).
+ *
+ * Finally, we LONG_ALIGN because all bitmap operations are on longs.
+ */
+unsigned long pages_to_bitmap_size(unsigned long order, unsigned long nr_pages)
+{
+       unsigned long bitmap_size;
+
+       bitmap_size = (nr_pages-1) >> (order+4);
+       bitmap_size = LONG_ALIGN(bitmap_size+1);
+
+       return bitmap_size;
+}
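
Plugging numbers into the formula helps: for a hypothetical 262144-page zone the order-0 buddy bitmap needs 16 KiB, and each higher order roughly halves that. The short program below repeats the same arithmetic in user space.

    #include <stdio.h>

    #define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

    int main(void)
    {
            unsigned long nr_pages = 262144;          /* hypothetical zone size */

            for (unsigned long order = 0; order < 4; order++) {
                    unsigned long bytes = (nr_pages - 1) >> (order + 4);
                    bytes = LONG_ALIGN(bytes + 1);
                    printf("order %lu: %lu-byte buddy bitmap\n", order, bytes);
            }
            return 0;
    }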
+
+void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, unsigned long size)
+{
+       int order;
+       for (order = 0; ; order++) {
+               unsigned long bitmap_size;
+
+               INIT_LIST_HEAD(&zone->free_area[order].free_list);
+               if (order == MAX_ORDER-1) {
+                       zone->free_area[order].map = NULL;
+                       break;
+               }
+
+               bitmap_size = pages_to_bitmap_size(order, size);
+               zone->free_area[order].map =
+                 (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
+       }
+}
+
+#ifndef __HAVE_ARCH_MEMMAP_INIT
+#define memmap_init(size, nid, zone, start_pfn) \
+       memmap_init_zone((size), (nid), (zone), (start_pfn))
+#endif
+
+/*
+ * Set up the zone data structures:
+ *   - mark all pages reserved
+ *   - mark all memory queues empty
+ *   - clear the memory bitmaps
+ */
+static void __init free_area_init_core(struct pglist_data *pgdat,
+               unsigned long *zones_size, unsigned long *zholes_size)
+{
+       unsigned long i, j;
+       const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
+       int cpu, nid = pgdat->node_id;
+       unsigned long zone_start_pfn = pgdat->node_start_pfn;
+
+       pgdat->nr_zones = 0;
+       init_waitqueue_head(&pgdat->kswapd_wait);
+       
+       for (j = 0; j < MAX_NR_ZONES; j++) {
+               struct zone *zone = pgdat->node_zones + j;
+               unsigned long size, realsize;
+               unsigned long batch;
+
+               zone_table[NODEZONE(nid, j)] = zone;
+               realsize = size = zones_size[j];
+               if (zholes_size)
+                       realsize -= zholes_size[j];
+
+               if (j == ZONE_DMA || j == ZONE_NORMAL)
+                       nr_kernel_pages += realsize;
+               nr_all_pages += realsize;
+
+               zone->spanned_pages = size;
+               zone->present_pages = realsize;
+               zone->name = zone_names[j];
+               spin_lock_init(&zone->lock);
+               spin_lock_init(&zone->lru_lock);
+               zone->zone_pgdat = pgdat;
+               zone->free_pages = 0;
+
+               zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+
+               /*
+                * The per-cpu-pages pools are set to around 1000th of the
+                * size of the zone.  But no more than 1/4 of a meg - there's
+                * no point in going beyond the size of L2 cache.
+                *
+                * OK, so we don't know how big the cache is.  So guess.
+                */
+               batch = zone->present_pages / 1024;
+               if (batch * PAGE_SIZE > 256 * 1024)
+                       batch = (256 * 1024) / PAGE_SIZE;
+               batch /= 4;             /* We effectively *= 4 below */
+               if (batch < 1)
+                       batch = 1;
+
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                       struct per_cpu_pages *pcp;
+
+                       pcp = &zone->pageset[cpu].pcp[0];       /* hot */
+                       pcp->count = 0;
+                       pcp->low = 2 * batch;
+                       pcp->high = 6 * batch;
+                       pcp->batch = 1 * batch;
+                       INIT_LIST_HEAD(&pcp->list);
+
+                       pcp = &zone->pageset[cpu].pcp[1];       /* cold */
+                       pcp->count = 0;
+                       pcp->low = 0;
+                       pcp->high = 2 * batch;
+                       pcp->batch = 1 * batch;
+                       INIT_LIST_HEAD(&pcp->list);
+               }
+               printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
+                               zone_names[j], realsize, batch);
+               INIT_LIST_HEAD(&zone->active_list);
+               INIT_LIST_HEAD(&zone->inactive_list);
+               zone->nr_scan_active = 0;
+               zone->nr_scan_inactive = 0;
+               zone->nr_active = 0;
+               zone->nr_inactive = 0;
+               if (!size)
+                       continue;
+
+               /*
+                * The per-page waitqueue mechanism uses hashed waitqueues
+                * per zone.
+                */
+               zone->wait_table_size = wait_table_size(size);
+               zone->wait_table_bits =
+                       wait_table_bits(zone->wait_table_size);
+               zone->wait_table = (wait_queue_head_t *)
+                       alloc_bootmem_node(pgdat, zone->wait_table_size
+                                               * sizeof(wait_queue_head_t));
+
+               for(i = 0; i < zone->wait_table_size; ++i)
+                       init_waitqueue_head(zone->wait_table + i);
+
+               pgdat->nr_zones = j+1;
+
+               zone->zone_mem_map = pfn_to_page(zone_start_pfn);
+               zone->zone_start_pfn = zone_start_pfn;
+
+               if ((zone_start_pfn) & (zone_required_alignment-1))
+                       printk("BUG: wrong zone alignment, it will crash\n");
+
+               memmap_init(size, nid, j, zone_start_pfn);
+
+               zone_start_pfn += size;
+
+               zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+       }
+}
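
For a hypothetical 1 GiB zone of 4 KiB pages the batch heuristic above gives batch = 16, so the hot per-cpu list drains once it holds 96 pages and refills in chunks of 16. The sketch below just redoes that arithmetic outside the kernel.

    #include <stdio.h>

    int main(void)
    {
            unsigned long present_pages = 262144, page_size = 4096;  /* invented */
            unsigned long batch = present_pages / 1024;

            if (batch * page_size > 256 * 1024)
                    batch = (256 * 1024) / page_size;
            batch /= 4;
            if (batch < 1)
                    batch = 1;

            printf("batch=%lu  hot: low=%lu high=%lu  cold: low=0 high=%lu\n",
                   batch, 2 * batch, 6 * batch, 2 * batch);
            return 0;
    }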
+
+void __init node_alloc_mem_map(struct pglist_data *pgdat)
+{
+       unsigned long size;
+
+       size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+       pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
+#ifndef CONFIG_DISCONTIGMEM
+       mem_map = contig_page_data.node_mem_map;
+#endif
+}
+
+void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+               unsigned long *zones_size, unsigned long node_start_pfn,
+               unsigned long *zholes_size)
+{
+       pgdat->node_id = nid;
+       pgdat->node_start_pfn = node_start_pfn;
+       calculate_zone_totalpages(pgdat, zones_size, zholes_size);
+
+       if (!pfn_to_page(node_start_pfn))
+               node_alloc_mem_map(pgdat);
+
+       free_area_init_core(pgdat, zones_size, zholes_size);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static bootmem_data_t contig_bootmem_data;
+struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
+
+EXPORT_SYMBOL(contig_page_data);
+
+void __init free_area_init(unsigned long *zones_size)
+{
+       free_area_init_node(0, &contig_page_data, zones_size,
+                       __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+static void *frag_start(struct seq_file *m, loff_t *pos)
+{
+       pg_data_t *pgdat;
+       loff_t node = *pos;
+
+       for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
+               --node;
+
+       return pgdat;
+}
+
+static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+       pg_data_t *pgdat = (pg_data_t *)arg;
+
+       (*pos)++;
+       return pgdat->pgdat_next;
+}
+
+static void frag_stop(struct seq_file *m, void *arg)
+{
+}
+
+/* 
+ * This walks the freelist for each zone. Whilst this is slow, I'd rather 
+ * be slow here than slow down the fast path by keeping stats - mjbligh
+ */
+static int frag_show(struct seq_file *m, void *arg)
+{
+       pg_data_t *pgdat = (pg_data_t *)arg;
+       struct zone *zone;
+       struct zone *node_zones = pgdat->node_zones;
+       unsigned long flags;
+       int order;
+
+       for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+               if (!zone->present_pages)
+                       continue;
+
+               spin_lock_irqsave(&zone->lock, flags);
+               seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+               for (order = 0; order < MAX_ORDER; ++order) {
+                       unsigned long nr_bufs = 0;
+                       struct list_head *elem;
+
+                       list_for_each(elem, &(zone->free_area[order].free_list))
+                               ++nr_bufs;
+                       seq_printf(m, "%6lu ", nr_bufs);
+               }
+               spin_unlock_irqrestore(&zone->lock, flags);
+               seq_putc(m, '\n');
+       }
+       return 0;
+}
+
+struct seq_operations fragmentation_op = {
+       .start  = frag_start,
+       .next   = frag_next,
+       .stop   = frag_stop,
+       .show   = frag_show,
+};
+
+static char *vmstat_text[] = {
+       "nr_dirty",
+       "nr_writeback",
+       "nr_unstable",
+       "nr_page_table_pages",
+       "nr_mapped",
+       "nr_slab",
+
+       "pgpgin",
+       "pgpgout",
+       "pswpin",
+       "pswpout",
+       "pgalloc_high",
+
+       "pgalloc_normal",
+       "pgalloc_dma",
+       "pgfree",
+       "pgactivate",
+       "pgdeactivate",
+
+       "pgfault",
+       "pgmajfault",
+       "pgrefill_high",
+       "pgrefill_normal",
+       "pgrefill_dma",
+
+       "pgsteal_high",
+       "pgsteal_normal",
+       "pgsteal_dma",
+       "pgscan_kswapd_high",
+       "pgscan_kswapd_normal",
+
+       "pgscan_kswapd_dma",
+       "pgscan_direct_high",
+       "pgscan_direct_normal",
+       "pgscan_direct_dma",
+       "pginodesteal",
+
+       "slabs_scanned",
+       "kswapd_steal",
+       "kswapd_inodesteal",
+       "pageoutrun",
+       "allocstall",
+
+       "pgrotated",
+};
+
+static void *vmstat_start(struct seq_file *m, loff_t *pos)
+{
+       struct page_state *ps;
+
+       if (*pos >= ARRAY_SIZE(vmstat_text))
+               return NULL;
+
+       ps = kmalloc(sizeof(*ps), GFP_KERNEL);
+       m->private = ps;
+       if (!ps)
+               return ERR_PTR(-ENOMEM);
+       get_full_page_state(ps);
+       ps->pgpgin /= 2;                /* sectors -> kbytes */
+       ps->pgpgout /= 2;
+       return (unsigned long *)ps + *pos;
+}
+
+static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+       (*pos)++;
+       if (*pos >= ARRAY_SIZE(vmstat_text))
+               return NULL;
+       return (unsigned long *)m->private + *pos;
+}
+
+static int vmstat_show(struct seq_file *m, void *arg)
+{
+       unsigned long *l = arg;
+       unsigned long off = l - (unsigned long *)m->private;
+
+       seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+       return 0;
+}
+
+static void vmstat_stop(struct seq_file *m, void *arg)
+{
+       kfree(m->private);
+       m->private = NULL;
+}
+
+struct seq_operations vmstat_op = {
+       .start  = vmstat_start,
+       .next   = vmstat_next,
+       .stop   = vmstat_stop,
+       .show   = vmstat_show,
+};
+
+#endif /* CONFIG_PROC_FS */
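
The vmstat callbacks above rely on struct page_state being a flat run of unsigned long counters laid out in the same order as vmstat_text[], so vmstat_start()/vmstat_next() can step through it with plain pointer arithmetic. A small standalone sketch of that pattern, using hypothetical names in place of the kernel structures:

#include <stdio.h>

/* Counters kept as consecutive unsigned longs, mirroring a parallel name table. */
struct counters {
        unsigned long nr_dirty;
        unsigned long nr_writeback;
        unsigned long pgpgin;
        unsigned long pgpgout;
};

static const char *names[] = { "nr_dirty", "nr_writeback", "pgpgin", "pgpgout" };

int main(void)
{
        struct counters c = { 3, 1, 4096, 2048 };
        unsigned long *l = (unsigned long *)&c;   /* same cast as vmstat_start() */
        unsigned int i;

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                printf("%s %lu\n", names[i], l[i]);
        return 0;
}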
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int page_alloc_cpu_notify(struct notifier_block *self,
+                                unsigned long action, void *hcpu)
+{
+       int cpu = (unsigned long)hcpu;
+       long *count;
+
+       if (action == CPU_DEAD) {
+               /* Drain local pagecache count. */
+               count = &per_cpu(nr_pagecache_local, cpu);
+               atomic_add(*count, &nr_pagecache);
+               *count = 0;
+               local_irq_disable();
+               __drain_pages(cpu);
+               local_irq_enable();
+       }
+       return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void __init page_alloc_init(void)
+{
+       hotcpu_notifier(page_alloc_cpu_notify, 0);
+}
+
+static unsigned long higherzone_val(struct zone *z, int max_zone,
+                                       int alloc_type)
+{
+       int z_idx = zone_idx(z);
+       struct zone *higherzone;
+       unsigned long pages;
+
+       /* there is no higher zone to get a contribution from */
+       if (z_idx == MAX_NR_ZONES-1)
+               return 0;
+
+       higherzone = &z->zone_pgdat->node_zones[z_idx+1];
+
+       /* We always start with the higher zone's protection value */
+       pages = higherzone->protection[alloc_type];
+
+       /*
+        * We get a lower-zone-protection contribution only if there are
+        * pages in the higher zone and if we're not the highest zone
+        * in the current zonelist.  e.g., never happens for GFP_DMA. Happens
+        * only for ZONE_DMA in a GFP_KERNEL allocation and happens for ZONE_DMA
+        * and ZONE_NORMAL for a GFP_HIGHMEM allocation.
+        */
+       if (higherzone->present_pages && z_idx < alloc_type)
+               pages += higherzone->pages_low * sysctl_lower_zone_protection;
+
+       return pages;
+}
+
+/*
+ * setup_per_zone_protection - called whenever min_free_kbytes or
+ *     sysctl_lower_zone_protection changes.  Ensures that each zone
+ *     has a correct pages_protected value, so an adequate number of
+ *     pages are left in the zone after a successful __alloc_pages().
+ *
+ *     This algorithm is way confusing.  It tries to keep the same behavior
+ *     as we had with the incremental min iterative algorithm.
+ */
+static void setup_per_zone_protection(void)
+{
+       struct pglist_data *pgdat;
+       struct zone *zones, *zone;
+       int max_zone;
+       int i, j;
+
+       for_each_pgdat(pgdat) {
+               zones = pgdat->node_zones;
+
+               for (i = 0, max_zone = 0; i < MAX_NR_ZONES; i++)
+                       if (zones[i].present_pages)
+                               max_zone = i;
+
+               /*
+                * For each of the different allocation types:
+                * GFP_DMA -> GFP_KERNEL -> GFP_HIGHMEM
+                */
+               for (i = 0; i < GFP_ZONETYPES; i++) {
+                       /*
+                        * For each of the zones:
+                        * ZONE_HIGHMEM -> ZONE_NORMAL -> ZONE_DMA
+                        */
+                       for (j = MAX_NR_ZONES-1; j >= 0; j--) {
+                               zone = &zones[j];
+
+                               /*
+                                * We never protect zones that don't have memory
+                                * in them (j>max_zone) or zones that aren't in
+                                * the zonelists for a certain type of
+                                * allocation (j>=i).  We have to assign these
+                                * to zero because the lower zones take
+                                * contributions from the higher zones.
+                                */
+                               if (j > max_zone || j >= i) {
+                                       zone->protection[i] = 0;
+                                       continue;
+                               }
+                               /*
+                                * The contribution of the next higher zone
+                                */
+                               zone->protection[i] = higherzone_val(zone,
+                                                               max_zone, i);
+                       }
+               }
+       }
+}
+
+/*
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures 
+ *     that the pages_{min,low,high} values for each zone are set correctly 
+ *     with respect to min_free_kbytes.
+ */
+static void setup_per_zone_pages_min(void)
+{
+       unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+       unsigned long lowmem_pages = 0;
+       struct zone *zone;
+       unsigned long flags;
+
+       /* Calculate total number of !ZONE_HIGHMEM pages */
+       for_each_zone(zone) {
+               if (!is_highmem(zone))
+                       lowmem_pages += zone->present_pages;
+       }
+
+       for_each_zone(zone) {
+               spin_lock_irqsave(&zone->lru_lock, flags);
+               if (is_highmem(zone)) {
+                       /*
+                        * Often, highmem doesn't need to reserve any pages.
+                        * But the pages_min/low/high values are also used for
+                        * batching up page reclaim activity so we need a
+                        * decent value here.
+                        */
+                       int min_pages;
+
+                       min_pages = zone->present_pages / 1024;
+                       if (min_pages < SWAP_CLUSTER_MAX)
+                               min_pages = SWAP_CLUSTER_MAX;
+                       if (min_pages > 128)
+                               min_pages = 128;
+                       zone->pages_min = min_pages;
+               } else {
+                       /* if it's a lowmem zone, reserve a number of pages 
+                        * proportionate to the zone's size.
+                        */
+                       zone->pages_min = (pages_min * zone->present_pages) / 
+                                          lowmem_pages;
+               }
+
+               zone->pages_low = zone->pages_min * 2;
+               zone->pages_high = zone->pages_min * 3;
+               spin_unlock_irqrestore(&zone->lru_lock, flags);
+       }
+}
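
setup_per_zone_pages_min() spreads the reserve implied by min_free_kbytes across the lowmem zones in proportion to their size, clamps highmem zones to a small fixed band, and derives pages_low and pages_high as 2x and 3x pages_min. A rough standalone sketch of that arithmetic, assuming 4 KiB pages and invented zone sizes:

#include <stdio.h>

int main(void)
{
        unsigned long min_free_kbytes = 1024;            /* example value */
        unsigned long page_shift = 12;                   /* 4 KiB pages   */
        unsigned long pages_min = min_free_kbytes >> (page_shift - 10);
        unsigned long zone_pages[2] = { 4096, 221184 };  /* DMA, NORMAL   */
        unsigned long lowmem_pages = zone_pages[0] + zone_pages[1];
        int i;

        for (i = 0; i < 2; i++) {
                unsigned long zmin = pages_min * zone_pages[i] / lowmem_pages;

                printf("zone %d: pages_min=%lu pages_low=%lu pages_high=%lu\n",
                       i, zmin, zmin * 2, zmin * 3);
        }
        return 0;
}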
+
+/*
+ * Initialise min_free_kbytes.
+ *
+ * For small machines we want it small (128k min).  For large machines
+ * we want it large (16MB max).  But it is not linear, because network
+ * bandwidth does not increase linearly with machine size.  We use
+ *
+ *     min_free_kbytes = sqrt(lowmem_kbytes)
+ *
+ * which yields
+ *
+ * 16MB:       128k
+ * 32MB:       181k
+ * 64MB:       256k
+ * 128MB:      362k
+ * 256MB:      512k
+ * 512MB:      724k
+ * 1024MB:     1024k
+ * 2048MB:     1448k
+ * 4096MB:     2048k
+ * 8192MB:     2896k
+ * 16384MB:    4096k
+ */
+static int __init init_per_zone_pages_min(void)
+{
+       unsigned long lowmem_kbytes;
+
+       lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
+
+       min_free_kbytes = int_sqrt(lowmem_kbytes);
+       if (min_free_kbytes < 128)
+               min_free_kbytes = 128;
+       if (min_free_kbytes > 16384)
+               min_free_kbytes = 16384;
+       setup_per_zone_pages_min();
+       setup_per_zone_protection();
+       return 0;
+}
+module_init(init_per_zone_pages_min)
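
The table above follows directly from the square-root scaling clamped to 128k..16384k; it can be reproduced with a few lines of ordinary C (libm's sqrt() standing in for the kernel's int_sqrt(); link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
        unsigned long mb;

        for (mb = 16; mb <= 16384; mb *= 2) {
                unsigned long lowmem_kbytes = mb * 1024;
                unsigned long min_free = (unsigned long)sqrt((double)lowmem_kbytes);

                if (min_free < 128)
                        min_free = 128;
                if (min_free > 16384)
                        min_free = 16384;
                printf("%6luMB: %luk\n", mb, min_free);
        }
        return 0;
}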
+
+/*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
+ *     that we can call two helper functions whenever min_free_kbytes
+ *     changes.
+ */
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
+               struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       proc_dointvec(table, write, file, buffer, length, ppos);
+       setup_per_zone_pages_min();
+       setup_per_zone_protection();
+       return 0;
+}
+
+/*
+ * lower_zone_protection_sysctl_handler - just a wrapper around
+ *     proc_dointvec() so that we can call setup_per_zone_protection()
+ *     whenever sysctl_lower_zone_protection changes.
+ */
+int lower_zone_protection_sysctl_handler(ctl_table *table, int write,
+                struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       setup_per_zone_protection();
+       return 0;
+}
+
+/*
+ * allocate a large system hash table from bootmem
+ * - it is assumed that the hash table must contain an exact power-of-2
+ *   quantity of entries
+ */
+void *__init alloc_large_system_hash(const char *tablename,
+                                    unsigned long bucketsize,
+                                    unsigned long numentries,
+                                    int scale,
+                                    int consider_highmem,
+                                    unsigned int *_hash_shift,
+                                    unsigned int *_hash_mask)
+{
+       unsigned long long max;
+       unsigned long log2qty, size;
+       void *table;
+
+       /* allow the kernel cmdline to have a say */
+       if (!numentries) {
+               /* round applicable memory size up to nearest megabyte */
+               numentries = consider_highmem ? nr_all_pages : nr_kernel_pages;
+               numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
+               numentries >>= 20 - PAGE_SHIFT;
+               numentries <<= 20 - PAGE_SHIFT;
+
+               /* limit to 1 bucket per 2^scale bytes of low memory */
+               if (scale > PAGE_SHIFT)
+                       numentries >>= (scale - PAGE_SHIFT);
+               else
+                       numentries <<= (PAGE_SHIFT - scale);
+       }
+       /* rounded up to nearest power of 2 in size */
+       numentries = 1UL << (long_log2(numentries) + 1);
+
+       /* limit allocation size to 1/16 total memory */
+       max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
+       do_div(max, bucketsize);
+
+       if (numentries > max)
+               numentries = max;
+
+       log2qty = long_log2(numentries);
+
+       do {
+               size = bucketsize << log2qty;
+               table = alloc_bootmem(size);
+       } while (!table && size > PAGE_SIZE && --log2qty);
+
+       if (!table)
+               panic("Failed to allocate %s hash table\n", tablename);
+
+       printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
+              tablename,
+              (1U << log2qty),
+              long_log2(size) - PAGE_SHIFT,
+              size);
+
+       if (_hash_shift)
+               *_hash_shift = log2qty;
+       if (_hash_mask)
+               *_hash_mask = (1 << log2qty) - 1;
+
+       return table;
+}
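
The sizing logic rounds the entry estimate up to a power of two, caps the table at 1/16 of memory, and hands back the shift and mask that hash lookups index with. A standalone sketch of the same arithmetic with invented inputs; floor_log2() here is a stand-in for the kernel's long_log2():

#include <stdio.h>

static unsigned long floor_log2(unsigned long n)
{
        unsigned long log = 0;

        while (n > 1) {
                n >>= 1;
                log++;
        }
        return log;
}

int main(void)
{
        unsigned long long mem_bytes = 256ULL << 20;   /* pretend 256 MB of RAM */
        unsigned long bucketsize = 16;                 /* bytes per bucket      */
        unsigned long numentries = 60000;              /* raw estimate          */
        unsigned long long max = (mem_bytes >> 4) / bucketsize;
        unsigned long log2qty, mask;

        numentries = 1UL << (floor_log2(numentries) + 1);   /* next power of 2 */
        if (numentries > max)
                numentries = max;
        log2qty = floor_log2(numentries);
        mask = (1UL << log2qty) - 1;

        printf("entries=%lu shift=%lu mask=%#lx size=%lu bytes\n",
               numentries, log2qty, mask, bucketsize << log2qty);
        return 0;
}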
diff --git a/linux-2.6.9-xen-sparse/net/core/skbuff.c b/linux-2.6.9-xen-sparse/net/core/skbuff.c
new file mode 100644 (file)
index 0000000..41b512c
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ *     Routines having to do with the 'struct sk_buff' memory handlers.
+ *
+ *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *                     Florian La Roche <rzsfl@rz.uni-sb.de>
+ *
+ *     Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
+ *
+ *     Fixes:
+ *             Alan Cox        :       Fixed the worst of the load
+ *                                     balancer bugs.
+ *             Dave Platt      :       Interrupt stacking fix.
+ *     Richard Kooijman        :       Timestamp fixes.
+ *             Alan Cox        :       Changed buffer format.
+ *             Alan Cox        :       destructor hook for AF_UNIX etc.
+ *             Linus Torvalds  :       Better skb_clone.
+ *             Alan Cox        :       Added skb_copy.
+ *             Alan Cox        :       Added all the changed routines Linus
+ *                                     only put in the headers
+ *             Ray VanTassle   :       Fixed --skb->lock in free
+ *             Alan Cox        :       skb_copy copy arp field
+ *             Andi Kleen      :       slabified it.
+ *             Robert Olsson   :       Removed skb_head_pool
+ *
+ *     NOTE:
+ *             The __skb_ routines should be called with interrupts
+ *     disabled, or you better be *real* sure that the operation is atomic
+ *     with respect to whatever list is being frobbed (e.g. via lock_sock()
+ *     or via disabling bottom half handlers, etc).
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+/*
+ *     The functions in this file will not compile correctly with gcc 2.4.x
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#ifdef CONFIG_NET_CLS_ACT
+#include <net/pkt_sched.h>
+#endif
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <net/protocol.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/xfrm.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+static kmem_cache_t *skbuff_head_cache;
+
+/*
+ *     Keep out-of-line to prevent kernel bloat.
+ *     __builtin_return_address is not used because it is not always
+ *     reliable.
+ */
+
+/**
+ *     skb_over_panic  -       private function
+ *     @skb: buffer
+ *     @sz: size
+ *     @here: address
+ *
+ *     Out of line support code for skb_put(). Not user callable.
+ */
+void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+{
+       printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
+               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+       BUG();
+}
+
+/**
+ *     skb_under_panic -       private function
+ *     @skb: buffer
+ *     @sz: size
+ *     @here: address
+ *
+ *     Out of line support code for skb_push(). Not user callable.
+ */
+
+void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+{
+       printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
+               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+       BUG();
+}
+
+/*     Allocate a new skbuff. We do this ourselves so we can fill in a few
+ *     'private' fields and also do memory statistics to find all the
+ *     [BEEP] leaks.
+ *
+ */
+
+/**
+ *     alloc_skb       -       allocate a network buffer
+ *     @size: size to allocate
+ *     @gfp_mask: allocation mask
+ *
+ *     Allocate a new &sk_buff. The returned buffer has no headroom and a
+ *     tail room of size bytes. The object has a reference count of one.
+ *     The return is the buffer. On a failure the return is %NULL.
+ *
+ *     Buffers may only be allocated from interrupts using a @gfp_mask of
+ *     %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+{
+       struct sk_buff *skb;
+       u8 *data;
+
+       /* Get the HEAD */
+       skb = kmem_cache_alloc(skbuff_head_cache,
+                              gfp_mask & ~__GFP_DMA);
+       if (!skb)
+               goto out;
+
+       /* Get the DATA. Size must match skb_add_mtu(). */
+       size = SKB_DATA_ALIGN(size);
+       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       if (!data)
+               goto nodata;
+
+       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       skb->truesize = size + sizeof(struct sk_buff);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb->tail = data;
+       skb->end  = data + size;
+
+       atomic_set(&(skb_shinfo(skb)->dataref), 1);
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->tso_segs = 0;
+       skb_shinfo(skb)->frag_list = NULL;
+out:
+       return skb;
+nodata:
+       kmem_cache_free(skbuff_head_cache, skb);
+       skb = NULL;
+       goto out;
+}
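
As the kernel-doc above notes, a fresh buffer from alloc_skb() starts with head == data == tail and size bytes of tailroom; callers then make headroom with skb_reserve() (advance data and tail together) and append payload with skb_put() (advance tail and grow len). A toy userspace model of just that pointer bookkeeping, not the kernel API itself:

#include <stdio.h>

struct toy_skb { unsigned char *head, *data, *tail, *end; size_t len; };

static void toy_reserve(struct toy_skb *s, size_t n) { s->data += n; s->tail += n; }
static unsigned char *toy_put(struct toy_skb *s, size_t n)
{
        unsigned char *old = s->tail;

        s->tail += n;
        s->len  += n;
        return old;
}

int main(void)
{
        unsigned char buf[2048];
        struct toy_skb s;

        s.head = s.data = s.tail = buf;
        s.end  = buf + sizeof(buf);
        s.len  = 0;

        toy_reserve(&s, 128);           /* headroom for headers pushed later */
        toy_put(&s, 1000);              /* append 1000 bytes of payload      */
        printf("headroom=%td tailroom=%td len=%zu\n",
               s.data - s.head, s.end - s.tail, s.len);
        return 0;
}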
+
+/**
+ *     alloc_skb_from_cache    -       allocate a network buffer
+ *     @cp: kmem_cache from which to allocate the data area
+ *           (object size must be big enough for @size bytes + skb overheads)
+ *     @size: size to allocate
+ *     @gfp_mask: allocation mask
+ *
+ *     Allocate a new &sk_buff. The returned buffer has no headroom and a
+ *     tail room of size bytes. The object has a reference count of one.
+ *     The return is the buffer. On a failure the return is %NULL.
+ *
+ *     Buffers may only be allocated from interrupts using a @gfp_mask of
+ *     %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                    unsigned int size, int gfp_mask)
+{
+       struct sk_buff *skb;
+       u8 *data;
+
+       /* Get the HEAD */
+       skb = kmem_cache_alloc(skbuff_head_cache,
+                              gfp_mask & ~__GFP_DMA);
+       if (!skb)
+               goto out;
+
+       /* Get the DATA. */
+       size = SKB_DATA_ALIGN(size);
+       data = kmem_cache_alloc(cp, gfp_mask);
+       if (!data)
+               goto nodata;
+
+       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       skb->truesize = size + sizeof(struct sk_buff);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb->tail = data;
+       skb->end  = data + size;
+
+       atomic_set(&(skb_shinfo(skb)->dataref), 1);
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->tso_segs = 0;
+       skb_shinfo(skb)->frag_list = NULL;
+out:
+       return skb;
+nodata:
+       kmem_cache_free(skbuff_head_cache, skb);
+       skb = NULL;
+       goto out;
+}
+
+
+static void skb_drop_fraglist(struct sk_buff *skb)
+{
+       struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+       skb_shinfo(skb)->frag_list = NULL;
+
+       do {
+               struct sk_buff *this = list;
+               list = list->next;
+               kfree_skb(this);
+       } while (list);
+}
+
+static void skb_clone_fraglist(struct sk_buff *skb)
+{
+       struct sk_buff *list;
+
+       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+               skb_get(list);
+}
+
+void skb_release_data(struct sk_buff *skb)
+{
+       if (!skb->cloned ||
+           atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+               if (skb_shinfo(skb)->nr_frags) {
+                       int i;
+                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+                               put_page(skb_shinfo(skb)->frags[i].page);
+               }
+
+               if (skb_shinfo(skb)->frag_list)
+                       skb_drop_fraglist(skb);
+
+               kfree(skb->head);
+       }
+}
+
+/*
+ *     Free the memory of an skbuff without cleaning its state.
+ */
+void kfree_skbmem(struct sk_buff *skb)
+{
+       skb_release_data(skb);
+       kmem_cache_free(skbuff_head_cache, skb);
+}
+
+/**
+ *     __kfree_skb - private function
+ *     @skb: buffer
+ *
+ *     Free an sk_buff. Release anything attached to the buffer.
+ *     Clean the state. This is an internal helper function. Users should
+ *     always call kfree_skb
+ */
+
+void __kfree_skb(struct sk_buff *skb)
+{
+       if (skb->list) {
+               printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+                      "on a list (from %p).\n", NET_CALLER(skb));
+               BUG();
+       }
+
+       dst_release(skb->dst);
+#ifdef CONFIG_XFRM
+       secpath_put(skb->sp);
+#endif
+       if(skb->destructor) {
+               if (in_irq())
+                       printk(KERN_WARNING "Warning: kfree_skb on "
+                                           "hard IRQ %p\n", NET_CALLER(skb));
+               skb->destructor(skb);
+       }
+#ifdef CONFIG_NETFILTER
+       nf_conntrack_put(skb->nfct);
+#ifdef CONFIG_BRIDGE_NETFILTER
+       nf_bridge_put(skb->nf_bridge);
+#endif
+#endif
+/* XXX: IS this still necessary? - JHS */
+#ifdef CONFIG_NET_SCHED
+       skb->tc_index = 0;
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_verd = 0;
+       skb->tc_classid = 0;
+#endif
+#endif
+
+       kfree_skbmem(skb);
+}
+
+/**
+ *     skb_clone       -       duplicate an sk_buff
+ *     @skb: buffer to clone
+ *     @gfp_mask: allocation priority
+ *
+ *     Duplicate an &sk_buff. The new one is not owned by a socket. Both
+ *     copies share the same packet data but not structure. The new
+ *     buffer has a reference count of 1. If the allocation fails the
+ *     function returns %NULL otherwise the new buffer is returned.
+ *
+ *     If this function is called from an interrupt, @gfp_mask must be
+ *     %GFP_ATOMIC.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+{
+       struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+
+       if (!n) 
+               return NULL;
+
+#define C(x) n->x = skb->x
+
+       n->next = n->prev = NULL;
+       n->list = NULL;
+       n->sk = NULL;
+       C(stamp);
+       C(dev);
+       C(real_dev);
+       C(h);
+       C(nh);
+       C(mac);
+       C(dst);
+       dst_clone(skb->dst);
+       C(sp);
+#ifdef CONFIG_INET
+       secpath_get(skb->sp);
+#endif
+       memcpy(n->cb, skb->cb, sizeof(skb->cb));
+       C(len);
+       C(data_len);
+       C(csum);
+       C(local_df);
+       n->cloned = 1;
+       C(pkt_type);
+       C(ip_summed);
+       C(priority);
+       C(protocol);
+       C(security);
+       n->destructor = NULL;
+#ifdef CONFIG_NETFILTER
+       C(nfmark);
+       C(nfcache);
+       C(nfct);
+       nf_conntrack_get(skb->nfct);
+       C(nfctinfo);
+#ifdef CONFIG_NETFILTER_DEBUG
+       C(nf_debug);
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       C(nf_bridge);
+       nf_bridge_get(skb->nf_bridge);
+#endif
+#endif /*CONFIG_NETFILTER*/
+#if defined(CONFIG_HIPPI)
+       C(private);
+#endif
+#ifdef CONFIG_NET_SCHED
+       C(tc_index);
+#ifdef CONFIG_NET_CLS_ACT
+       n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
+       n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd);
+       n->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
+       C(input_dev);
+       C(tc_classid);
+#endif
+
+#endif
+       C(truesize);
+       atomic_set(&n->users, 1);
+       C(head);
+       C(data);
+       C(tail);
+       C(end);
+
+       atomic_inc(&(skb_shinfo(skb)->dataref));
+       skb->cloned = 1;
+
+       return n;
+}
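
skb_clone() hands out a second struct sk_buff that shares the original data area, bumping the shared-info dataref and marking both buffers cloned, whereas skb_copy() below duplicates the bytes as well. A toy model of that distinction in plain C, with made-up structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Clones share one data buffer and bump its reference count; copies duplicate it. */
struct toy_data { int refs; unsigned char bytes[64]; };
struct toy_skb  { struct toy_data *d; };

static struct toy_skb toy_clone(struct toy_skb *s) { s->d->refs++; return *s; }
static struct toy_skb toy_copy(struct toy_skb *s)
{
        struct toy_skb n;

        n.d = malloc(sizeof(*n.d));
        memcpy(n.d, s->d, sizeof(*n.d));
        n.d->refs = 1;
        return n;
}

int main(void)
{
        struct toy_skb a, b, c;

        a.d = calloc(1, sizeof(*a.d));
        a.d->refs = 1;

        b = toy_clone(&a);              /* shares a.d, refs becomes 2   */
        c = toy_copy(&a);               /* private buffer, refs == 1    */
        printf("shared refs=%d, copy refs=%d, same buffer? %s\n",
               a.d->refs, c.d->refs, (a.d == b.d) ? "yes" : "no");
        free(c.d);
        free(a.d);
        return 0;
}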
+
+static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+       /*
+        *      Shift between the two data areas in bytes
+        */
+       unsigned long offset = new->data - old->data;
+
+       new->list       = NULL;
+       new->sk         = NULL;
+       new->dev        = old->dev;
+       new->real_dev   = old->real_dev;
+       new->priority   = old->priority;
+       new->protocol   = old->protocol;
+       new->dst        = dst_clone(old->dst);
+#ifdef CONFIG_INET
+       new->sp         = secpath_get(old->sp);
+#endif
+       new->h.raw      = old->h.raw + offset;
+       new->nh.raw     = old->nh.raw + offset;
+       new->mac.raw    = old->mac.raw + offset;
+       memcpy(new->cb, old->cb, sizeof(old->cb));
+       new->local_df   = old->local_df;
+       new->pkt_type   = old->pkt_type;
+       new->stamp      = old->stamp;
+       new->destructor = NULL;
+       new->security   = old->security;
+#ifdef CONFIG_NETFILTER
+       new->nfmark     = old->nfmark;
+       new->nfcache    = old->nfcache;
+       new->nfct       = old->nfct;
+       nf_conntrack_get(old->nfct);
+       new->nfctinfo   = old->nfctinfo;
+#ifdef CONFIG_NETFILTER_DEBUG
+       new->nf_debug   = old->nf_debug;
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       new->nf_bridge  = old->nf_bridge;
+       nf_bridge_get(old->nf_bridge);
+#endif
+#endif
+#ifdef CONFIG_NET_SCHED
+#ifdef CONFIG_NET_CLS_ACT
+       new->tc_verd = old->tc_verd;
+#endif
+       new->tc_index   = old->tc_index;
+#endif
+       atomic_set(&new->users, 1);
+}
+
+/**
+ *     skb_copy        -       create private copy of an sk_buff
+ *     @skb: buffer to copy
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and its data. This is used when the
+ *     caller wishes to modify the data and needs a private copy of the
+ *     data to alter. Returns %NULL on failure or the pointer to the buffer
+ *     on success. The returned buffer has a reference count of 1.
+ *
+ *     As a by-product this function converts a non-linear &sk_buff to a
+ *     linear one, so that the &sk_buff becomes completely private and the
+ *     caller is allowed to modify all the data of the returned buffer. This
+ *     means that this function is not recommended for use in circumstances
+ *     when only the header is going to be modified. Use pskb_copy() instead.
+ */
+
+struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+{
+       int headerlen = skb->data - skb->head;
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
+                                     gfp_mask);
+       if (!n)
+               return NULL;
+
+       /* Set the data pointer */
+       skb_reserve(n, headerlen);
+       /* Set the tail pointer and length */
+       skb_put(n, skb->len);
+       n->csum      = skb->csum;
+       n->ip_summed = skb->ip_summed;
+
+       if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
+               BUG();
+
+       copy_skb_header(n, skb);
+       return n;
+}
+
+
+/**
+ *     pskb_copy       -       create copy of an sk_buff with private head.
+ *     @skb: buffer to copy
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and part of its data, located
+ *     in its header. Fragmented data remain shared. This is used when
+ *     the caller wishes to modify only the header of an &sk_buff and needs
+ *     a private copy of the header to alter. Returns %NULL on failure
+ *     or the pointer to the buffer on success.
+ *     The returned buffer has a reference count of 1.
+ */
+
+struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+{
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
+
+       if (!n)
+               goto out;
+
+       /* Set the data pointer */
+       skb_reserve(n, skb->data - skb->head);
+       /* Set the tail pointer and length */
+       skb_put(n, skb_headlen(skb));
+       /* Copy the bytes */
+       memcpy(n->data, skb->data, n->len);
+       n->csum      = skb->csum;
+       n->ip_summed = skb->ip_summed;
+
+       n->data_len  = skb->data_len;
+       n->len       = skb->len;
+
+       if (skb_shinfo(skb)->nr_frags) {
+               int i;
+
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+                       get_page(skb_shinfo(n)->frags[i].page);
+               }
+               skb_shinfo(n)->nr_frags = i;
+       }
+       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
+       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
+
+       if (skb_shinfo(skb)->frag_list) {
+               skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
+               skb_clone_fraglist(n);
+       }
+
+       copy_skb_header(n, skb);
+out:
+       return n;
+}
+
+/**
+ *     pskb_expand_head - reallocate header of &sk_buff
+ *     @skb: buffer to reallocate
+ *     @nhead: room to add at head
+ *     @ntail: room to add at tail
+ *     @gfp_mask: allocation priority
+ *
+ *     Expands (or creates an identical copy, if @nhead and @ntail are zero)
+ *     the header of the skb. The &sk_buff itself is not changed. The
+ *     &sk_buff MUST have a reference count of 1. Returns zero on success
+ *     or a negative error code if expansion failed; in that case the
+ *     &sk_buff is not changed.
+ *
+ *     All the pointers pointing into skb header may change and must be
+ *     reloaded after call to this function.
+ */
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+{
+       int i;
+       u8 *data;
+       int size = nhead + (skb->end - skb->head) + ntail;
+       long off;
+
+       if (skb_shared(skb))
+               BUG();
+
+       size = SKB_DATA_ALIGN(size);
+
+       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       if (!data)
+               goto nodata;
+
+       /* Copy only real data... and, alas, header. This should be
+        * optimized for the cases when header is void. */
+       memcpy(data + nhead, skb->head, skb->tail - skb->head);
+       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               get_page(skb_shinfo(skb)->frags[i].page);
+
+       if (skb_shinfo(skb)->frag_list)
+               skb_clone_fraglist(skb);
+
+       skb_release_data(skb);
+
+       off = (data + nhead) - skb->head;
+
+       skb->head     = data;
+       skb->end      = data + size;
+       skb->data    += off;
+       skb->tail    += off;
+       skb->mac.raw += off;
+       skb->h.raw   += off;
+       skb->nh.raw  += off;
+       skb->cloned   = 0;
+       atomic_set(&skb_shinfo(skb)->dataref, 1);
+       return 0;
+
+nodata:
+       return -ENOMEM;
+}
+
+/* Make private copy of skb with writable head and some headroom */
+
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+{
+       struct sk_buff *skb2;
+       int delta = headroom - skb_headroom(skb);
+
+       if (delta <= 0)
+               skb2 = pskb_copy(skb, GFP_ATOMIC);
+       else {
+               skb2 = skb_clone(skb, GFP_ATOMIC);
+               if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
+                                            GFP_ATOMIC)) {
+                       kfree_skb(skb2);
+                       skb2 = NULL;
+               }
+       }
+       return skb2;
+}
+
+
+/**
+ *     skb_copy_expand -       copy and expand sk_buff
+ *     @skb: buffer to copy
+ *     @newheadroom: new free bytes at head
+ *     @newtailroom: new free bytes at tail
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and its data and while doing so
+ *     allocate additional space.
+ *
+ *     This is used when the caller wishes to modify the data and needs a
+ *     private copy of the data to alter as well as more space for new fields.
+ *     Returns %NULL on failure or the pointer to the buffer
+ *     on success. The returned buffer has a reference count of 1.
+ *
+ *     You must pass %GFP_ATOMIC as the allocation priority if this function
+ *     is called from an interrupt.
+ *
+ *     BUG ALERT: ip_summed is not copied. Why does this work? Is it used
+ *     only by netfilter in the cases when checksum is recalculated? --ANK
+ */
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+                               int newheadroom, int newtailroom, int gfp_mask)
+{
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
+                                     gfp_mask);
+       int head_copy_len, head_copy_off;
+
+       if (!n)
+               return NULL;
+
+       skb_reserve(n, newheadroom);
+
+       /* Set the tail pointer and length */
+       skb_put(n, skb->len);
+
+       head_copy_len = skb_headroom(skb);
+       head_copy_off = 0;
+       if (newheadroom <= head_copy_len)
+               head_copy_len = newheadroom;
+       else
+               head_copy_off = newheadroom - head_copy_len;
+
+       /* Copy the linear header and data. */
+       if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
+                         skb->len + head_copy_len))
+               BUG();
+
+       copy_skb_header(n, skb);
+       skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
+       skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;
+
+       return n;
+}
+
+/**
+ *     skb_pad                 -       zero pad the tail of an skb
+ *     @skb: buffer to pad
+ *     @pad: space to pad
+ *
+ *     Ensure that a buffer is followed by a padding area that is zero
+ *     filled. Used by network drivers which may DMA or transfer data
+ *     beyond the buffer end onto the wire.
+ *
+ *     May return NULL in out of memory cases.
+ */
+struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
+{
+       struct sk_buff *nskb;
+       
+       /* If the skbuff is non-linear, tailroom is always zero. */
+       if (skb_tailroom(skb) >= pad) {
+               memset(skb->data+skb->len, 0, pad);
+               return skb;
+       }
+       
+       nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
+       kfree_skb(skb);
+       if (nskb)
+               memset(nskb->data+nskb->len, 0, pad);
+       return nskb;
+}      
+/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
+ * If realloc==0 and trimming is impossible without changing the data,
+ * we BUG().
+ */
+
+int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
+{
+       int offset = skb_headlen(skb);
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       int i;
+
+       for (i = 0; i < nfrags; i++) {
+               int end = offset + skb_shinfo(skb)->frags[i].size;
+               if (end > len) {
+                       if (skb_cloned(skb)) {
+                               if (!realloc)
+                                       BUG();
+                               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+                                       return -ENOMEM;
+                       }
+                       if (len <= offset) {
+                               put_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb)->nr_frags--;
+                       } else {
+                               skb_shinfo(skb)->frags[i].size = len - offset;
+                       }
+               }
+               offset = end;
+       }
+
+       if (offset < len) {
+               skb->data_len -= skb->len - len;
+               skb->len       = len;
+       } else {
+               if (len <= skb_headlen(skb)) {
+                       skb->len      = len;
+                       skb->data_len = 0;
+                       skb->tail     = skb->data + len;
+                       if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
+                               skb_drop_fraglist(skb);
+               } else {
+                       skb->data_len -= skb->len - len;
+                       skb->len       = len;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ *     __pskb_pull_tail - advance tail of skb header
+ *     @skb: buffer to reallocate
+ *     @delta: number of bytes to advance tail
+ *
+ *     The function only makes sense on a fragmented &sk_buff: it expands
+ *     the header, moving its tail forward and copying the necessary
+ *     data from the fragmented part.
+ *
+ *     The &sk_buff MUST have a reference count of 1.
+ *
+ *     Returns %NULL (and the &sk_buff does not change) if the pull failed,
+ *     or the value of the new tail of the skb on success.
+ *
+ *     All the pointers pointing into skb header may change and must be
+ *     reloaded after call to this function.
+ */
+
+/* Moves tail of skb head forward, copying data from fragmented part,
+ * when it is necessary.
+ * 1. It may fail due to malloc failure.
+ * 2. It may change skb pointers.
+ *
+ * It is pretty complicated. Luckily, it is called only in exceptional cases.
+ */
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
+{
+       /* If the skb does not have enough free space at the tail, get a new
+        * one plus 128 bytes for future expansions. If we have enough
+        * room at the tail, reallocate without expansion only if the skb is
+        * cloned.
+        */
+       int i, k, eat = (skb->tail + delta) - skb->end;
+
+       if (eat > 0 || skb_cloned(skb)) {
+               if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
+                                    GFP_ATOMIC))
+                       return NULL;
+       }
+
+       if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+               BUG();
+
+       /* Optimization: no fragments, no reasons to preestimate
+        * size of pulled pages. Superb.
+        */
+       if (!skb_shinfo(skb)->frag_list)
+               goto pull_pages;
+
+       /* Estimate size of pulled pages. */
+       eat = delta;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size >= eat)
+                       goto pull_pages;
+               eat -= skb_shinfo(skb)->frags[i].size;
+       }
+
+       /* If we need to update the frag list, we are in trouble.
+        * Certainly, it is possible to add an offset to the skb data,
+        * but taking into account that pulling is expected to
+        * be a very rare operation, it is worth fighting against
+        * further bloating of the skb head and crucifying ourselves here
+        * instead. Pure masochism, indeed. 8)8)
+        */
+       if (eat) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+               struct sk_buff *clone = NULL;
+               struct sk_buff *insp = NULL;
+
+               do {
+                       if (!list)
+                               BUG();
+
+                       if (list->len <= eat) {
+                               /* Eaten as whole. */
+                               eat -= list->len;
+                               list = list->next;
+                               insp = list;
+                       } else {
+                               /* Eaten partially. */
+
+                               if (skb_shared(list)) {
+                                       /* Sucks! We need to fork list. :-( */
+                                       clone = skb_clone(list, GFP_ATOMIC);
+                                       if (!clone)
+                                               return NULL;
+                                       insp = list->next;
+                                       list = clone;
+                               } else {
+                                       /* This may be pulled without
+                                        * problems. */
+                                       insp = list;
+                               }
+                               if (!pskb_pull(list, eat)) {
+                                       if (clone)
+                                               kfree_skb(clone);
+                                       return NULL;
+                               }
+                               break;
+                       }
+               } while (eat);
+
+               /* Free pulled out fragments. */
+               while ((list = skb_shinfo(skb)->frag_list) != insp) {
+                       skb_shinfo(skb)->frag_list = list->next;
+                       kfree_skb(list);
+               }
+               /* And insert new clone at head. */
+               if (clone) {
+                       clone->next = list;
+                       skb_shinfo(skb)->frag_list = clone;
+               }
+       }
+       /* Success! Now we may commit changes to skb data. */
+
+pull_pages:
+       eat = delta;
+       k = 0;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+                       put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+                       if (eat) {
+                               skb_shinfo(skb)->frags[k].page_offset += eat;
+                               skb_shinfo(skb)->frags[k].size -= eat;
+                               eat = 0;
+                       }
+                       k++;
+               }
+       }
+       skb_shinfo(skb)->nr_frags = k;
+
+       skb->tail     += delta;
+       skb->data_len -= delta;
+
+       return skb->tail;
+}
+
+/* Copy some data bits from skb to kernel buffer. */
+
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
+{
+       int i, copy;
+       int start = skb_headlen(skb);
+
+       if (offset > (int)skb->len - len)
+               goto fault;
+
+       /* Copy header. */
+       if ((copy = start - offset) > 0) {
+               if (copy > len)
+                       copy = len;
+               memcpy(to, skb->data + offset, copy);
+               if ((len -= copy) == 0)
+                       return 0;
+               offset += copy;
+               to     += copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       u8 *vaddr;
+
+                       if (copy > len)
+                               copy = len;
+
+                       vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+                       memcpy(to,
+                              vaddr + skb_shinfo(skb)->frags[i].page_offset+
+                              offset - start, copy);
+                       kunmap_skb_frag(vaddr);
+
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       to     += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               if (skb_copy_bits(list, offset - start,
+                                                 to, copy))
+                                       goto fault;
+                               if ((len -= copy) == 0)
+                                       return 0;
+                               offset += copy;
+                               to     += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (!len)
+               return 0;
+
+fault:
+       return -EFAULT;
+}
+
+/* Keep iterating until skb_iter_next returns false. */
+void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
+{
+       i->len = skb_headlen(skb);
+       i->data = (unsigned char *)skb->data;
+       i->nextfrag = 0;
+       i->fraglist = NULL;
+}
+
+int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->nextfrag)
+               kunmap_skb_frag(i->data);
+
+       if (i->fraglist) {
+       fraglist:
+               /* We're iterating through fraglist. */
+               if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
+                       i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
+                                               ->frags[i->nextfrag]);
+                       i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
+                               .size;
+                       i->nextfrag++;
+                       return 1;
+               }
+               /* Fragments with fragments?  Too hard! */
+               BUG_ON(skb_shinfo(i->fraglist)->frag_list);
+               i->fraglist = i->fraglist->next;
+               if (!i->fraglist)
+                       goto end;
+
+               i->len = skb_headlen(i->fraglist);
+               i->data = i->fraglist->data;
+               i->nextfrag = 0;
+               return 1;
+       }
+
+       if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
+               i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
+               i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
+               i->nextfrag++;
+               return 1;
+       }
+
+       i->fraglist = skb_shinfo(skb)->frag_list;
+       if (i->fraglist)
+               goto fraglist;
+
+end:
+       /* Bug trap for callers */
+       i->data = NULL;
+       return 0;
+}
+
+void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->data && i->nextfrag)
+               kunmap_skb_frag(i->data);
+       /* Bug trap for callers */
+       i->data = NULL;
+}
+
+/* Checksum skb data. */
+
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+                         int len, unsigned int csum)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       int pos = 0;
+
+       /* Checksum header. */
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               csum = csum_partial(skb->data + offset, copy, csum);
+               if ((len -= copy) == 0)
+                       return csum;
+               offset += copy;
+               pos     = copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       unsigned int csum2;
+                       u8 *vaddr;
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       vaddr = kmap_skb_frag(frag);
+                       csum2 = csum_partial(vaddr + frag->page_offset +
+                                            offset - start, copy, 0);
+                       kunmap_skb_frag(vaddr);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if (!(len -= copy))
+                               return csum;
+                       offset += copy;
+                       pos    += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               unsigned int csum2;
+                               if (copy > len)
+                                       copy = len;
+                               csum2 = skb_checksum(list, offset - start,
+                                                    copy, 0);
+                               csum = csum_block_add(csum, csum2, pos);
+                               if ((len -= copy) == 0)
+                                       return csum;
+                               offset += copy;
+                               pos    += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (len)
+               BUG();
+
+       return csum;
+}
+
+/* Both of above in one bottle. */
+
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+                                   u8 *to, int len, unsigned int csum)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       int pos = 0;
+
+       /* Copy header. */
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               csum = csum_partial_copy_nocheck(skb->data + offset, to,
+                                                copy, csum);
+               if ((len -= copy) == 0)
+                       return csum;
+               offset += copy;
+               to     += copy;
+               pos     = copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       unsigned int csum2;
+                       u8 *vaddr;
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       vaddr = kmap_skb_frag(frag);
+                       csum2 = csum_partial_copy_nocheck(vaddr +
+                                                         frag->page_offset +
+                                                         offset - start, to,
+                                                         copy, 0);
+                       kunmap_skb_frag(vaddr);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if (!(len -= copy))
+                               return csum;
+                       offset += copy;
+                       to     += copy;
+                       pos    += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       unsigned int csum2;
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               csum2 = skb_copy_and_csum_bits(list,
+                                                              offset - start,
+                                                              to, copy, 0);
+                               csum = csum_block_add(csum, csum2, pos);
+                               if ((len -= copy) == 0)
+                                       return csum;
+                               offset += copy;
+                               to     += copy;
+                               pos    += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (len)
+               BUG();
+       return csum;
+}
+
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
+{
+       unsigned int csum;
+       long csstart;
+
+       if (skb->ip_summed == CHECKSUM_HW)
+               csstart = skb->h.raw - skb->data;
+       else
+               csstart = skb_headlen(skb);
+
+       if (csstart > skb_headlen(skb))
+               BUG();
+
+       memcpy(to, skb->data, csstart);
+
+       csum = 0;
+       if (csstart != skb->len)
+               csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
+                                             skb->len - csstart, 0);
+
+       if (skb->ip_summed == CHECKSUM_HW) {
+               long csstuff = csstart + skb->csum;
+
+               *((unsigned short *)(to + csstuff)) = csum_fold(csum);
+       }
+}
+
+/**
+ *     skb_dequeue - remove from the head of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the head of the list. The list lock is taken so the function
+ *     may be used safely with other locking list functions. The head item is
+ *     returned or %NULL if the list is empty.
+ */
+
+struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+       unsigned long flags;
+       struct sk_buff *result;
+
+       spin_lock_irqsave(&list->lock, flags);
+       result = __skb_dequeue(list);
+       spin_unlock_irqrestore(&list->lock, flags);
+       return result;
+}
+
+/**
+ *     skb_dequeue_tail - remove from the tail of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the tail of the list. The list lock is taken so the function
+ *     may be used safely with other locking list functions. The tail item is
+ *     returned or %NULL if the list is empty.
+ */
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+{
+       unsigned long flags;
+       struct sk_buff *result;
+
+       spin_lock_irqsave(&list->lock, flags);
+       result = __skb_dequeue_tail(list);
+       spin_unlock_irqrestore(&list->lock, flags);
+       return result;
+}
+
+/**
+ *     skb_queue_purge - empty a list
+ *     @list: list to empty
+ *
+ *     Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *     the list and one reference dropped. This function takes the list
+ *     lock and is atomic with respect to other list locking functions.
+ */
+void skb_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue(list)) != NULL)
+               kfree_skb(skb);
+}
+
+/**
+ *     skb_queue_head - queue a buffer at the list head
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the start of the list. This function takes the
+ *     list lock and can be used safely with other locking &sk_buff
+ *     functions.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_queue_head(list, newsk);
+       spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ *     skb_queue_tail - queue a buffer at the list tail
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the tail of the list. This function takes the
+ *     list lock and can be used safely with other locking &sk_buff
+ *     functions.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_queue_tail(list, newsk);
+       spin_unlock_irqrestore(&list->lock, flags);
+}
+/**
+ *     skb_unlink      -       remove a buffer from a list
+ *     @skb: buffer to remove
+ *
+ *     Remove a packet from a list. The list locks are taken and this
+ *     function is atomic with respect to other list locked calls.
+ *
+ *     Works even without knowing the list it is sitting on, which can be
+ *     handy at times. It also means that THE LIST MUST EXIST when you
+ *     unlink. Thus a list must have its contents unlinked before it is
+ *     destroyed.
+ */
+void skb_unlink(struct sk_buff *skb)
+{
+       struct sk_buff_head *list = skb->list;
+
+       if (list) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&list->lock, flags);
+               if (skb->list == list)
+                       __skb_unlink(skb, skb->list);
+               spin_unlock_irqrestore(&list->lock, flags);
+       }
+}
+
+
+/**
+ *     skb_append      -       append a buffer
+ *     @old: buffer to insert after
+ *     @newsk: buffer to insert
+ *
+ *     Place a packet after a given packet in a list. The list locks are taken
+ *     and this function is atomic with respect to other list locked calls.
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+
+void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&old->list->lock, flags);
+       __skb_append(old, newsk);
+       spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+
+/**
+ *     skb_insert      -       insert a buffer
+ *     @old: buffer to insert before
+ *     @newsk: buffer to insert
+ *
+ *     Place a packet before a given packet in a list. The list locks are taken
+ *     and this function is atomic with respect to other list locked calls.
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&old->list->lock, flags);
+       __skb_insert(newsk, old->prev, old, old->list);
+       spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+#if 0
+/*
+ *     Tune the memory allocator for a new MTU size.
+ */
+void skb_add_mtu(int mtu)
+{
+       /* Must match allocation in alloc_skb */
+       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
+
+       kmem_add_cache_size(mtu);
+}
+#endif
+
+static void inline skb_split_inside_header(struct sk_buff *skb,
+                                          struct sk_buff* skb1,
+                                          const u32 len, const int pos)
+{
+       int i;
+
+       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
+
+       /* And move data appendix as is. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
+
+       skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb1->data_len             = skb->data_len;
+       skb1->len                  += skb1->data_len;
+       skb->data_len              = 0;
+       skb->len                   = len;
+       skb->tail                  = skb->data + len;
+}
+
+static void inline skb_split_no_header(struct sk_buff *skb,
+                                      struct sk_buff* skb1,
+                                      const u32 len, int pos)
+{
+       int i, k = 0;
+       const int nfrags = skb_shinfo(skb)->nr_frags;
+
+       skb_shinfo(skb)->nr_frags = 0;
+       skb1->len                 = skb1->data_len = skb->len - len;
+       skb->len                  = len;
+       skb->data_len             = len - pos;
+
+       for (i = 0; i < nfrags; i++) {
+               int size = skb_shinfo(skb)->frags[i].size;
+
+               if (pos + size > len) {
+                       skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
+
+                       if (pos < len) {
+                               /* Split frag.
+                                * We have two variants in this case:
+                                * 1. Move the whole frag to the second
+                                *    part, if possible. E.g. this
+                                *    approach is mandatory for TUX,
+                                *    where splitting is expensive.
+                                * 2. Split it accurately. This is what
+                                *    we do here.
+                                */
+                               get_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_shinfo(skb1)->frags[0].size -= len - pos;
+                               skb_shinfo(skb)->frags[i].size  = len - pos;
+                               skb_shinfo(skb)->nr_frags++;
+                       }
+                       k++;
+               } else
+                       skb_shinfo(skb)->nr_frags++;
+               pos += size;
+       }
+       skb_shinfo(skb1)->nr_frags = k;
+}
+
+/**
+ * skb_split - split a fragmented skb into two parts at a given offset
+ * @skb: buffer to split; retains the first @len bytes
+ * @skb1: buffer that receives everything beyond @len
+ * @len: offset, in bytes, at which @skb is split
+ */
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
+{
+       int pos = skb_headlen(skb);
+
+       if (len < pos)  /* Split line is inside header. */
+               skb_split_inside_header(skb, skb1, len, pos);
+       else            /* Second chunk has no header, nothing to copy. */
+               skb_split_no_header(skb, skb1, len, pos);
+}
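A short sketch of how a caller might use skb_split(); it is illustrative only (not part of the patch) and mirrors the pattern used by TCP segmentation: the original buffer keeps the first len bytes and a freshly allocated buffer receives the rest. The helper name and the GFP_ATOMIC allocation are assumptions.

/* Hypothetical helper: keep the first 'len' bytes in 'skb' and move the
 * rest (linear tail and/or page frags) into a new buffer, which is returned.
 */
static struct sk_buff *split_off_tail(struct sk_buff *skb, u32 len)
{
        struct sk_buff *nskb = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

        if (!nskb)
                return NULL;
        skb_split(skb, nskb, len);      /* skb->len is now 'len' */
        return nskb;                    /* nskb carries the remaining bytes */
}

A real caller (TCP, for example) still has to build or copy protocol headers into the new buffer before it can be transmitted.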
+
+void __init skb_init(void)
+{
+       skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+                                             sizeof(struct sk_buff),
+                                             0,
+                                             SLAB_HWCACHE_ALIGN,
+                                             NULL, NULL);
+       if (!skbuff_head_cache)
+               panic("cannot create skbuff cache");
+}
+
+EXPORT_SYMBOL(___pskb_trim);
+EXPORT_SYMBOL(__kfree_skb);
+EXPORT_SYMBOL(__pskb_pull_tail);
+EXPORT_SYMBOL(alloc_skb);
+EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(pskb_expand_head);
+EXPORT_SYMBOL(skb_checksum);
+EXPORT_SYMBOL(skb_clone);
+EXPORT_SYMBOL(skb_clone_fraglist);
+EXPORT_SYMBOL(skb_copy);
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
+EXPORT_SYMBOL(skb_copy_bits);
+EXPORT_SYMBOL(skb_copy_expand);
+EXPORT_SYMBOL(skb_over_panic);
+EXPORT_SYMBOL(skb_pad);
+EXPORT_SYMBOL(skb_realloc_headroom);
+EXPORT_SYMBOL(skb_under_panic);
+EXPORT_SYMBOL(skb_dequeue);
+EXPORT_SYMBOL(skb_dequeue_tail);
+EXPORT_SYMBOL(skb_insert);
+EXPORT_SYMBOL(skb_queue_purge);
+EXPORT_SYMBOL(skb_queue_head);
+EXPORT_SYMBOL(skb_queue_tail);
+EXPORT_SYMBOL(skb_unlink);
+EXPORT_SYMBOL(skb_append);
+EXPORT_SYMBOL(skb_split);
+EXPORT_SYMBOL(skb_iter_first);
+EXPORT_SYMBOL(skb_iter_next);
+EXPORT_SYMBOL(skb_iter_abort);
diff --git a/linux-2.6.9-xen-sparse/net/ipv4/raw.c b/linux-2.6.9-xen-sparse/net/ipv4/raw.c
new file mode 100644 (file)
index 0000000..4796250
--- /dev/null
@@ -0,0 +1,886 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             RAW - implementation of IP "raw" sockets.
+ *
+ * Version:    $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
+ *
+ * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
+ *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ *             Alan Cox        :       verify_area() fixed up
+ *             Alan Cox        :       ICMP error handling
+ *             Alan Cox        :       EMSGSIZE if you send too big a packet
+ *             Alan Cox        :       Now uses generic datagrams and shared
+ *                                     skbuff library. No more peek crashes,
+ *                                     no more backlogs
+ *             Alan Cox        :       Checks sk->broadcast.
+ *             Alan Cox        :       Uses skb_free_datagram/skb_copy_datagram
+ *             Alan Cox        :       Raw passes ip options too
+ *             Alan Cox        :       Setsocketopt added
+ *             Alan Cox        :       Fixed error return for broadcasts
+ *             Alan Cox        :       Removed wake_up calls
+ *             Alan Cox        :       Use ttl/tos
+ *             Alan Cox        :       Cleaned up old debugging
+ *             Alan Cox        :       Use new kernel side addresses
+ *     Arnt Gulbrandsen        :       Fixed MSG_DONTROUTE in raw sockets.
+ *             Alan Cox        :       BSD style RAW socket demultiplexing.
+ *             Alan Cox        :       Beginnings of mrouted support.
+ *             Alan Cox        :       Added IP_HDRINCL option.
+ *             Alan Cox        :       Skip broadcast check if BSDism set.
+ *             David S. Miller :       New socket lookup architecture.
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h> 
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <asm/ioctls.h>
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/aio.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sockios.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/mroute.h>
+#include <linux/netdevice.h>
+#include <linux/in_route.h>
+#include <linux/route.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <linux/gfp.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/raw.h>
+#include <net/snmp.h>
+#include <net/inet_common.h>
+#include <net/checksum.h>
+#include <net/xfrm.h>
+#include <linux/rtnetlink.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+
+struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE];
+rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
+
+static void raw_v4_hash(struct sock *sk)
+{
+       struct hlist_head *head = &raw_v4_htable[inet_sk(sk)->num &
+                                                (RAWV4_HTABLE_SIZE - 1)];
+
+       write_lock_bh(&raw_v4_lock);
+       sk_add_node(sk, head);
+       sock_prot_inc_use(sk->sk_prot);
+       write_unlock_bh(&raw_v4_lock);
+}
+
+static void raw_v4_unhash(struct sock *sk)
+{
+       write_lock_bh(&raw_v4_lock);
+       if (sk_del_node_init(sk))
+               sock_prot_dec_use(sk->sk_prot);
+       write_unlock_bh(&raw_v4_lock);
+}
+
+struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
+                            unsigned long raddr, unsigned long laddr,
+                            int dif)
+{
+       struct hlist_node *node;
+
+       sk_for_each_from(sk, node) {
+               struct inet_opt *inet = inet_sk(sk);
+
+               if (inet->num == num                                    &&
+                   !(inet->daddr && inet->daddr != raddr)              &&
+                   !(inet->rcv_saddr && inet->rcv_saddr != laddr)      &&
+                   !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+                       goto found; /* gotcha */
+       }
+       sk = NULL;
+found:
+       return sk;
+}
+
+/*
+ *     0 - deliver
+ *     1 - block
+ */
+static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+{
+       int type;
+
+       if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
+               return 1;
+
+       type = skb->h.icmph->type;
+       if (type < 32) {
+               __u32 data = raw4_sk(sk)->filter.data;
+
+               return ((1 << type) & data) != 0;
+       }
+
+       /* Do not block unknown ICMP types */
+       return 0;
+}
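The filter is a plain 32-bit mask indexed by ICMP type: a set bit blocks that type, a clear bit delivers it, and types of 32 or above are always delivered. An illustrative userspace sketch (not part of the patch; needs CAP_NET_RAW) that keeps only echo replies:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>

int main(void)
{
        struct icmp_filter filt = { .data = ~(1U << ICMP_ECHOREPLY) };
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

        if (s < 0 || setsockopt(s, SOL_RAW, ICMP_FILTER,
                                &filt, sizeof(filt)) < 0) {
                perror("raw ICMP socket");
                return 1;
        }
        /* recvfrom() on 's' now only sees ICMP echo replies. */
        return 0;
}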
+
+/* IP input processing comes here for RAW socket delivery.
+ * Caller owns SKB, so we must make clones.
+ *
+ * RFC 1122: SHOULD pass TOS value up to the transport layer.
+ * -> It does. And not only TOS, but the whole IP header.
+ */
+void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
+{
+       struct sock *sk;
+       struct hlist_head *head;
+
+       read_lock(&raw_v4_lock);
+       head = &raw_v4_htable[hash];
+       if (hlist_empty(head))
+               goto out;
+       sk = __raw_v4_lookup(__sk_head(head), iph->protocol,
+                            iph->saddr, iph->daddr,
+                            skb->dev->ifindex);
+
+       while (sk) {
+               if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
+                       struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+
+                       /* Not releasing hash table! */
+                       if (clone)
+                               raw_rcv(sk, clone);
+               }
+               sk = __raw_v4_lookup(sk_next(sk), iph->protocol,
+                                    iph->saddr, iph->daddr,
+                                    skb->dev->ifindex);
+       }
+out:
+       read_unlock(&raw_v4_lock);
+}
+
+void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
+{
+       struct inet_opt *inet = inet_sk(sk);
+       int type = skb->h.icmph->type;
+       int code = skb->h.icmph->code;
+       int err = 0;
+       int harderr = 0;
+
+       /* Report error on raw socket, if:
+          1. User requested ip_recverr.
+          2. Socket is connected (otherwise the error indication
+             is useless without ip_recverr) and the error is hard.
+        */
+       if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
+               return;
+
+       switch (type) {
+       default:
+       case ICMP_TIME_EXCEEDED:
+               err = EHOSTUNREACH;
+               break;
+       case ICMP_SOURCE_QUENCH:
+               return;
+       case ICMP_PARAMETERPROB:
+               err = EPROTO;
+               harderr = 1;
+               break;
+       case ICMP_DEST_UNREACH:
+               err = EHOSTUNREACH;
+               if (code > NR_ICMP_UNREACH)
+                       break;
+               err = icmp_err_convert[code].errno;
+               harderr = icmp_err_convert[code].fatal;
+               if (code == ICMP_FRAG_NEEDED) {
+                       harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
+                       err = EMSGSIZE;
+               }
+       }
+
+       if (inet->recverr) {
+               struct iphdr *iph = (struct iphdr*)skb->data;
+               u8 *payload = skb->data + (iph->ihl << 2);
+
+               if (inet->hdrincl)
+                       payload = skb->data;
+               ip_icmp_error(sk, skb, err, 0, info, payload);
+       }
+
+       if (inet->recverr || harderr) {
+               sk->sk_err = err;
+               sk->sk_error_report(sk);
+       }
+}
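When IP_RECVERR is enabled, the errors queued here via ip_icmp_error() are handed back to the application through the socket error queue. A rough userspace sketch (not part of the patch; constants and layout as documented in ip(7)) of draining that queue:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_error_queue(int s)
{
        unsigned char data[2048], ctrl[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;
        int on = 1;

        /* Normally enabled right after socket creation. */
        setsockopt(s, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
        if (recvmsg(s, &msg, MSG_ERRQUEUE) < 0)
                return;
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
                        struct sock_extended_err *ee =
                                (struct sock_extended_err *)CMSG_DATA(cm);

                        printf("queued error %d (origin %u)\n",
                               ee->ee_errno, ee->ee_origin);
                }
        }
}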
+
+static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
+{
+       /* Charge it to the socket. */
+       
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
+               /* FIXME: increment a raw drops counter here */
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+int raw_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       skb_push(skb, skb->data - skb->nh.raw);
+
+       raw_rcv_skb(sk, skb);
+       return 0;
+}
+
+static int raw_send_hdrinc(struct sock *sk, void *from, int length,
+                       struct rtable *rt, 
+                       unsigned int flags)
+{
+       struct inet_opt *inet = inet_sk(sk);
+       int hh_len;
+       struct iphdr *iph;
+       struct sk_buff *skb;
+       int err;
+
+       if (length > rt->u.dst.dev->mtu) {
+               ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
+                              rt->u.dst.dev->mtu);
+               return -EMSGSIZE;
+       }
+       if (flags&MSG_PROBE)
+               goto out;
+
+       hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+
+       skb = sock_alloc_send_skb(sk, length+hh_len+15,
+                                 flags&MSG_DONTWAIT, &err);
+       if (skb == NULL)
+               goto error; 
+       skb_reserve(skb, hh_len);
+
+       skb->priority = sk->sk_priority;
+       skb->dst = dst_clone(&rt->u.dst);
+
+       skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
+
+       skb->ip_summed = CHECKSUM_NONE;
+
+       skb->h.raw = skb->nh.raw;
+       err = memcpy_fromiovecend((void *)iph, from, 0, length);
+       if (err)
+               goto error_fault;
+
+       /* We don't modify invalid header */
+       if (length >= sizeof(*iph) && iph->ihl * 4 <= length) {
+               if (!iph->saddr)
+                       iph->saddr = rt->rt_src;
+               iph->check   = 0;
+               iph->tot_len = htons(length);
+               if (!iph->id)
+                       ip_select_ident(iph, &rt->u.dst, NULL);
+
+               iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+       }
+
+       err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+                     dst_output);
+       if (err > 0)
+               err = inet->recverr ? net_xmit_errno(err) : 0;
+       if (err)
+               goto error;
+out:
+       return 0;
+
+error_fault:
+       err = -EFAULT;
+       kfree_skb(skb);
+error:
+       IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+       return err; 
+}
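A userspace counterpart, illustrative only and not part of the patch: sending a datagram with a caller-supplied IP header, the path handled by raw_send_hdrinc() above. Opening the socket with IPPROTO_RAW implies IP_HDRINCL; the destination address is a placeholder (TEST-NET) and CAP_NET_RAW is required. Note how saddr, id and check may be left zero, since the kernel fills them in.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <arpa/inet.h>

int main(void)
{
        struct packet {
                struct iphdr  ip;
                struct udphdr udp;
        } pkt;
        struct sockaddr_in dst = { .sin_family = AF_INET };
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

        if (s < 0) {
                perror("socket");
                return 1;
        }
        memset(&pkt, 0, sizeof(pkt));
        pkt.ip.version  = 4;
        pkt.ip.ihl      = 5;
        pkt.ip.ttl      = 64;
        pkt.ip.protocol = IPPROTO_UDP;
        pkt.ip.tot_len  = htons(sizeof(pkt));      /* rewritten by the kernel anyway */
        pkt.ip.daddr    = inet_addr("192.0.2.1");  /* placeholder destination */
        /* pkt.ip.saddr, .id and .check stay 0 and are filled in as above. */

        pkt.udp.source = htons(12345);
        pkt.udp.dest   = htons(54321);
        pkt.udp.len    = htons(sizeof(pkt.udp));
        pkt.udp.check  = 0;                        /* optional for IPv4 UDP */

        dst.sin_addr.s_addr = pkt.ip.daddr;
        if (sendto(s, &pkt, sizeof(pkt), 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");
        close(s);
        return 0;
}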
+
+static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
+{
+       struct iovec *iov;
+       u8 __user *type = NULL;
+       u8 __user *code = NULL;
+       int probed = 0;
+       int i;
+
+       if (!msg->msg_iov)
+               return;
+
+       for (i = 0; i < msg->msg_iovlen; i++) {
+               iov = &msg->msg_iov[i];
+               if (!iov)
+                       continue;
+
+               switch (fl->proto) {
+               case IPPROTO_ICMP:
+                       /* check if one-byte field is readable or not. */
+                       if (iov->iov_base && iov->iov_len < 1)
+                               break;
+
+                       if (!type) {
+                               type = iov->iov_base;
+                               /* check if code field is readable or not. */
+                               if (iov->iov_len > 1)
+                                       code = type + 1;
+                       } else if (!code)
+                               code = iov->iov_base;
+
+                       if (type && code) {
+                               get_user(fl->fl_icmp_type, type);
+                               __get_user(fl->fl_icmp_code, code);
+                               probed = 1;
+                       }
+                       break;
+               default:
+                       probed = 1;
+                       break;
+               }
+               if (probed)
+                       break;
+       }
+}
+
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                      size_t len)
+{
+       struct inet_opt *inet = inet_sk(sk);
+       struct ipcm_cookie ipc;
+       struct rtable *rt = NULL;
+       int free = 0;
+       u32 daddr;
+       u32 saddr;
+       u8  tos;
+       int err;
+
+       err = -EMSGSIZE;
+       if (len < 0 || len > 0xFFFF)
+               goto out;
+
+       /*
+        *      Check the flags.
+        */
+
+       err = -EOPNOTSUPP;
+       if (msg->msg_flags & MSG_OOB)   /* Mirror BSD error message */
+               goto out;               /* compatibility */
+                        
+       /*
+        *      Get and verify the address. 
+        */
+
+       if (msg->msg_namelen) {
+               struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
+               err = -EINVAL;
+               if (msg->msg_namelen < sizeof(*usin))
+                       goto out;
+               if (usin->sin_family != AF_INET) {
+                       static int complained;
+                       if (!complained++)
+                               printk(KERN_INFO "%s forgot to set AF_INET in "
+                                                "raw sendmsg. Fix it!\n",
+                                                current->comm);
+                       err = -EINVAL;
+                       if (usin->sin_family)
+                               goto out;
+               }
+               daddr = usin->sin_addr.s_addr;
+               /* ANK: I did not forget to get protocol from port field.
+                * I just do not know who uses this weirdness.
+                * IP_HDRINCL is much more convenient.
+                */
+       } else {
+               err = -EDESTADDRREQ;
+               if (sk->sk_state != TCP_ESTABLISHED) 
+                       goto out;
+               daddr = inet->daddr;
+       }
+
+       ipc.addr = inet->saddr;
+       ipc.opt = NULL;
+       ipc.oif = sk->sk_bound_dev_if;
+
+       if (msg->msg_controllen) {
+               err = ip_cmsg_send(msg, &ipc);
+               if (err)
+                       goto out;
+               if (ipc.opt)
+                       free = 1;
+       }
+
+       saddr = ipc.addr;
+       ipc.addr = daddr;
+
+       if (!ipc.opt)
+               ipc.opt = inet->opt;
+
+       if (ipc.opt) {
+               err = -EINVAL;
+               /* Linux does not mangle headers on raw sockets,
+                * so that IP options + IP_HDRINCL is nonsense.
+                */
+               if (inet->hdrincl)
+                       goto done;
+               if (ipc.opt->srr) {
+                       if (!daddr)
+                               goto done;
+                       daddr = ipc.opt->faddr;
+               }
+       }
+       tos = RT_TOS(inet->tos) | sk->sk_localroute;
+       if (msg->msg_flags & MSG_DONTROUTE)
+               tos |= RTO_ONLINK;
+
+       if (MULTICAST(daddr)) {
+               if (!ipc.oif)
+                       ipc.oif = inet->mc_index;
+               if (!saddr)
+                       saddr = inet->mc_addr;
+       }
+
+       {
+               struct flowi fl = { .oif = ipc.oif,
+                                   .nl_u = { .ip4_u =
+                                             { .daddr = daddr,
+                                               .saddr = saddr,
+                                               .tos = tos } },
+                                   .proto = inet->hdrincl ? IPPROTO_RAW :
+                                                            sk->sk_protocol,
+                                 };
+               if (!inet->hdrincl)
+                       raw_probe_proto_opt(&fl, msg);
+
+               err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
+       }
+       if (err)
+               goto done;
+
+       err = -EACCES;
+       if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
+               goto done;
+
+       if (msg->msg_flags & MSG_CONFIRM)
+               goto do_confirm;
+back_from_confirm:
+
+       if (inet->hdrincl)
+               err = raw_send_hdrinc(sk, msg->msg_iov, len,
+                                     rt, msg->msg_flags);
+       else {
+               if (!ipc.addr)
+                       ipc.addr = rt->rt_dst;
+               lock_sock(sk);
+               err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
+                                       &ipc, rt, msg->msg_flags);
+               if (err)
+                       ip_flush_pending_frames(sk);
+               else if (!(msg->msg_flags & MSG_MORE))
+                       err = ip_push_pending_frames(sk);
+               release_sock(sk);
+       }
+done:
+       if (free)
+               kfree(ipc.opt);
+       ip_rt_put(rt);
+
+out:   return err < 0 ? err : len;
+
+do_confirm:
+       dst_confirm(&rt->u.dst);
+       if (!(msg->msg_flags & MSG_PROBE) || len)
+               goto back_from_confirm;
+       err = 0;
+       goto done;
+}
+
+static void raw_close(struct sock *sk, long timeout)
+{
+        /*
+        * Raw sockets may have direct kernel references. Kill them.
+        */
+       ip_ra_control(sk, 0, NULL);
+
+       sk_common_release(sk);
+}
+
+/* This gets rid of all the nasties in af_inet. -DaveM */
+static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       struct inet_opt *inet = inet_sk(sk);
+       struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+       int ret = -EINVAL;
+       int chk_addr_ret;
+
+       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+               goto out;
+       chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
+       ret = -EADDRNOTAVAIL;
+       if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+           chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+               goto out;
+       inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+       if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+               inet->saddr = 0;  /* Use device */
+       sk_dst_reset(sk);
+       ret = 0;
+out:   return ret;
+}
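Binding is optional for raw sockets but, as the lookup in __raw_v4_lookup() shows, it restricts delivery to packets addressed to the bound local address. A small userspace sketch (not part of the patch; 127.0.0.1 is a placeholder, CAP_NET_RAW required):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct sockaddr_in local = { .sin_family = AF_INET };
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

        local.sin_addr.s_addr = inet_addr("127.0.0.1");
        /* sin_port is ignored: for raw sockets the protocol plays that role. */
        if (s < 0 || bind(s, (struct sockaddr *)&local, sizeof(local)) < 0) {
                perror("bind raw socket");
                return 1;
        }
        return 0;
}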
+
+/*
+ *     This should be easy, if there is something there
+ *     we return it, otherwise we block.
+ */
+
+int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len, int noblock, int flags, int *addr_len)
+{
+       struct inet_opt *inet = inet_sk(sk);
+       size_t copied = 0;
+       int err = -EOPNOTSUPP;
+       struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+       struct sk_buff *skb;
+
+       if (flags & MSG_OOB)
+               goto out;
+
+       if (addr_len)
+               *addr_len = sizeof(*sin);
+
+       if (flags & MSG_ERRQUEUE) {
+               err = ip_recv_error(sk, msg, len);
+               goto out;
+       }
+
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb)
+               goto out;
+
+       copied = skb->len;
+       if (len < copied) {
+               msg->msg_flags |= MSG_TRUNC;
+               copied = len;
+       }
+
+       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       if (err)
+               goto done;
+
+       sock_recv_timestamp(msg, sk, skb);
+
+       /* Copy the address. */
+       if (sin) {
+               sin->sin_family = AF_INET;
+               sin->sin_addr.s_addr = skb->nh.iph->saddr;
+               memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+       }
+       if (inet->cmsg_flags)
+               ip_cmsg_recv(msg, skb);
+       if (flags & MSG_TRUNC)
+               copied = skb->len;
+done:
+       skb_free_datagram(sk, skb);
+out:   return err ? err : copied;
+}
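Because raw_v4_input() passes the complete IP header up (see the comment above it), a reader has to skip ihl * 4 bytes to reach the transport payload. An illustrative userspace sketch, not part of the patch:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

int main(void)
{
        unsigned char buf[4096];
        struct sockaddr_in from;
        socklen_t fromlen = sizeof(from);
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
        ssize_t n;

        if (s < 0) {
                perror("socket");
                return 1;
        }
        n = recvfrom(s, buf, sizeof(buf), 0,
                     (struct sockaddr *)&from, &fromlen);
        if (n >= (ssize_t)sizeof(struct iphdr)) {
                struct iphdr ip;

                memcpy(&ip, buf, sizeof(ip));
                printf("%zd bytes from %s, protocol %u, payload at offset %u\n",
                       n, inet_ntoa(from.sin_addr), ip.protocol, ip.ihl * 4);
        }
        return 0;
}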
+
+static int raw_init(struct sock *sk)
+{
+       struct raw_opt *tp = raw4_sk(sk);
+       if (inet_sk(sk)->num == IPPROTO_ICMP)
+               memset(&tp->filter, 0, sizeof(tp->filter));
+       return 0;
+}
+
+static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+{
+       if (optlen > sizeof(struct icmp_filter))
+               optlen = sizeof(struct icmp_filter);
+       if (copy_from_user(&raw4_sk(sk)->filter, optval, optlen))
+               return -EFAULT;
+       return 0;
+}
+
+static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
+{
+       int len, ret = -EFAULT;
+
+       if (get_user(len, optlen))
+               goto out;
+       ret = -EINVAL;
+       if (len < 0)
+               goto out;
+       if (len > sizeof(struct icmp_filter))
+               len = sizeof(struct icmp_filter);
+       ret = -EFAULT;
+       if (put_user(len, optlen) ||
+           copy_to_user(optval, &raw4_sk(sk)->filter, len))
+               goto out;
+       ret = 0;
+out:   return ret;
+}
+
+static int raw_setsockopt(struct sock *sk, int level, int optname, 
+                         char __user *optval, int optlen)
+{
+       if (level != SOL_RAW)
+               return ip_setsockopt(sk, level, optname, optval, optlen);
+
+       if (optname == ICMP_FILTER) {
+               if (inet_sk(sk)->num != IPPROTO_ICMP)
+                       return -EOPNOTSUPP;
+               else
+                       return raw_seticmpfilter(sk, optval, optlen);
+       }
+       return -ENOPROTOOPT;
+}
+
+static int raw_getsockopt(struct sock *sk, int level, int optname, 
+                         char __user *optval, int __user *optlen)
+{
+       if (level != SOL_RAW)
+               return ip_getsockopt(sk, level, optname, optval, optlen);
+
+       if (optname == ICMP_FILTER) {
+               if (inet_sk(sk)->num != IPPROTO_ICMP)
+                       return -EOPNOTSUPP;
+               else
+                       return raw_geticmpfilter(sk, optval, optlen);
+       }
+       return -ENOPROTOOPT;
+}
+
+static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+       switch (cmd) {
+               case SIOCOUTQ: {
+                       int amount = atomic_read(&sk->sk_wmem_alloc);
+                       return put_user(amount, (int __user *)arg);
+               }
+               case SIOCINQ: {
+                       struct sk_buff *skb;
+                       int amount = 0;
+
+                       spin_lock_irq(&sk->sk_receive_queue.lock);
+                       skb = skb_peek(&sk->sk_receive_queue);
+                       if (skb != NULL)
+                               amount = skb->len;
+                       spin_unlock_irq(&sk->sk_receive_queue.lock);
+                       return put_user(amount, (int __user *)arg);
+               }
+
+               default:
+#ifdef CONFIG_IP_MROUTE
+                       return ipmr_ioctl(sk, cmd, (void __user *)arg);
+#else
+                       return -ENOIOCTLCMD;
+#endif
+       }
+}
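The two ioctls above map directly onto userspace: SIOCINQ reports the length of the next queued datagram and SIOCOUTQ the bytes still charged to the send side. A small sketch (not part of the patch; 's' is any raw socket such as those opened in the earlier sketches):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void report_queues(int s)
{
        int inq = 0, outq = 0;

        if (ioctl(s, SIOCINQ, &inq) == 0 && ioctl(s, SIOCOUTQ, &outq) == 0)
                printf("next datagram: %d bytes, unsent: %d bytes\n", inq, outq);
}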
+
+struct proto raw_prot = {
+       .name =         "RAW",
+       .close =        raw_close,
+       .connect =      ip4_datagram_connect,
+       .disconnect =   udp_disconnect,
+       .ioctl =        raw_ioctl,
+       .init =         raw_init,
+       .setsockopt =   raw_setsockopt,
+       .getsockopt =   raw_getsockopt,
+       .sendmsg =      raw_sendmsg,
+       .recvmsg =      raw_recvmsg,
+       .bind =         raw_bind,
+       .backlog_rcv =  raw_rcv_skb,
+       .hash =         raw_v4_hash,
+       .unhash =       raw_v4_unhash,
+       .slab_obj_size = sizeof(struct raw_sock),
+};
+
+#ifdef CONFIG_PROC_FS
+struct raw_iter_state {
+       int bucket;
+};
+
+#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private)
+
+static struct sock *raw_get_first(struct seq_file *seq)
+{
+       struct sock *sk;
+       struct raw_iter_state* state = raw_seq_private(seq);
+
+       for (state->bucket = 0; state->bucket < RAWV4_HTABLE_SIZE; ++state->bucket) {
+               struct hlist_node *node;
+
+               sk_for_each(sk, node, &raw_v4_htable[state->bucket])
+                       if (sk->sk_family == PF_INET)
+                               goto found;
+       }
+       sk = NULL;
+found:
+       return sk;
+}
+
+static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
+{
+       struct raw_iter_state* state = raw_seq_private(seq);
+
+       do {
+               sk = sk_next(sk);
+try_again:
+               ;
+       } while (sk && sk->sk_family != PF_INET);
+
+       if (!sk && ++state->bucket < RAWV4_HTABLE_SIZE) {
+               sk = sk_head(&raw_v4_htable[state->bucket]);
+               goto try_again;
+       }
+       return sk;
+}
+
+static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct sock *sk = raw_get_first(seq);
+
+       if (sk)
+               while (pos && (sk = raw_get_next(seq, sk)) != NULL)
+                       --pos;
+       return pos ? NULL : sk;
+}
+
+static void *raw_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       read_lock(&raw_v4_lock);
+       return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct sock *sk;
+
+       if (v == SEQ_START_TOKEN)
+               sk = raw_get_first(seq);
+       else
+               sk = raw_get_next(seq, v);
+       ++*pos;
+       return sk;
+}
+
+static void raw_seq_stop(struct seq_file *seq, void *v)
+{
+       read_unlock(&raw_v4_lock);
+}
+
+static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
+{
+       struct inet_opt *inet = inet_sk(sp);
+       unsigned int dest = inet->daddr,
+                    src = inet->rcv_saddr;
+       __u16 destp = 0,
+             srcp  = inet->num;
+
+       sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
+               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
+               i, src, srcp, dest, destp, sp->sk_state, 
+               atomic_read(&sp->sk_wmem_alloc),
+               atomic_read(&sp->sk_rmem_alloc),
+               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               atomic_read(&sp->sk_refcnt), sp);
+       return tmpbuf;
+}
+
+static int raw_seq_show(struct seq_file *seq, void *v)
+{
+       char tmpbuf[129];
+
+       if (v == SEQ_START_TOKEN)
+               seq_printf(seq, "%-127s\n",
+                              "  sl  local_address rem_address   st tx_queue "
+                              "rx_queue tr tm->when retrnsmt   uid  timeout "
+                              "inode");
+       else {
+               struct raw_iter_state *state = raw_seq_private(seq);
+
+               seq_printf(seq, "%-127s\n",
+                          get_raw_sock(v, tmpbuf, state->bucket));
+       }
+       return 0;
+}
+
+static struct seq_operations raw_seq_ops = {
+       .start = raw_seq_start,
+       .next  = raw_seq_next,
+       .stop  = raw_seq_stop,
+       .show  = raw_seq_show,
+};
+
+static int raw_seq_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int rc = -ENOMEM;
+       struct raw_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+       if (!s)
+               goto out;
+       rc = seq_open(file, &raw_seq_ops);
+       if (rc)
+               goto out_kfree;
+
+       seq = file->private_data;
+       seq->private = s;
+       memset(s, 0, sizeof(*s));
+out:
+       return rc;
+out_kfree:
+       kfree(s);
+       goto out;
+}
+
+static struct file_operations raw_seq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = raw_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private,
+};
+
+int __init raw_proc_init(void)
+{
+       if (!proc_net_fops_create("raw", S_IRUGO, &raw_seq_fops))
+               return -ENOMEM;
+       return 0;
+}
+
+void __init raw_proc_exit(void)
+{
+       proc_net_remove("raw");
+}
+#endif /* CONFIG_PROC_FS */