ia64/xen-unstable

changeset 1212:b57df4362261

bitkeeper revision 1.823 (4060141cL2UWa7gkwwnmGJlHbv0sAA)

Many files:
xeno -> xen renames.
mvdir
author kaf24@scramble.cl.cam.ac.uk
date Tue Mar 23 10:40:28 2004 +0000 (2004-03-23)
parents a235ded07b12
children 8ca409420fb5
files .rootkeys xenolinux-2.4.25-sparse/Documentation/Configure.help xenolinux-2.4.25-sparse/Makefile xenolinux-2.4.25-sparse/arch/xen/Makefile xenolinux-2.4.25-sparse/arch/xen/boot/Makefile xenolinux-2.4.25-sparse/arch/xen/config.in xenolinux-2.4.25-sparse/arch/xen/defconfig xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/balloon.c xenolinux-2.4.25-sparse/arch/xen/drivers/block/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.c xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.h xenolinux-2.4.25-sparse/arch/xen/drivers/block/vbd.c xenolinux-2.4.25-sparse/arch/xen/drivers/console/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/core.c xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/vfr.c xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/evtchn.c xenolinux-2.4.25-sparse/arch/xen/drivers/network/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/network/network.c xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/Makefile xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/vnetif.c xenolinux-2.4.25-sparse/arch/xen/kernel/Makefile xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S xenolinux-2.4.25-sparse/arch/xen/kernel/head.S xenolinux-2.4.25-sparse/arch/xen/kernel/hypervisor.c xenolinux-2.4.25-sparse/arch/xen/kernel/i386_ksyms.c xenolinux-2.4.25-sparse/arch/xen/kernel/ioport.c xenolinux-2.4.25-sparse/arch/xen/kernel/irq.c xenolinux-2.4.25-sparse/arch/xen/kernel/ldt.c xenolinux-2.4.25-sparse/arch/xen/kernel/pci-dma.c xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.c xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.h xenolinux-2.4.25-sparse/arch/xen/kernel/pci-irq.c xenolinux-2.4.25-sparse/arch/xen/kernel/pci-pc.c xenolinux-2.4.25-sparse/arch/xen/kernel/physirq.c xenolinux-2.4.25-sparse/arch/xen/kernel/process.c 
xenolinux-2.4.25-sparse/arch/xen/kernel/setup.c xenolinux-2.4.25-sparse/arch/xen/kernel/signal.c xenolinux-2.4.25-sparse/arch/xen/kernel/time.c xenolinux-2.4.25-sparse/arch/xen/kernel/traps.c xenolinux-2.4.25-sparse/arch/xen/lib/Makefile xenolinux-2.4.25-sparse/arch/xen/lib/delay.c xenolinux-2.4.25-sparse/arch/xen/lib/xeno_proc.c xenolinux-2.4.25-sparse/arch/xen/mm/Makefile xenolinux-2.4.25-sparse/arch/xen/mm/fault.c xenolinux-2.4.25-sparse/arch/xen/mm/hypervisor.c xenolinux-2.4.25-sparse/arch/xen/mm/init.c xenolinux-2.4.25-sparse/arch/xen/mm/ioremap.c xenolinux-2.4.25-sparse/arch/xen/vmlinux.lds xenolinux-2.4.25-sparse/arch/xeno/Makefile xenolinux-2.4.25-sparse/arch/xeno/boot/Makefile xenolinux-2.4.25-sparse/arch/xeno/config.in xenolinux-2.4.25-sparse/arch/xeno/defconfig xenolinux-2.4.25-sparse/arch/xeno/drivers/balloon/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/balloon/balloon.c xenolinux-2.4.25-sparse/arch/xeno/drivers/block/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/block/block.c xenolinux-2.4.25-sparse/arch/xeno/drivers/block/block.h xenolinux-2.4.25-sparse/arch/xeno/drivers/block/vbd.c xenolinux-2.4.25-sparse/arch/xeno/drivers/console/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/console/console.c xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/core.c xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/vfr.c xenolinux-2.4.25-sparse/arch/xeno/drivers/evtchn/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/evtchn/evtchn.c xenolinux-2.4.25-sparse/arch/xeno/drivers/network/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/network/network.c xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/Makefile xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/vnetif.c xenolinux-2.4.25-sparse/arch/xeno/kernel/Makefile xenolinux-2.4.25-sparse/arch/xeno/kernel/entry.S xenolinux-2.4.25-sparse/arch/xeno/kernel/head.S xenolinux-2.4.25-sparse/arch/xeno/kernel/hypervisor.c 
xenolinux-2.4.25-sparse/arch/xeno/kernel/i386_ksyms.c xenolinux-2.4.25-sparse/arch/xeno/kernel/ioport.c xenolinux-2.4.25-sparse/arch/xeno/kernel/irq.c xenolinux-2.4.25-sparse/arch/xeno/kernel/ldt.c xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-dma.c xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-i386.c xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-i386.h xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-irq.c xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-pc.c xenolinux-2.4.25-sparse/arch/xeno/kernel/physirq.c xenolinux-2.4.25-sparse/arch/xeno/kernel/process.c xenolinux-2.4.25-sparse/arch/xeno/kernel/setup.c xenolinux-2.4.25-sparse/arch/xeno/kernel/signal.c xenolinux-2.4.25-sparse/arch/xeno/kernel/time.c xenolinux-2.4.25-sparse/arch/xeno/kernel/traps.c xenolinux-2.4.25-sparse/arch/xeno/lib/Makefile xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c xenolinux-2.4.25-sparse/arch/xeno/lib/xeno_proc.c xenolinux-2.4.25-sparse/arch/xeno/mm/Makefile xenolinux-2.4.25-sparse/arch/xeno/mm/fault.c xenolinux-2.4.25-sparse/arch/xeno/mm/hypervisor.c xenolinux-2.4.25-sparse/arch/xeno/mm/init.c xenolinux-2.4.25-sparse/arch/xeno/mm/ioremap.c xenolinux-2.4.25-sparse/arch/xeno/vmlinux.lds xenolinux-2.4.25-sparse/drivers/block/ll_rw_blk.c xenolinux-2.4.25-sparse/drivers/char/mem.c xenolinux-2.4.25-sparse/fs/exec.c xenolinux-2.4.25-sparse/include/asm-xen/bugs.h xenolinux-2.4.25-sparse/include/asm-xen/control_if.h xenolinux-2.4.25-sparse/include/asm-xen/desc.h xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h xenolinux-2.4.25-sparse/include/asm-xen/fixmap.h xenolinux-2.4.25-sparse/include/asm-xen/highmem.h xenolinux-2.4.25-sparse/include/asm-xen/hw_irq.h xenolinux-2.4.25-sparse/include/asm-xen/hypervisor.h xenolinux-2.4.25-sparse/include/asm-xen/io.h xenolinux-2.4.25-sparse/include/asm-xen/irq.h xenolinux-2.4.25-sparse/include/asm-xen/keyboard.h xenolinux-2.4.25-sparse/include/asm-xen/mmu_context.h xenolinux-2.4.25-sparse/include/asm-xen/msr.h xenolinux-2.4.25-sparse/include/asm-xen/multicall.h 
xenolinux-2.4.25-sparse/include/asm-xen/page.h xenolinux-2.4.25-sparse/include/asm-xen/pgalloc.h xenolinux-2.4.25-sparse/include/asm-xen/pgtable-2level.h xenolinux-2.4.25-sparse/include/asm-xen/pgtable.h xenolinux-2.4.25-sparse/include/asm-xen/proc_cmd.h xenolinux-2.4.25-sparse/include/asm-xen/processor.h xenolinux-2.4.25-sparse/include/asm-xen/ptrace.h xenolinux-2.4.25-sparse/include/asm-xen/segment.h xenolinux-2.4.25-sparse/include/asm-xen/smp.h xenolinux-2.4.25-sparse/include/asm-xen/suspend.h xenolinux-2.4.25-sparse/include/asm-xen/system.h xenolinux-2.4.25-sparse/include/asm-xen/vga.h xenolinux-2.4.25-sparse/include/asm-xen/xeno_proc.h xenolinux-2.4.25-sparse/include/asm-xeno/bugs.h xenolinux-2.4.25-sparse/include/asm-xeno/control_if.h xenolinux-2.4.25-sparse/include/asm-xeno/desc.h xenolinux-2.4.25-sparse/include/asm-xeno/evtchn.h xenolinux-2.4.25-sparse/include/asm-xeno/fixmap.h xenolinux-2.4.25-sparse/include/asm-xeno/highmem.h xenolinux-2.4.25-sparse/include/asm-xeno/hw_irq.h xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h xenolinux-2.4.25-sparse/include/asm-xeno/io.h xenolinux-2.4.25-sparse/include/asm-xeno/irq.h xenolinux-2.4.25-sparse/include/asm-xeno/keyboard.h xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h xenolinux-2.4.25-sparse/include/asm-xeno/msr.h xenolinux-2.4.25-sparse/include/asm-xeno/multicall.h xenolinux-2.4.25-sparse/include/asm-xeno/page.h xenolinux-2.4.25-sparse/include/asm-xeno/pgalloc.h xenolinux-2.4.25-sparse/include/asm-xeno/pgtable-2level.h xenolinux-2.4.25-sparse/include/asm-xeno/pgtable.h xenolinux-2.4.25-sparse/include/asm-xeno/proc_cmd.h xenolinux-2.4.25-sparse/include/asm-xeno/processor.h xenolinux-2.4.25-sparse/include/asm-xeno/ptrace.h xenolinux-2.4.25-sparse/include/asm-xeno/segment.h xenolinux-2.4.25-sparse/include/asm-xeno/smp.h xenolinux-2.4.25-sparse/include/asm-xeno/suspend.h xenolinux-2.4.25-sparse/include/asm-xeno/system.h xenolinux-2.4.25-sparse/include/asm-xeno/vga.h 
xenolinux-2.4.25-sparse/include/asm-xeno/xeno_proc.h xenolinux-2.4.25-sparse/include/linux/blk.h xenolinux-2.4.25-sparse/init/do_mounts.c xenolinux-2.4.25-sparse/kernel/panic.c xenolinux-2.4.25-sparse/kernel/time.c xenolinux-2.4.25-sparse/mkbuildtree xenolinux-2.4.25-sparse/mm/memory.c xenolinux-2.4.25-sparse/mm/mprotect.c xenolinux-2.4.25-sparse/mm/mremap.c xenolinux-2.4.25-sparse/mm/swapfile.c xenolinux-2.4.25-sparse/mm/vmalloc.c
line diff
     1.1 --- a/.rootkeys	Tue Mar 23 09:57:30 2004 +0000
     1.2 +++ b/.rootkeys	Tue Mar 23 10:40:28 2004 +0000
     1.3 @@ -605,86 +605,86 @@ 3eb3c87fdQKQ5OBGbM-KjZfi9Us4ng xen/tools
     1.4  3eb3c87fS7DNbg0i6yhFs28UIqAK5g xen/tools/figlet/xen.flf
     1.5  3f05a939TA3SLPY7ZiScMotLjg9owQ xenolinux-2.4.25-sparse/Documentation/Configure.help
     1.6  3e5a4e6589G-U42lFKs43plskXoFxQ xenolinux-2.4.25-sparse/Makefile
     1.7 -3e5a4e65IEPjnWPZ5w3TxS5scV8Ewg xenolinux-2.4.25-sparse/arch/xeno/Makefile
     1.8 -3e5a4e65n-KhsEAs-A4ULiStBp-r6w xenolinux-2.4.25-sparse/arch/xeno/boot/Makefile
     1.9 -3e5a4e65OV_j_DBtjzt5vej771AJsA xenolinux-2.4.25-sparse/arch/xeno/config.in
    1.10 -3e5a4e65TNEycLeXqPSXQJQm_xGecA xenolinux-2.4.25-sparse/arch/xeno/defconfig
    1.11 -3e6377f5xwPfYZkPHPrDbEq1PRN7uQ xenolinux-2.4.25-sparse/arch/xeno/drivers/balloon/Makefile
    1.12 -3e6377f8Me8IqtvEhb70XFgOvqQH7A xenolinux-2.4.25-sparse/arch/xeno/drivers/balloon/balloon.c
    1.13 -3e5a4e65iHEuC5sjFhj42XALYbLVRw xenolinux-2.4.25-sparse/arch/xeno/drivers/block/Makefile
    1.14 -3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux-2.4.25-sparse/arch/xeno/drivers/block/block.c
    1.15 -3e67f822FOPwqHiaRKbrskgWgoNL5g xenolinux-2.4.25-sparse/arch/xeno/drivers/block/block.h
    1.16 -3e676eb5RXnHzSHgA1BvM0B1aIm4qg xenolinux-2.4.25-sparse/arch/xeno/drivers/block/vbd.c
    1.17 -3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ xenolinux-2.4.25-sparse/arch/xeno/drivers/console/Makefile
    1.18 -3e5a4e651TH-SXHoufurnWjgl5bfOA xenolinux-2.4.25-sparse/arch/xeno/drivers/console/console.c
    1.19 -3e5a4e656nfFISThfbyXQOA6HN6YHw xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/Makefile
    1.20 -3e5a4e65BXtftInNHUC2PjDfPhdZZA xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/core.c
    1.21 -3e5a4e65gfn_ltB8ujHMVFApnTTNRQ xenolinux-2.4.25-sparse/arch/xeno/drivers/dom0/vfr.c
    1.22 -40420a6ebRqDjufoN1WSJvolEW2Wjw xenolinux-2.4.25-sparse/arch/xeno/drivers/evtchn/Makefile
    1.23 -40420a73Wou6JlsZDiu6YwjYomsm7A xenolinux-2.4.25-sparse/arch/xeno/drivers/evtchn/evtchn.c
    1.24 -3e5a4e65gZBRBB6RsSVg1c9iahigAw xenolinux-2.4.25-sparse/arch/xeno/drivers/network/Makefile
    1.25 -3e5a4e65ZxKrbFetVB84JhrTyZ1YuQ xenolinux-2.4.25-sparse/arch/xeno/drivers/network/network.c
    1.26 -405853f2wg7JXZJNltspMwOZJklxgw xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/Makefile
    1.27 -405853f6nbeazrNyEWNHBuoSg2PiPA xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/vnetif.c
    1.28 -3e5a4e65lWzkiPXsZdzPt2RNnJGG1g xenolinux-2.4.25-sparse/arch/xeno/kernel/Makefile
    1.29 -3e5a4e65_hqfuxtGG8IUy6wRM86Ecg xenolinux-2.4.25-sparse/arch/xeno/kernel/entry.S
    1.30 -3e5a4e65Hy_1iUvMTPsNqGNXd9uFpg xenolinux-2.4.25-sparse/arch/xeno/kernel/head.S
    1.31 -3e5a4e65ibVQmwlOn0j3sVH_j_6hAg xenolinux-2.4.25-sparse/arch/xeno/kernel/hypervisor.c
    1.32 -3e5a4e65RMGcuA-HCn3-wNx3fFQwdg xenolinux-2.4.25-sparse/arch/xeno/kernel/i386_ksyms.c
    1.33 -3e5a4e65MEvZhlr070sK5JsfAQlv7Q xenolinux-2.4.25-sparse/arch/xeno/kernel/ioport.c
    1.34 -3e5a4e653U6cELGv528IxOLHvCq8iA xenolinux-2.4.25-sparse/arch/xeno/kernel/irq.c
    1.35 -3e5a4e65muT6SU3ck47IP87Q7Ti5hA xenolinux-2.4.25-sparse/arch/xeno/kernel/ldt.c
    1.36 -4051db84bZeRX7a_Kh6VyyDuT5FOIg xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-dma.c
    1.37 -4051db89iiHs38tWGkoW_RukNyaBHw xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-i386.c
    1.38 -4051db8dJYX86ZCLA-WfTW2dAyrehw xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-i386.h
    1.39 -4051db91BenvDZEMZxQCGkQyJYoG5w xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-irq.c
    1.40 -4051db95N9N99FjsRwi49YKUNHWI8A xenolinux-2.4.25-sparse/arch/xeno/kernel/pci-pc.c
    1.41 -4051db99fbdTHgCpjywPCp7vjLCe7Q xenolinux-2.4.25-sparse/arch/xeno/kernel/physirq.c
    1.42 -3e5a4e65IGt3WwQDNiL4h-gYWgNTWQ xenolinux-2.4.25-sparse/arch/xeno/kernel/process.c
    1.43 -3e5a4e66tR-qJMLj3MppcKqmvuI2XQ xenolinux-2.4.25-sparse/arch/xeno/kernel/setup.c
    1.44 -3e5a4e66fWSTagLGU2P8BGFGRjhDiw xenolinux-2.4.25-sparse/arch/xeno/kernel/signal.c
    1.45 -3e5a4e66N__lUXNwzQ-eADRzK9LXuQ xenolinux-2.4.25-sparse/arch/xeno/kernel/time.c
    1.46 -3e5a4e66aHCbQ_F5QZ8VeyikLmuRZQ xenolinux-2.4.25-sparse/arch/xeno/kernel/traps.c
    1.47 -3e5a4e66-9_NczrVMbuQkoSLyXckIw xenolinux-2.4.25-sparse/arch/xeno/lib/Makefile
    1.48 -3e5a4e6637ZDk0BvFEC-aFQs599-ng xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c
    1.49 -3f68905cF5i8-NYpIhGjKmh0y8Gu5g xenolinux-2.4.25-sparse/arch/xeno/lib/xeno_proc.c
    1.50 -3e5a4e66croVgpcJyJuF2ycQw0HuJw xenolinux-2.4.25-sparse/arch/xeno/mm/Makefile
    1.51 -3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg xenolinux-2.4.25-sparse/arch/xeno/mm/fault.c
    1.52 -3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.25-sparse/arch/xeno/mm/hypervisor.c
    1.53 -3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.25-sparse/arch/xeno/mm/init.c
    1.54 -3f0bed43UUdQichXAiVNrjV-y2Kzcg xenolinux-2.4.25-sparse/arch/xeno/mm/ioremap.c
    1.55 -3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.25-sparse/arch/xeno/vmlinux.lds
    1.56 +3e5a4e65IEPjnWPZ5w3TxS5scV8Ewg xenolinux-2.4.25-sparse/arch/xen/Makefile
    1.57 +3e5a4e65n-KhsEAs-A4ULiStBp-r6w xenolinux-2.4.25-sparse/arch/xen/boot/Makefile
    1.58 +3e5a4e65OV_j_DBtjzt5vej771AJsA xenolinux-2.4.25-sparse/arch/xen/config.in
    1.59 +3e5a4e65TNEycLeXqPSXQJQm_xGecA xenolinux-2.4.25-sparse/arch/xen/defconfig
    1.60 +3e6377f5xwPfYZkPHPrDbEq1PRN7uQ xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/Makefile
    1.61 +3e6377f8Me8IqtvEhb70XFgOvqQH7A xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/balloon.c
    1.62 +3e5a4e65iHEuC5sjFhj42XALYbLVRw xenolinux-2.4.25-sparse/arch/xen/drivers/block/Makefile
    1.63 +3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.c
    1.64 +3e67f822FOPwqHiaRKbrskgWgoNL5g xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.h
    1.65 +3e676eb5RXnHzSHgA1BvM0B1aIm4qg xenolinux-2.4.25-sparse/arch/xen/drivers/block/vbd.c
    1.66 +3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ xenolinux-2.4.25-sparse/arch/xen/drivers/console/Makefile
    1.67 +3e5a4e651TH-SXHoufurnWjgl5bfOA xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
    1.68 +3e5a4e656nfFISThfbyXQOA6HN6YHw xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/Makefile
    1.69 +3e5a4e65BXtftInNHUC2PjDfPhdZZA xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/core.c
    1.70 +3e5a4e65gfn_ltB8ujHMVFApnTTNRQ xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/vfr.c
    1.71 +40420a6ebRqDjufoN1WSJvolEW2Wjw xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/Makefile
    1.72 +40420a73Wou6JlsZDiu6YwjYomsm7A xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/evtchn.c
    1.73 +3e5a4e65gZBRBB6RsSVg1c9iahigAw xenolinux-2.4.25-sparse/arch/xen/drivers/network/Makefile
    1.74 +3e5a4e65ZxKrbFetVB84JhrTyZ1YuQ xenolinux-2.4.25-sparse/arch/xen/drivers/network/network.c
    1.75 +405853f2wg7JXZJNltspMwOZJklxgw xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/Makefile
    1.76 +405853f6nbeazrNyEWNHBuoSg2PiPA xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/vnetif.c
    1.77 +3e5a4e65lWzkiPXsZdzPt2RNnJGG1g xenolinux-2.4.25-sparse/arch/xen/kernel/Makefile
    1.78 +3e5a4e65_hqfuxtGG8IUy6wRM86Ecg xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
    1.79 +3e5a4e65Hy_1iUvMTPsNqGNXd9uFpg xenolinux-2.4.25-sparse/arch/xen/kernel/head.S
    1.80 +3e5a4e65ibVQmwlOn0j3sVH_j_6hAg xenolinux-2.4.25-sparse/arch/xen/kernel/hypervisor.c
    1.81 +3e5a4e65RMGcuA-HCn3-wNx3fFQwdg xenolinux-2.4.25-sparse/arch/xen/kernel/i386_ksyms.c
    1.82 +3e5a4e65MEvZhlr070sK5JsfAQlv7Q xenolinux-2.4.25-sparse/arch/xen/kernel/ioport.c
    1.83 +3e5a4e653U6cELGv528IxOLHvCq8iA xenolinux-2.4.25-sparse/arch/xen/kernel/irq.c
    1.84 +3e5a4e65muT6SU3ck47IP87Q7Ti5hA xenolinux-2.4.25-sparse/arch/xen/kernel/ldt.c
    1.85 +4051db84bZeRX7a_Kh6VyyDuT5FOIg xenolinux-2.4.25-sparse/arch/xen/kernel/pci-dma.c
    1.86 +4051db89iiHs38tWGkoW_RukNyaBHw xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.c
    1.87 +4051db8dJYX86ZCLA-WfTW2dAyrehw xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.h
    1.88 +4051db91BenvDZEMZxQCGkQyJYoG5w xenolinux-2.4.25-sparse/arch/xen/kernel/pci-irq.c
    1.89 +4051db95N9N99FjsRwi49YKUNHWI8A xenolinux-2.4.25-sparse/arch/xen/kernel/pci-pc.c
    1.90 +4051db99fbdTHgCpjywPCp7vjLCe7Q xenolinux-2.4.25-sparse/arch/xen/kernel/physirq.c
    1.91 +3e5a4e65IGt3WwQDNiL4h-gYWgNTWQ xenolinux-2.4.25-sparse/arch/xen/kernel/process.c
    1.92 +3e5a4e66tR-qJMLj3MppcKqmvuI2XQ xenolinux-2.4.25-sparse/arch/xen/kernel/setup.c
    1.93 +3e5a4e66fWSTagLGU2P8BGFGRjhDiw xenolinux-2.4.25-sparse/arch/xen/kernel/signal.c
    1.94 +3e5a4e66N__lUXNwzQ-eADRzK9LXuQ xenolinux-2.4.25-sparse/arch/xen/kernel/time.c
    1.95 +3e5a4e66aHCbQ_F5QZ8VeyikLmuRZQ xenolinux-2.4.25-sparse/arch/xen/kernel/traps.c
    1.96 +3e5a4e66-9_NczrVMbuQkoSLyXckIw xenolinux-2.4.25-sparse/arch/xen/lib/Makefile
    1.97 +3e5a4e6637ZDk0BvFEC-aFQs599-ng xenolinux-2.4.25-sparse/arch/xen/lib/delay.c
    1.98 +3f68905cF5i8-NYpIhGjKmh0y8Gu5g xenolinux-2.4.25-sparse/arch/xen/lib/xeno_proc.c
    1.99 +3e5a4e66croVgpcJyJuF2ycQw0HuJw xenolinux-2.4.25-sparse/arch/xen/mm/Makefile
   1.100 +3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg xenolinux-2.4.25-sparse/arch/xen/mm/fault.c
   1.101 +3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.25-sparse/arch/xen/mm/hypervisor.c
   1.102 +3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.25-sparse/arch/xen/mm/init.c
   1.103 +3f0bed43UUdQichXAiVNrjV-y2Kzcg xenolinux-2.4.25-sparse/arch/xen/mm/ioremap.c
   1.104 +3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.25-sparse/arch/xen/vmlinux.lds
   1.105  3e5a4e66mrtlmV75L1tjKDg8RaM5gA xenolinux-2.4.25-sparse/drivers/block/ll_rw_blk.c
   1.106  3f108aeaLcGDgQdFAANLTUEid0a05w xenolinux-2.4.25-sparse/drivers/char/mem.c
   1.107  3e5a4e66rw65CxyolW9PKz4GG42RcA xenolinux-2.4.25-sparse/drivers/char/tty_io.c
   1.108  3e5a4e669uzIE54VwucPYtGwXLAbzA xenolinux-2.4.25-sparse/fs/exec.c
   1.109 -3e5a4e66wbeCpsJgVf_U8Jde-CNcsA xenolinux-2.4.25-sparse/include/asm-xeno/bugs.h
   1.110 -4048c0ddxnIa2GpBAVR-mY6mNSdeJg xenolinux-2.4.25-sparse/include/asm-xeno/control_if.h
   1.111 -3e5a4e66HdSkvIV6SJ1evG_xmTmXHA xenolinux-2.4.25-sparse/include/asm-xeno/desc.h
   1.112 -4048c0e0_P2wUTiT6UqgPhn0s7yFcA xenolinux-2.4.25-sparse/include/asm-xeno/evtchn.h
   1.113 -3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw xenolinux-2.4.25-sparse/include/asm-xeno/fixmap.h
   1.114 -3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.25-sparse/include/asm-xeno/highmem.h
   1.115 -3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.25-sparse/include/asm-xeno/hw_irq.h
   1.116 -3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h
   1.117 -4060044fVx7-tokvNLKBf_6qBB4lqQ xenolinux-2.4.25-sparse/include/asm-xeno/io.h
   1.118 -3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.25-sparse/include/asm-xeno/irq.h
   1.119 -3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.25-sparse/include/asm-xeno/keyboard.h
   1.120 -3e5a4e678ddsQOpbSiRdy1GRcDc9WA xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
   1.121 -3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ xenolinux-2.4.25-sparse/include/asm-xeno/msr.h
   1.122 -3e7270deQqtGPSnFxcW4AvJZuTUWfg xenolinux-2.4.25-sparse/include/asm-xeno/multicall.h
   1.123 -3e5a4e67mnQfh-R8KcQCaVo2Oho6yg xenolinux-2.4.25-sparse/include/asm-xeno/page.h
   1.124 -3e5a4e67uTYU5oEnIDjxuaez8njjqg xenolinux-2.4.25-sparse/include/asm-xeno/pgalloc.h
   1.125 -3e5a4e67X7JyupgdYkgDX19Huj2sAw xenolinux-2.4.25-sparse/include/asm-xeno/pgtable-2level.h
   1.126 -3e5a4e67gr4NLGtQ5CvSLimMYZlkOA xenolinux-2.4.25-sparse/include/asm-xeno/pgtable.h
   1.127 -3f108af1qNv8DVSGPv4zpqIU1txCkg xenolinux-2.4.25-sparse/include/asm-xeno/proc_cmd.h
   1.128 -3e5a4e676uK4xErTBDH6XJREn9LSyg xenolinux-2.4.25-sparse/include/asm-xeno/processor.h
   1.129 -3e5a4e67AJPjW-zL7p-xWuA6IVeH1g xenolinux-2.4.25-sparse/include/asm-xeno/ptrace.h
   1.130 -3e5a4e68uJz-xI0IBVMD7xRLQKJDFg xenolinux-2.4.25-sparse/include/asm-xeno/segment.h
   1.131 -3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA xenolinux-2.4.25-sparse/include/asm-xeno/smp.h
   1.132 -3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ xenolinux-2.4.25-sparse/include/asm-xeno/suspend.h
   1.133 -3e5a4e68mTr0zcp9SXDbnd-XLrrfxw xenolinux-2.4.25-sparse/include/asm-xeno/system.h
   1.134 -3f1056a9L_kqHcFheV00KbKBzv9j5w xenolinux-2.4.25-sparse/include/asm-xeno/vga.h
   1.135 -3f689063nhrIRsMMZjZxMFk7iEINqQ xenolinux-2.4.25-sparse/include/asm-xeno/xeno_proc.h
   1.136 +3e5a4e66wbeCpsJgVf_U8Jde-CNcsA xenolinux-2.4.25-sparse/include/asm-xen/bugs.h
   1.137 +4048c0ddxnIa2GpBAVR-mY6mNSdeJg xenolinux-2.4.25-sparse/include/asm-xen/control_if.h
   1.138 +3e5a4e66HdSkvIV6SJ1evG_xmTmXHA xenolinux-2.4.25-sparse/include/asm-xen/desc.h
   1.139 +4048c0e0_P2wUTiT6UqgPhn0s7yFcA xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
   1.140 +3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw xenolinux-2.4.25-sparse/include/asm-xen/fixmap.h
   1.141 +3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.25-sparse/include/asm-xen/highmem.h
   1.142 +3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.25-sparse/include/asm-xen/hw_irq.h
   1.143 +3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.25-sparse/include/asm-xen/hypervisor.h
   1.144 +4060044fVx7-tokvNLKBf_6qBB4lqQ xenolinux-2.4.25-sparse/include/asm-xen/io.h
   1.145 +3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.25-sparse/include/asm-xen/irq.h
   1.146 +3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.25-sparse/include/asm-xen/keyboard.h
   1.147 +3e5a4e678ddsQOpbSiRdy1GRcDc9WA xenolinux-2.4.25-sparse/include/asm-xen/mmu_context.h
   1.148 +3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ xenolinux-2.4.25-sparse/include/asm-xen/msr.h
   1.149 +3e7270deQqtGPSnFxcW4AvJZuTUWfg xenolinux-2.4.25-sparse/include/asm-xen/multicall.h
   1.150 +3e5a4e67mnQfh-R8KcQCaVo2Oho6yg xenolinux-2.4.25-sparse/include/asm-xen/page.h
   1.151 +3e5a4e67uTYU5oEnIDjxuaez8njjqg xenolinux-2.4.25-sparse/include/asm-xen/pgalloc.h
   1.152 +3e5a4e67X7JyupgdYkgDX19Huj2sAw xenolinux-2.4.25-sparse/include/asm-xen/pgtable-2level.h
   1.153 +3e5a4e67gr4NLGtQ5CvSLimMYZlkOA xenolinux-2.4.25-sparse/include/asm-xen/pgtable.h
   1.154 +3f108af1qNv8DVSGPv4zpqIU1txCkg xenolinux-2.4.25-sparse/include/asm-xen/proc_cmd.h
   1.155 +3e5a4e676uK4xErTBDH6XJREn9LSyg xenolinux-2.4.25-sparse/include/asm-xen/processor.h
   1.156 +3e5a4e67AJPjW-zL7p-xWuA6IVeH1g xenolinux-2.4.25-sparse/include/asm-xen/ptrace.h
   1.157 +3e5a4e68uJz-xI0IBVMD7xRLQKJDFg xenolinux-2.4.25-sparse/include/asm-xen/segment.h
   1.158 +3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA xenolinux-2.4.25-sparse/include/asm-xen/smp.h
   1.159 +3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ xenolinux-2.4.25-sparse/include/asm-xen/suspend.h
   1.160 +3e5a4e68mTr0zcp9SXDbnd-XLrrfxw xenolinux-2.4.25-sparse/include/asm-xen/system.h
   1.161 +3f1056a9L_kqHcFheV00KbKBzv9j5w xenolinux-2.4.25-sparse/include/asm-xen/vga.h
   1.162 +3f689063nhrIRsMMZjZxMFk7iEINqQ xenolinux-2.4.25-sparse/include/asm-xen/xeno_proc.h
   1.163  3f056927gMHl7mWB89rb73JahbhQIA xenolinux-2.4.25-sparse/include/linux/blk.h
   1.164  3e5a4e68WLX3B8owTvktP3HHOtznPQ xenolinux-2.4.25-sparse/include/linux/major.h
   1.165  401c0590D_kwJDU59X8NyvqSv_Cl2A xenolinux-2.4.25-sparse/include/linux/sched.h
     2.1 --- a/xenolinux-2.4.25-sparse/Documentation/Configure.help	Tue Mar 23 09:57:30 2004 +0000
     2.2 +++ b/xenolinux-2.4.25-sparse/Documentation/Configure.help	Tue Mar 23 10:40:28 2004 +0000
     2.3 @@ -597,10 +597,10 @@ CONFIG_BLK_DEV_NBD
     2.4    If unsure, say N.
     2.5  
     2.6  XenoLinux virtual block device support
     2.7 -CONFIG_XENOLINUX_BLOCK
     2.8 +CONFIG_XEN_VBD
     2.9    Xen can export virtual block devices which map back to extents of
    2.10    blocks on the physical partitions.  This option is needed for
    2.11 -  xenolinux to make use of such devices when running as a Xen guest.
    2.12 +  Linux to make use of such devices when running as a Xen guest.
    2.13  
    2.14    If unsure, say Y.
    2.15  
    2.16 @@ -17383,11 +17383,11 @@ Acorn partition support
    2.17  CONFIG_ACORN_PARTITION
    2.18    Support hard disks partitioned under Acorn operating systems.
    2.19  
    2.20 -Xeno partition support
    2.21 -CONFIG_XENO_PARTITION
    2.22 -  Support Xeno-style partitions on physical disks.  The Xen
    2.23 +Xen virtual-partition support
    2.24 +CONFIG_XEN_VBD_PARTITION
    2.25 +  Support partition-level virtual block devices.  The Xen
    2.26    hypervisor can export partitions on a physical disk to clients,
    2.27 -  but access to the partition table requires special hackery.
    2.28 +  but access to the partition table requires special trickery.
    2.29    This will be used if this option is enabled; otherwise, 
    2.30    it will be possible to access exported partitions by sector
    2.31    number but not with useful names e.g. /dev/hda4.
     3.1 --- a/xenolinux-2.4.25-sparse/Makefile	Tue Mar 23 09:57:30 2004 +0000
     3.2 +++ b/xenolinux-2.4.25-sparse/Makefile	Tue Mar 23 10:40:28 2004 +0000
     3.3 @@ -10,6 +10,7 @@ KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$
     3.4  # This will be overriden for Xen and UML builds.
     3.5  SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
     3.6  ARCH ?= $(SUBARCH)
     3.7 +ARCH := $(ARCH:xeno=xen) ## Temporary hack while users adjust to new archname
     3.8  
     3.9  KERNELPATH=kernel-$(shell echo $(KERNELRELEASE) | sed -e "s/-//g")
    3.10  
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/Makefile	Tue Mar 23 10:40:28 2004 +0000
     4.3 @@ -0,0 +1,120 @@
     4.4 +#
     4.5 +# xen/Makefile
     4.6 +#
     4.7 +# This file is included by the global makefile so that you can add your own
     4.8 +# architecture-specific flags and dependencies. Remember to do have actions
     4.9 +# for "archclean" and "archdep" for cleaning up and making dependencies for
    4.10 +# this architecture
    4.11 +#
    4.12 +# This file is subject to the terms and conditions of the GNU General Public
    4.13 +# License.  See the file "COPYING" in the main directory of this archive
    4.14 +# for more details.
    4.15 +#
    4.16 +# Copyright (C) 1994 by Linus Torvalds
    4.17 +#
    4.18 +# 19990713  Artur Skawina <skawina@geocities.com>
    4.19 +#           Added '-march' and '-mpreferred-stack-boundary' support
    4.20 +#
    4.21 +
    4.22 +override EXTRAVERSION := -xen$(EXTRAVERSION)
    4.23 +
    4.24 +LD=$(CROSS_COMPILE)ld -m elf_i386
    4.25 +OBJCOPY=$(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
    4.26 +LDFLAGS=-e stext
    4.27 +LINKFLAGS =-T $(TOPDIR)/arch/xen/vmlinux.lds $(LDFLAGS)
    4.28 +
    4.29 +CFLAGS += -pipe
    4.30 +
    4.31 +check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
    4.32 +
    4.33 +# prevent gcc from keeping the stack 16 byte aligned
    4.34 +CFLAGS += $(call check_gcc,-mpreferred-stack-boundary=2,)
    4.35 +
    4.36 +ifdef CONFIG_M686
    4.37 +CFLAGS += -march=i686
    4.38 +endif
    4.39 +
    4.40 +ifdef CONFIG_MPENTIUMIII
    4.41 +CFLAGS += -march=i686
    4.42 +endif
    4.43 +
    4.44 +ifdef CONFIG_MPENTIUM4
    4.45 +CFLAGS += -march=i686
    4.46 +endif
    4.47 +
    4.48 +ifdef CONFIG_MK7
    4.49 +CFLAGS += $(call check_gcc,-march=athlon,-march=i686 -malign-functions=4)
    4.50 +endif
    4.51 +
    4.52 +HEAD := arch/xen/kernel/head.o arch/xen/kernel/init_task.o
    4.53 +
    4.54 +SUBDIRS += arch/xen/kernel arch/xen/mm arch/xen/lib
    4.55 +SUBDIRS += arch/xen/drivers/console arch/xen/drivers/network
    4.56 +SUBDIRS += arch/xen/drivers/evtchn arch/xen/drivers/block
    4.57 +SUBDIRS += arch/xen/drivers/balloon arch/xen/drivers/vnetif
    4.58 +ifdef CONFIG_XEN_PRIVILEGED_GUEST
    4.59 +SUBDIRS += arch/xen/drivers/dom0 
    4.60 +endif
    4.61 +
    4.62 +CORE_FILES += arch/xen/kernel/kernel.o arch/xen/mm/mm.o
    4.63 +CORE_FILES += arch/xen/drivers/evtchn/drv.o
    4.64 +CORE_FILES += arch/xen/drivers/console/drv.o
    4.65 +CORE_FILES += arch/xen/drivers/block/drv.o
    4.66 +CORE_FILES += arch/xen/drivers/network/drv.o
    4.67 +CORE_FILES += arch/xen/drivers/vnetif/drv.o
    4.68 +ifdef CONFIG_XEN_PRIVILEGED_GUEST
    4.69 +CORE_FILES += arch/xen/drivers/dom0/drv.o
    4.70 +endif
    4.71 +CORE_FILES += arch/xen/drivers/balloon/drv.o
    4.72 +LIBS := $(TOPDIR)/arch/xen/lib/lib.a $(LIBS) $(TOPDIR)/arch/xen/lib/lib.a
    4.73 +
    4.74 +arch/xen/kernel: dummy
    4.75 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/kernel
    4.76 +
    4.77 +arch/xen/mm: dummy
    4.78 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/mm
    4.79 +
    4.80 +arch/xen/drivers/console: dummy
    4.81 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/console
    4.82 +
    4.83 +arch/xen/drivers/network: dummy
    4.84 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/network
    4.85 +
    4.86 +arch/xen/drivers/block: dummy
    4.87 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/block
    4.88 +
    4.89 +arch/xen/drivers/dom0: dummy
    4.90 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/dom0
    4.91 +
    4.92 +arch/xen/drivers/balloon: dummy
    4.93 +	$(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/balloon
    4.94 +
    4.95 +MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
    4.96 +
    4.97 +vmlinux: arch/xen/vmlinux.lds
    4.98 +
    4.99 +FORCE: ;
   4.100 +
   4.101 +.PHONY: bzImage compressed clean archclean archmrproper archdep
   4.102 +
   4.103 +bzImage: vmlinux
   4.104 +	@$(MAKEBOOT) xenolinux.gz
   4.105 +
   4.106 +install: bzImage
   4.107 +	mkdir -p $(prefix)/boot
   4.108 +	install -m0644 arch/$(ARCH)/boot/xenolinux.gz $(prefix)/boot/xenolinux.gz
   4.109 +
   4.110 +dist: bzImage
   4.111 +	mkdir -p ../install/boot
   4.112 +	install -m0644 arch/$(ARCH)/boot/xenolinux.gz ../install/boot/xenolinux.gz
   4.113 +
   4.114 +archclean:
   4.115 +	@$(MAKEBOOT) clean
   4.116 +
   4.117 +archmrproper:
   4.118 +	rm -f include/asm-xen/hypervisor-ifs/arch
   4.119 +
   4.120 +archdep:
   4.121 +	rm -f include/asm-xen/hypervisor-ifs/arch
   4.122 +	( cd include/asm-xen/hypervisor-ifs ; rm -rf arch ; ln -sf arch-$(SUBARCH) arch)
   4.123 +	@$(MAKEBOOT) dep
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/boot/Makefile	Tue Mar 23 10:40:28 2004 +0000
     5.3 @@ -0,0 +1,22 @@
     5.4 +#
     5.5 +# arch/xen/boot/Makefile
     5.6 +#
     5.7 +
     5.8 +xenolinux.gz: xenolinux
     5.9 +	gzip -f -9 < $< > $@
    5.10 +
    5.11 +xenolinux: $(TOPDIR)/vmlinux
    5.12 +	# Guest OS header -- first 8 bytes are identifier 'XenGuest'.
    5.13 +	echo -e -n 'XenGuest' >$@ 
    5.14 +	# Guest OS header -- next 4 bytes are load address (0xC0000000).
    5.15 +	echo -e -n '\000\000\000\300' >>$@
    5.16 +	$(OBJCOPY) $< xenolinux.body
    5.17 +	# Guest OS header is immediately followed by raw OS image.
    5.18 +	# Start address must be at byte 0.
    5.19 +	cat xenolinux.body >>$@
    5.20 +	rm -f xenolinux.body
    5.21 +
    5.22 +dep:
    5.23 +
    5.24 +clean:
    5.25 +	rm -f xenolinux xenolinux.gz
    5.26 \ No newline at end of file
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/config.in	Tue Mar 23 10:40:28 2004 +0000
     6.3 @@ -0,0 +1,190 @@
     6.4 +#
     6.5 +# For a description of the syntax of this configuration file,
     6.6 +# see Documentation/kbuild/config-language.txt.
     6.7 +#
     6.8 +mainmenu_name "Linux Kernel Configuration"
     6.9 +
    6.10 +define_bool CONFIG_XEN y
    6.11 +
    6.12 +define_bool CONFIG_X86 y
    6.13 +define_bool CONFIG_ISA y
    6.14 +define_bool CONFIG_SBUS n
    6.15 +
    6.16 +define_bool CONFIG_UID16 y
    6.17 +
    6.18 +mainmenu_option next_comment
    6.19 +comment 'Xenolinux'
    6.20 +bool 'Support for privileged operations (domain 0)' CONFIG_XEN_PRIVILEGED_GUEST
    6.21 +endmenu
    6.22 +# The IBM S/390 patch needs this.
    6.23 +define_bool CONFIG_NO_IDLE_HZ y
    6.24 +
    6.25 +mainmenu_option next_comment
    6.26 +comment 'Code maturity level options'
    6.27 +bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
    6.28 +endmenu
    6.29 +
    6.30 +mainmenu_option next_comment
    6.31 +comment 'Loadable module support'
    6.32 +bool 'Enable loadable module support' CONFIG_MODULES
    6.33 +if [ "$CONFIG_MODULES" = "y" ]; then
    6.34 +   bool '  Set version information on all module symbols' CONFIG_MODVERSIONS
    6.35 +   bool '  Kernel module loader' CONFIG_KMOD
    6.36 +fi
    6.37 +endmenu
    6.38 +
    6.39 +mainmenu_option next_comment
    6.40 +comment 'Processor type and features'
    6.41 +choice 'Processor family' \
    6.42 +	"Pentium-Pro/Celeron/Pentium-II		CONFIG_M686 \
    6.43 +	 Pentium-III/Celeron(Coppermine)	CONFIG_MPENTIUMIII \
    6.44 +	 Pentium-4				CONFIG_MPENTIUM4 \
    6.45 +	 Athlon/Duron/K7			CONFIG_MK7 \
    6.46 +	 Opteron/Athlon64/Hammer/K8             CONFIG_MK8 \
    6.47 +	 VIA-C3-2                               CONFIG_MVIAC3_2" Pentium-Pro
    6.48 +
    6.49 +   define_bool CONFIG_X86_WP_WORKS_OK y
    6.50 +   define_bool CONFIG_X86_INVLPG y
    6.51 +   define_bool CONFIG_X86_CMPXCHG y
    6.52 +   define_bool CONFIG_X86_XADD y
    6.53 +   define_bool CONFIG_X86_BSWAP y
    6.54 +   define_bool CONFIG_X86_POPAD_OK y
    6.55 +   define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
    6.56 +   define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
    6.57 +
    6.58 +   define_bool CONFIG_X86_GOOD_APIC y
    6.59 +   define_bool CONFIG_X86_PGE y
    6.60 +   define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
    6.61 +   define_bool CONFIG_X86_TSC y
    6.62 +
    6.63 +if [ "$CONFIG_M686" = "y" ]; then
    6.64 +   define_int  CONFIG_X86_L1_CACHE_SHIFT 5
    6.65 +fi
    6.66 +if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
    6.67 +   define_int  CONFIG_X86_L1_CACHE_SHIFT 5
    6.68 +fi
    6.69 +if [ "$CONFIG_MPENTIUM4" = "y" ]; then
    6.70 +   define_int  CONFIG_X86_L1_CACHE_SHIFT 7
    6.71 +fi
    6.72 +if [ "$CONFIG_MK8" = "y" ]; then
    6.73 +   define_bool CONFIG_MK7 y
    6.74 +fi
    6.75 +if [ "$CONFIG_MK7" = "y" ]; then
    6.76 +   define_int  CONFIG_X86_L1_CACHE_SHIFT 6
    6.77 +   define_bool CONFIG_X86_USE_3DNOW y
    6.78 +fi
    6.79 +if [ "$CONFIG_MVIAC3_2" = "y" ]; then
    6.80 +   define_int  CONFIG_X86_L1_CACHE_SHIFT 5
    6.81 +fi
    6.82 +
    6.83 +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
    6.84 +   tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD
    6.85 +fi
    6.86 +
    6.87 +choice 'High Memory Support' \
    6.88 +	"off    CONFIG_NOHIGHMEM \
    6.89 +	 4GB    CONFIG_HIGHMEM4G \
    6.90 +	 64GB   CONFIG_HIGHMEM64G" off
    6.91 +if [ "$CONFIG_HIGHMEM4G" = "y" ]; then
    6.92 +   define_bool CONFIG_HIGHMEM y
    6.93 +fi
    6.94 +if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
    6.95 +   define_bool CONFIG_HIGHMEM y
    6.96 +   define_bool CONFIG_X86_PAE y
    6.97 +fi
    6.98 +
    6.99 +#bool 'Symmetric multi-processing support' CONFIG_SMP
   6.100 +#if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
   6.101 +#   define_bool CONFIG_HAVE_DEC_LOCK y
   6.102 +#fi
   6.103 +endmenu
   6.104 +
   6.105 +mainmenu_option next_comment
   6.106 +comment 'General setup'
   6.107 +
   6.108 +bool 'Networking support' CONFIG_NET
   6.109 +
   6.110 +bool 'PCI support' CONFIG_PCI
   6.111 +if [ "$CONFIG_PCI" = "y" ]; then
   6.112 +   tristate '    3c590/3c900 series (592/595/597) "Vortex/Boomerang" support' CONFIG_VORTEX
   6.113 +   tristate 'Intel(R) PRO/1000 Gigabit Ethernet support' CONFIG_E1000
   6.114 +   if [ "$CONFIG_E1000" != "n" ]; then
   6.115 +      bool '  Use Rx Polling (NAPI)' CONFIG_E1000_NAPI
   6.116 +   fi
   6.117 +fi
   6.118 +source drivers/pci/Config.in
   6.119 +
   6.120 +bool 'System V IPC' CONFIG_SYSVIPC
   6.121 +bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
   6.122 +bool 'Sysctl support' CONFIG_SYSCTL
   6.123 +if [ "$CONFIG_PROC_FS" = "y" ]; then
   6.124 +   choice 'Kernel core (/proc/kcore) format' \
   6.125 +	"ELF		CONFIG_KCORE_ELF	\
   6.126 +	 A.OUT		CONFIG_KCORE_AOUT" ELF
   6.127 +fi
   6.128 +tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
   6.129 +tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
   6.130 +tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
   6.131 +bool 'Select task to kill on out of memory condition' CONFIG_OOM_KILLER
   6.132 +
   6.133 +endmenu
   6.134 +
   6.135 +if [ "$CONFIG_NET" = "y" ]; then
   6.136 +   source net/Config.in
   6.137 +fi
   6.138 +
   6.139 +
   6.140 +#
   6.141 +# Block device driver configuration
   6.142 +#
   6.143 +mainmenu_option next_comment
   6.144 +comment 'Block devices'
   6.145 +tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
   6.146 +dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
   6.147 +tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
   6.148 +if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
   6.149 +   int '  Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
   6.150 +fi
   6.151 +dep_bool '  Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD $CONFIG_BLK_DEV_RAM
   6.152 +bool 'Per partition statistics in /proc/partitions' CONFIG_BLK_STATS
   6.153 +bool 'XenoLinux virtual block device support' CONFIG_XEN_VBD
   6.154 +#endmenu
   6.155 +define_bool CONFIG_BLK_DEV_HD n
   6.156 +endmenu
   6.157 +
   6.158 +source drivers/char/Config.in
   6.159 +
   6.160 +source fs/Config.in
   6.161 +
   6.162 +mainmenu_option next_comment
   6.163 +comment 'Console drivers'
   6.164 +
   6.165 +bool 'Xen console support' CONFIG_XEN_CONSOLE
   6.166 +
   6.167 +if [ "$CONFIG_VT" = "y" ]; then
   6.168 +   bool 'VGA text console' CONFIG_VGA_CONSOLE
   6.169 +   bool 'Dummy console' CONFIG_DUMMY_CONSOLE 
   6.170 +fi
   6.171 +endmenu
   6.172 +
   6.173 +mainmenu_option next_comment
   6.174 +comment 'Kernel hacking'
   6.175 +
   6.176 +bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
   6.177 +if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
   6.178 +   bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
   6.179 +   bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
   6.180 +   bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
   6.181 +   bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
   6.182 +   bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
   6.183 +   bool '  Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE
   6.184 +   bool '  Load all symbols for debugging' CONFIG_KALLSYMS
   6.185 +   bool '  Compile the kernel with frame pointers' CONFIG_FRAME_POINTER
   6.186 +fi
   6.187 +
   6.188 +int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
   6.189 +
   6.190 +endmenu
   6.191 +
   6.192 +source crypto/Config.in
   6.193 +source lib/Config.in
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/defconfig	Tue Mar 23 10:40:28 2004 +0000
     7.3 @@ -0,0 +1,457 @@
     7.4 +#
     7.5 +# Automatically generated make config: don't edit
     7.6 +#
     7.7 +CONFIG_XEN=y
     7.8 +CONFIG_X86=y
     7.9 +CONFIG_ISA=y
    7.10 +# CONFIG_SBUS is not set
    7.11 +CONFIG_UID16=y
    7.12 +
    7.13 +#
    7.14 +# Xenolinux options
    7.15 +#
    7.16 +# support for privileged domains
    7.17 +CONFIG_XEN_PRIVILEGED_GUEST=y
    7.18 +# on-demand timer setting (taken from s390 patch set)
    7.19 +CONFIG_NO_IDLE_HZ=y
    7.20 +
    7.21 +#
    7.22 +# Code maturity level options
    7.23 +#
    7.24 +# CONFIG_EXPERIMENTAL is not set
    7.25 +
    7.26 +#
    7.27 +# Loadable module support
    7.28 +#
    7.29 +CONFIG_MODULES=y
    7.30 +CONFIG_MODVERSIONS=y
    7.31 +CONFIG_KMOD=y
    7.32 +
    7.33 +#
    7.34 +# Processor type and features
    7.35 +#
    7.36 +CONFIG_M686=y
    7.37 +# CONFIG_MPENTIUMIII is not set
    7.38 +# CONFIG_MPENTIUM4 is not set
    7.39 +# CONFIG_MK7 is not set
    7.40 +CONFIG_X86_WP_WORKS_OK=y
    7.41 +CONFIG_X86_INVLPG=y
    7.42 +CONFIG_X86_CMPXCHG=y
    7.43 +CONFIG_X86_XADD=y
    7.44 +CONFIG_X86_BSWAP=y
    7.45 +CONFIG_X86_POPAD_OK=y
    7.46 +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
    7.47 +CONFIG_RWSEM_XCHGADD_ALGORITHM=y
    7.48 +CONFIG_X86_GOOD_APIC=y
    7.49 +CONFIG_X86_PGE=y
    7.50 +CONFIG_X86_USE_PPRO_CHECKSUM=y
    7.51 +CONFIG_X86_TSC=y
    7.52 +CONFIG_X86_L1_CACHE_SHIFT=5
    7.53 +CONFIG_NOHIGHMEM=y
    7.54 +# CONFIG_HIGHMEM4G is not set
    7.55 +# CONFIG_HIGHMEM64G is not set
    7.56 +
    7.57 +#
    7.58 +# General setup
    7.59 +#
    7.60 +CONFIG_NET=y
    7.61 +# CONFIG_PCI is not set
    7.62 +# CONFIG_PCI_NAMES is not set
    7.63 +CONFIG_SYSVIPC=y
    7.64 +# CONFIG_BSD_PROCESS_ACCT is not set
    7.65 +CONFIG_SYSCTL=y
    7.66 +CONFIG_KCORE_ELF=y
    7.67 +# CONFIG_KCORE_AOUT is not set
    7.68 +CONFIG_BINFMT_AOUT=y
    7.69 +CONFIG_BINFMT_ELF=y
    7.70 +# CONFIG_BINFMT_MISC is not set
    7.71 +# CONFIG_OOM_KILLER is not set
    7.72 +
    7.73 +#
    7.74 +# Networking options
    7.75 +#
    7.76 +CONFIG_PACKET=y
    7.77 +CONFIG_PACKET_MMAP=y
    7.78 +# CONFIG_NETLINK_DEV is not set
    7.79 +CONFIG_NETFILTER=y
    7.80 +# CONFIG_NETFILTER_DEBUG is not set
    7.81 +CONFIG_FILTER=y
    7.82 +CONFIG_UNIX=y
    7.83 +CONFIG_INET=y
    7.84 +# CONFIG_IP_MULTICAST is not set
    7.85 +# CONFIG_IP_ADVANCED_ROUTER is not set
    7.86 +CONFIG_IP_PNP=y
    7.87 +CONFIG_IP_PNP_DHCP=y
    7.88 +# CONFIG_IP_PNP_BOOTP is not set
    7.89 +# CONFIG_IP_PNP_RARP is not set
    7.90 +# CONFIG_NET_IPIP is not set
    7.91 +# CONFIG_NET_IPGRE is not set
    7.92 +# CONFIG_INET_ECN is not set
    7.93 +# CONFIG_SYN_COOKIES is not set
    7.94 +
    7.95 +#
    7.96 +#   IP: Netfilter Configuration
    7.97 +#
    7.98 +CONFIG_IP_NF_CONNTRACK=y
    7.99 +CONFIG_IP_NF_FTP=y
   7.100 +# CONFIG_IP_NF_AMANDA is not set
   7.101 +CONFIG_IP_NF_TFTP=y
   7.102 +CONFIG_IP_NF_IRC=y
   7.103 +CONFIG_IP_NF_IPTABLES=y
   7.104 +# CONFIG_IP_NF_MATCH_LIMIT is not set
   7.105 +# CONFIG_IP_NF_MATCH_MAC is not set
   7.106 +# CONFIG_IP_NF_MATCH_PKTTYPE is not set
   7.107 +# CONFIG_IP_NF_MATCH_MARK is not set
   7.108 +# CONFIG_IP_NF_MATCH_MULTIPORT is not set
   7.109 +# CONFIG_IP_NF_MATCH_TOS is not set
   7.110 +# CONFIG_IP_NF_MATCH_RECENT is not set
   7.111 +# CONFIG_IP_NF_MATCH_ECN is not set
   7.112 +# CONFIG_IP_NF_MATCH_DSCP is not set
   7.113 +# CONFIG_IP_NF_MATCH_AH_ESP is not set
   7.114 +# CONFIG_IP_NF_MATCH_LENGTH is not set
   7.115 +# CONFIG_IP_NF_MATCH_TTL is not set
   7.116 +# CONFIG_IP_NF_MATCH_TCPMSS is not set
   7.117 +# CONFIG_IP_NF_MATCH_HELPER is not set
   7.118 +CONFIG_IP_NF_MATCH_STATE=y
   7.119 +CONFIG_IP_NF_MATCH_CONNTRACK=y
   7.120 +CONFIG_IP_NF_FILTER=y
   7.121 +CONFIG_IP_NF_TARGET_REJECT=y
   7.122 +CONFIG_IP_NF_NAT=y
   7.123 +CONFIG_IP_NF_NAT_NEEDED=y
   7.124 +CONFIG_IP_NF_TARGET_MASQUERADE=y
   7.125 +CONFIG_IP_NF_TARGET_REDIRECT=y
   7.126 +# CONFIG_IP_NF_NAT_LOCAL is not set
   7.127 +CONFIG_IP_NF_NAT_IRC=y
   7.128 +CONFIG_IP_NF_NAT_FTP=y
   7.129 +CONFIG_IP_NF_NAT_TFTP=y
   7.130 +# CONFIG_IP_NF_MANGLE is not set
   7.131 +CONFIG_IP_NF_TARGET_LOG=y
   7.132 +CONFIG_IP_NF_TARGET_ULOG=y
   7.133 +# CONFIG_IP_NF_TARGET_TCPMSS is not set
   7.134 +# CONFIG_IP_NF_ARPTABLES is not set
   7.135 +
   7.136 +#
   7.137 +#   IP: Virtual Server Configuration
   7.138 +#
   7.139 +# CONFIG_IP_VS is not set
   7.140 +# CONFIG_VLAN_8021Q is not set
   7.141 +
   7.142 +#
   7.143 +#  
   7.144 +#
   7.145 +# CONFIG_IPX is not set
   7.146 +# CONFIG_ATALK is not set
   7.147 +
   7.148 +#
   7.149 +# Appletalk devices
   7.150 +#
   7.151 +# CONFIG_DEV_APPLETALK is not set
   7.152 +# CONFIG_DECNET is not set
   7.153 +# CONFIG_BRIDGE is not set
   7.154 +
   7.155 +#
   7.156 +# QoS and/or fair queueing
   7.157 +#
   7.158 +# CONFIG_NET_SCHED is not set
   7.159 +
   7.160 +#
   7.161 +# Network testing
   7.162 +#
   7.163 +# CONFIG_NET_PKTGEN is not set
   7.164 +
   7.165 +#
   7.166 +# Block devices
   7.167 +#
   7.168 +CONFIG_BLK_DEV_LOOP=y
   7.169 +CONFIG_BLK_DEV_NBD=y
   7.170 +CONFIG_BLK_DEV_RAM=y
   7.171 +CONFIG_BLK_DEV_RAM_SIZE=4096
   7.172 +CONFIG_BLK_DEV_INITRD=y
   7.173 +# CONFIG_BLK_STATS is not set
   7.174 +CONFIG_XEN_VBD=y
   7.175 +# CONFIG_BLK_DEV_HD is not set
   7.176 +
   7.177 +#
   7.178 +# Character devices
   7.179 +#
   7.180 +CONFIG_VT=y
   7.181 +CONFIG_VT_CONSOLE=y
   7.182 +# CONFIG_SERIAL is not set
   7.183 +# CONFIG_SERIAL_EXTENDED is not set
   7.184 +# CONFIG_SERIAL_NONSTANDARD is not set
   7.185 +CONFIG_UNIX98_PTYS=y
   7.186 +CONFIG_UNIX98_PTY_COUNT=256
   7.187 +# CONFIG_PRINTER is not set
   7.188 +# CONFIG_PPDEV is not set
   7.189 +# CONFIG_TIPAR is not set
   7.190 +
   7.191 +#
   7.192 +# I2C support
   7.193 +#
   7.194 +# CONFIG_I2C is not set
   7.195 +
   7.196 +#
   7.197 +# Mice
   7.198 +#
   7.199 +# CONFIG_BUSMOUSE is not set
   7.200 +CONFIG_MOUSE=y
   7.201 +CONFIG_PSMOUSE=y
   7.202 +# CONFIG_82C710_MOUSE is not set
   7.203 +# CONFIG_PC110_PAD is not set
   7.204 +# CONFIG_MK712_MOUSE is not set
   7.205 +
   7.206 +#
   7.207 +# Joysticks
   7.208 +#
   7.209 +# CONFIG_INPUT_GAMEPORT is not set
   7.210 +# CONFIG_INPUT_NS558 is not set
   7.211 +# CONFIG_INPUT_LIGHTNING is not set
   7.212 +# CONFIG_INPUT_PCIGAME is not set
   7.213 +# CONFIG_INPUT_CS461X is not set
   7.214 +# CONFIG_INPUT_EMU10K1 is not set
   7.215 +# CONFIG_INPUT_SERIO is not set
   7.216 +# CONFIG_INPUT_SERPORT is not set
   7.217 +
   7.218 +#
   7.219 +# Joysticks
   7.220 +#
   7.221 +# CONFIG_INPUT_ANALOG is not set
   7.222 +# CONFIG_INPUT_A3D is not set
   7.223 +# CONFIG_INPUT_ADI is not set
   7.224 +# CONFIG_INPUT_COBRA is not set
   7.225 +# CONFIG_INPUT_GF2K is not set
   7.226 +# CONFIG_INPUT_GRIP is not set
   7.227 +# CONFIG_INPUT_INTERACT is not set
   7.228 +# CONFIG_INPUT_TMDC is not set
   7.229 +# CONFIG_INPUT_SIDEWINDER is not set
   7.230 +# CONFIG_INPUT_IFORCE_USB is not set
   7.231 +# CONFIG_INPUT_IFORCE_232 is not set
   7.232 +# CONFIG_INPUT_WARRIOR is not set
   7.233 +# CONFIG_INPUT_MAGELLAN is not set
   7.234 +# CONFIG_INPUT_SPACEORB is not set
   7.235 +# CONFIG_INPUT_SPACEBALL is not set
   7.236 +# CONFIG_INPUT_STINGER is not set
   7.237 +# CONFIG_INPUT_DB9 is not set
   7.238 +# CONFIG_INPUT_GAMECON is not set
   7.239 +# CONFIG_INPUT_TURBOGRAFX is not set
   7.240 +# CONFIG_QIC02_TAPE is not set
   7.241 +# CONFIG_IPMI_HANDLER is not set
   7.242 +# CONFIG_IPMI_PANIC_EVENT is not set
   7.243 +# CONFIG_IPMI_DEVICE_INTERFACE is not set
   7.244 +# CONFIG_IPMI_KCS is not set
   7.245 +# CONFIG_IPMI_WATCHDOG is not set
   7.246 +
   7.247 +#
   7.248 +# Watchdog Cards
   7.249 +#
   7.250 +# CONFIG_WATCHDOG is not set
   7.251 +# CONFIG_SCx200 is not set
   7.252 +# CONFIG_SCx200_GPIO is not set
   7.253 +# CONFIG_AMD_RNG is not set
   7.254 +# CONFIG_INTEL_RNG is not set
   7.255 +# CONFIG_HW_RANDOM is not set
   7.256 +# CONFIG_AMD_PM768 is not set
   7.257 +# CONFIG_NVRAM is not set
   7.258 +# CONFIG_RTC is not set
   7.259 +# CONFIG_DTLK is not set
   7.260 +# CONFIG_R3964 is not set
   7.261 +# CONFIG_APPLICOM is not set
   7.262 +
   7.263 +#
   7.264 +# Ftape, the floppy tape device driver
   7.265 +#
   7.266 +# CONFIG_FTAPE is not set
   7.267 +# CONFIG_AGP is not set
   7.268 +
   7.269 +#
   7.270 +# Direct Rendering Manager (XFree86 DRI support)
   7.271 +#
   7.272 +# CONFIG_DRM is not set
   7.273 +# CONFIG_MWAVE is not set
   7.274 +# CONFIG_OBMOUSE is not set
   7.275 +
   7.276 +#
   7.277 +# File systems
   7.278 +#
   7.279 +# CONFIG_QUOTA is not set
   7.280 +# CONFIG_QFMT_V2 is not set
   7.281 +CONFIG_AUTOFS_FS=y
   7.282 +CONFIG_AUTOFS4_FS=y
   7.283 +# CONFIG_REISERFS_FS is not set
   7.284 +# CONFIG_REISERFS_CHECK is not set
   7.285 +# CONFIG_REISERFS_PROC_INFO is not set
   7.286 +# CONFIG_ADFS_FS is not set
   7.287 +# CONFIG_ADFS_FS_RW is not set
   7.288 +# CONFIG_AFFS_FS is not set
   7.289 +# CONFIG_HFS_FS is not set
   7.290 +# CONFIG_HFSPLUS_FS is not set
   7.291 +# CONFIG_BEFS_FS is not set
   7.292 +# CONFIG_BEFS_DEBUG is not set
   7.293 +# CONFIG_BFS_FS is not set
   7.294 +CONFIG_EXT3_FS=y
   7.295 +CONFIG_JBD=y
   7.296 +# CONFIG_JBD_DEBUG is not set
   7.297 +CONFIG_FAT_FS=y
   7.298 +CONFIG_MSDOS_FS=y
   7.299 +CONFIG_UMSDOS_FS=y
   7.300 +CONFIG_VFAT_FS=y
   7.301 +# CONFIG_EFS_FS is not set
   7.302 +# CONFIG_JFFS_FS is not set
   7.303 +# CONFIG_JFFS2_FS is not set
   7.304 +# CONFIG_CRAMFS is not set
   7.305 +CONFIG_TMPFS=y
   7.306 +CONFIG_RAMFS=y
   7.307 +CONFIG_ISO9660_FS=y
   7.308 +CONFIG_JOLIET=y
   7.309 +CONFIG_ZISOFS=y
   7.310 +# CONFIG_JFS_FS is not set
   7.311 +# CONFIG_JFS_DEBUG is not set
   7.312 +# CONFIG_JFS_STATISTICS is not set
   7.313 +# CONFIG_MINIX_FS is not set
   7.314 +# CONFIG_VXFS_FS is not set
   7.315 +# CONFIG_NTFS_FS is not set
   7.316 +# CONFIG_NTFS_RW is not set
   7.317 +# CONFIG_HPFS_FS is not set
   7.318 +CONFIG_PROC_FS=y
   7.319 +# CONFIG_DEVFS_FS is not set
   7.320 +# CONFIG_DEVFS_MOUNT is not set
   7.321 +# CONFIG_DEVFS_DEBUG is not set
   7.322 +CONFIG_DEVPTS_FS=y
   7.323 +# CONFIG_QNX4FS_FS is not set
   7.324 +# CONFIG_QNX4FS_RW is not set
   7.325 +# CONFIG_ROMFS_FS is not set
   7.326 +CONFIG_EXT2_FS=y
   7.327 +# CONFIG_SYSV_FS is not set
   7.328 +# CONFIG_UDF_FS is not set
   7.329 +# CONFIG_UDF_RW is not set
   7.330 +# CONFIG_UFS_FS is not set
   7.331 +# CONFIG_UFS_FS_WRITE is not set
   7.332 +# CONFIG_XFS_FS is not set
   7.333 +# CONFIG_XFS_QUOTA is not set
   7.334 +# CONFIG_XFS_RT is not set
   7.335 +# CONFIG_XFS_TRACE is not set
   7.336 +# CONFIG_XFS_DEBUG is not set
   7.337 +
   7.338 +#
   7.339 +# Network File Systems
   7.340 +#
   7.341 +# CONFIG_CODA_FS is not set
   7.342 +# CONFIG_INTERMEZZO_FS is not set
   7.343 +CONFIG_NFS_FS=y
   7.344 +CONFIG_NFS_V3=y
   7.345 +# CONFIG_NFS_DIRECTIO is not set
   7.346 +CONFIG_ROOT_NFS=y
   7.347 +CONFIG_NFSD=y
   7.348 +CONFIG_NFSD_V3=y
   7.349 +# CONFIG_NFSD_TCP is not set
   7.350 +CONFIG_SUNRPC=y
   7.351 +CONFIG_LOCKD=y
   7.352 +CONFIG_LOCKD_V4=y
   7.353 +# CONFIG_SMB_FS is not set
   7.354 +# CONFIG_NCP_FS is not set
   7.355 +# CONFIG_NCPFS_PACKET_SIGNING is not set
   7.356 +# CONFIG_NCPFS_IOCTL_LOCKING is not set
   7.357 +# CONFIG_NCPFS_STRONG is not set
   7.358 +# CONFIG_NCPFS_NFS_NS is not set
   7.359 +# CONFIG_NCPFS_OS2_NS is not set
   7.360 +# CONFIG_NCPFS_SMALLDOS is not set
   7.361 +# CONFIG_NCPFS_NLS is not set
   7.362 +# CONFIG_NCPFS_EXTRAS is not set
   7.363 +CONFIG_ZISOFS_FS=y
   7.364 +
   7.365 +#
   7.366 +# Partition Types
   7.367 +#
   7.368 +CONFIG_PARTITION_ADVANCED=y
   7.369 +# CONFIG_ACORN_PARTITION is not set
   7.370 +# CONFIG_OSF_PARTITION is not set
   7.371 +# CONFIG_AMIGA_PARTITION is not set
   7.372 +# CONFIG_ATARI_PARTITION is not set
   7.373 +# CONFIG_MAC_PARTITION is not set
   7.374 +CONFIG_MSDOS_PARTITION=y
   7.375 +# CONFIG_BSD_DISKLABEL is not set
   7.376 +# CONFIG_MINIX_SUBPARTITION is not set
   7.377 +# CONFIG_SOLARIS_X86_PARTITION is not set
   7.378 +# CONFIG_UNIXWARE_DISKLABEL is not set
   7.379 +# CONFIG_LDM_PARTITION is not set
   7.380 +# CONFIG_SGI_PARTITION is not set
   7.381 +# CONFIG_ULTRIX_PARTITION is not set
   7.382 +# CONFIG_SUN_PARTITION is not set
   7.383 +# CONFIG_EFI_PARTITION is not set
   7.384 +# CONFIG_SMB_NLS is not set
   7.385 +CONFIG_NLS=y
   7.386 +
   7.387 +#
   7.388 +# Native Language Support
   7.389 +#
   7.390 +CONFIG_NLS_DEFAULT="iso8559-1"
   7.391 +# CONFIG_NLS_CODEPAGE_437 is not set
   7.392 +# CONFIG_NLS_CODEPAGE_737 is not set
   7.393 +# CONFIG_NLS_CODEPAGE_775 is not set
   7.394 +# CONFIG_NLS_CODEPAGE_850 is not set
   7.395 +# CONFIG_NLS_CODEPAGE_852 is not set
   7.396 +# CONFIG_NLS_CODEPAGE_855 is not set
   7.397 +# CONFIG_NLS_CODEPAGE_857 is not set
   7.398 +# CONFIG_NLS_CODEPAGE_860 is not set
   7.399 +# CONFIG_NLS_CODEPAGE_861 is not set
   7.400 +# CONFIG_NLS_CODEPAGE_862 is not set
   7.401 +# CONFIG_NLS_CODEPAGE_863 is not set
   7.402 +# CONFIG_NLS_CODEPAGE_864 is not set
   7.403 +# CONFIG_NLS_CODEPAGE_865 is not set
   7.404 +# CONFIG_NLS_CODEPAGE_866 is not set
   7.405 +# CONFIG_NLS_CODEPAGE_869 is not set
   7.406 +# CONFIG_NLS_CODEPAGE_936 is not set
   7.407 +# CONFIG_NLS_CODEPAGE_950 is not set
   7.408 +# CONFIG_NLS_CODEPAGE_932 is not set
   7.409 +# CONFIG_NLS_CODEPAGE_949 is not set
   7.410 +# CONFIG_NLS_CODEPAGE_874 is not set
   7.411 +# CONFIG_NLS_ISO8859_8 is not set
   7.412 +# CONFIG_NLS_CODEPAGE_1250 is not set
   7.413 +# CONFIG_NLS_CODEPAGE_1251 is not set
   7.414 +CONFIG_NLS_ISO8859_1=y
   7.415 +# CONFIG_NLS_ISO8859_2 is not set
   7.416 +# CONFIG_NLS_ISO8859_3 is not set
   7.417 +# CONFIG_NLS_ISO8859_4 is not set
   7.418 +# CONFIG_NLS_ISO8859_5 is not set
   7.419 +# CONFIG_NLS_ISO8859_6 is not set
   7.420 +# CONFIG_NLS_ISO8859_7 is not set
   7.421 +# CONFIG_NLS_ISO8859_9 is not set
   7.422 +# CONFIG_NLS_ISO8859_13 is not set
   7.423 +# CONFIG_NLS_ISO8859_14 is not set
   7.424 +# CONFIG_NLS_ISO8859_15 is not set
   7.425 +# CONFIG_NLS_KOI8_R is not set
   7.426 +# CONFIG_NLS_KOI8_U is not set
   7.427 +# CONFIG_NLS_UTF8 is not set
   7.428 +
   7.429 +#
   7.430 +# Console drivers
   7.431 +#
   7.432 +CONFIG_XEN_CONSOLE=y
   7.433 +CONFIG_VGA_CONSOLE=y
   7.434 +CONFIG_DUMMY_CONSOLE=y
   7.435 +
   7.436 +#
   7.437 +# Kernel hacking
   7.438 +#
   7.439 +CONFIG_DEBUG_KERNEL=y
   7.440 +# CONFIG_DEBUG_HIGHMEM is not set
   7.441 +# CONFIG_DEBUG_SLAB is not set
   7.442 +# CONFIG_DEBUG_IOVIRT is not set
   7.443 +# CONFIG_MAGIC_SYSRQ is not set
   7.444 +# CONFIG_DEBUG_SPINLOCK is not set
   7.445 +# CONFIG_DEBUG_BUGVERBOSE is not set
   7.446 +CONFIG_KALLSYMS=y
   7.447 +# CONFIG_FRAME_POINTER is not set
   7.448 +CONFIG_LOG_BUF_SHIFT=0
   7.449 +
   7.450 +#
   7.451 +# Cryptographic options
   7.452 +#
   7.453 +# CONFIG_CRYPTO is not set
   7.454 +
   7.455 +#
   7.456 +# Library routines
   7.457 +#
   7.458 +# CONFIG_CRC32 is not set
   7.459 +CONFIG_ZLIB_INFLATE=y
   7.460 +# CONFIG_ZLIB_DEFLATE is not set
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/Makefile	Tue Mar 23 10:40:28 2004 +0000
     8.3 @@ -0,0 +1,3 @@
     8.4 +O_TARGET := drv.o
     8.5 +obj-y := balloon.o
     8.6 +include $(TOPDIR)/Rules.make
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/balloon/balloon.c	Tue Mar 23 10:40:28 2004 +0000
     9.3 @@ -0,0 +1,282 @@
     9.4 +/******************************************************************************
     9.5 + * balloon.c
     9.6 + *
     9.7 + * Xen balloon driver - enables returning/claiming memory to/from Xen.
     9.8 + *
     9.9 + * Copyright (c) 2003, B Dragovic
    9.10 + */
    9.11 +
    9.12 +#include <linux/config.h>
    9.13 +#include <linux/module.h>
    9.14 +#include <linux/kernel.h>
    9.15 +#include <linux/sched.h>
    9.16 +#include <linux/errno.h>
    9.17 +#include <asm/xen_proc.h>
    9.18 +
    9.19 +#include <linux/mm.h>
    9.20 +#include <linux/mman.h>
    9.21 +#include <linux/smp_lock.h>
    9.22 +#include <linux/pagemap.h>
    9.23 +
    9.24 +#include <asm/hypervisor.h>
    9.25 +#include <asm/pgalloc.h>
    9.26 +#include <asm/pgtable.h>
    9.27 +#include <asm/uaccess.h>
    9.28 +#include <asm/tlb.h>
    9.29 +
    9.30 +#include <asm/hypervisor-ifs/dom_mem_ops.h>
    9.31 +
    9.32 +/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
    9.33 +#define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
    9.34 +#define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
    9.35 +typedef struct user_balloon_op {
    9.36 +    unsigned int  op;
    9.37 +    unsigned long size;
    9.38 +} user_balloon_op_t;
    9.39 +/* END OF USER DEFINE */
    9.40 +
     9.41 +/* Dead entry written into balloon-owned entries in the PMT. */
    9.42 +#define DEAD 0xdeadbeef
    9.43 +
    9.44 +static struct proc_dir_entry *balloon_pde;
    9.45 +unsigned long credit;
    9.46 +
    9.47 +static inline pte_t *get_ptep(unsigned long addr)
    9.48 +{
    9.49 +    pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
    9.50 +    pgd = pgd_offset_k(addr);
    9.51 +
    9.52 +    if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
    9.53 +
    9.54 +    pmd = pmd_offset(pgd, addr);
    9.55 +    if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
    9.56 +
    9.57 +    ptep = pte_offset(pmd, addr);
    9.58 +
    9.59 +    return ptep;
    9.60 +}
    9.61 +
    9.62 +/* main function for relinquishing bit of memory */
    9.63 +static unsigned long inflate_balloon(unsigned long num_pages)
    9.64 +{
    9.65 +    dom_mem_op_t dom_mem_op;
    9.66 +    unsigned long *parray;
    9.67 +    unsigned long *currp;
    9.68 +    unsigned long curraddr;
    9.69 +    unsigned long ret = 0;
    9.70 +    unsigned long vaddr;
    9.71 +    unsigned long i, j;
    9.72 +
    9.73 +    parray = (unsigned long *)kmalloc(num_pages *
    9.74 +                                      sizeof(unsigned long), GFP_KERNEL);
    9.75 +    currp = parray;
    9.76 +
    9.77 +    for ( i = 0; i < num_pages; i++ )
    9.78 +    {
    9.79 +        /* try to obtain a free page, has to be done with GFP_ATOMIC
     9.80 +         * as we do not want to sleep indefinitely.
    9.81 +         */
    9.82 +        vaddr = __get_free_page(GFP_ATOMIC);
    9.83 +
    9.84 +        /* if allocation fails, free all reserved pages */
    9.85 +        if(!vaddr){
    9.86 +            printk("Unable to inflate balloon by %ld, only %ld pages free.",
    9.87 +                   num_pages, i);
    9.88 +            currp = parray;
    9.89 +            for(j = 0; j < i; j++){
    9.90 +                free_page(*currp++);
    9.91 +            }
    9.92 +            goto cleanup;
    9.93 +        }
    9.94 +
    9.95 +        *currp++ = vaddr;
    9.96 +    }
    9.97 +
    9.98 +
    9.99 +    currp = parray;
   9.100 +    for ( i = 0; i < num_pages; i++ )
   9.101 +    {
   9.102 +        curraddr = *currp;
   9.103 +        *currp = virt_to_machine(*currp) >> PAGE_SHIFT;
   9.104 +        queue_l1_entry_update(get_ptep(curraddr), 0);
   9.105 +        phys_to_machine_mapping[__pa(curraddr) >> PAGE_SHIFT] = DEAD;
   9.106 +        currp++;
   9.107 +    }
   9.108 +
   9.109 +    XEN_flush_page_update_queue();
   9.110 +
   9.111 +    dom_mem_op.op = MEMOP_RESERVATION_DECREASE;
   9.112 +    dom_mem_op.u.decrease.size  = num_pages;
   9.113 +    dom_mem_op.u.decrease.pages = parray;
   9.114 +    if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages )
   9.115 +    {
   9.116 +        printk("Unable to inflate balloon, error %lx\n", ret);
   9.117 +        goto cleanup;
   9.118 +    }
   9.119 +
   9.120 +    credit += num_pages;
   9.121 +    ret = num_pages;
   9.122 +
   9.123 + cleanup:
   9.124 +    kfree(parray);
   9.125 +
   9.126 +    return ret;
   9.127 +}
   9.128 +
   9.129 +/* install new mem pages obtained by deflate_balloon. function walks 
   9.130 + * phys->machine mapping table looking for DEAD entries and populates
   9.131 + * them.
   9.132 + */
   9.133 +static unsigned long process_new_pages(unsigned long * parray, 
   9.134 +                                       unsigned long num)
   9.135 +{
   9.136 +    /* currently, this function is rather simplistic as 
   9.137 +     * it is assumed that domain reclaims only number of 
   9.138 +     * pages previously released. this is to change soon
   9.139 +     * and the code to extend page tables etc. will be 
   9.140 +     * incorporated here.
   9.141 +     */
   9.142 +     
   9.143 +    unsigned long tot_pages = start_info.nr_pages;   
   9.144 +    unsigned long * curr = parray;
   9.145 +    unsigned long num_installed;
   9.146 +    unsigned long i;
   9.147 +
   9.148 +    num_installed = 0;
   9.149 +    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
   9.150 +    {
   9.151 +        if ( phys_to_machine_mapping[i] == DEAD )
   9.152 +        {
   9.153 +            phys_to_machine_mapping[i] = *curr;
   9.154 +            queue_l1_entry_update(
   9.155 +                (pte_t *)((i << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE), i);
   9.156 +            queue_l1_entry_update(
   9.157 +                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
   9.158 +                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
   9.159 +
   9.160 +            *curr = (unsigned long)__va(i << PAGE_SHIFT);
   9.161 +            curr++;
   9.162 +            num_installed++;
   9.163 +        }
   9.164 +    }
   9.165 +
   9.166 +    /* now, this is tricky (and will also change for machine addrs that 
   9.167 +      * are mapped to not previously released addresses). we free pages
   9.168 +      * that were allocated by get_free_page (the mappings are different 
   9.169 +      * now, of course).
   9.170 +      */
   9.171 +    curr = parray;
   9.172 +    for ( i = 0; i < num_installed; i++ )
   9.173 +    {
   9.174 +        free_page(*curr);
   9.175 +        curr++;
   9.176 +    }
   9.177 +
   9.178 +    return num_installed;
   9.179 +}
   9.180 +
   9.181 +unsigned long deflate_balloon(unsigned long num_pages)
   9.182 +{
   9.183 +    dom_mem_op_t dom_mem_op;
   9.184 +    unsigned long ret;
   9.185 +    unsigned long * parray;
   9.186 +
   9.187 +    printk(KERN_ALERT "bd240 debug: deflate balloon called for %lx pages\n", num_pages);
   9.188 +
   9.189 +    if ( num_pages > credit )
   9.190 +    {
   9.191 +        printk("Can not allocate more pages than previously released.\n");
   9.192 +        return -EAGAIN;
   9.193 +    }
   9.194 +
   9.195 +    parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long), 
   9.196 +                                      GFP_KERNEL);
   9.197 +
   9.198 +    dom_mem_op.op = MEMOP_RESERVATION_INCREASE;
   9.199 +    dom_mem_op.u.increase.size = num_pages;
   9.200 +    dom_mem_op.u.increase.pages = parray;
   9.201 +    if((ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages){
   9.202 +        printk("Unable to deflate balloon, error %lx\n", ret);
   9.203 +        goto cleanup;
   9.204 +    }
   9.205 +
   9.206 +    if((ret = process_new_pages(parray, num_pages)) < num_pages){
   9.207 +        printk("Unable to deflate balloon by specified %lx pages, only %lx.\n",
   9.208 +               num_pages, ret);
   9.209 +        goto cleanup;
   9.210 +    }
   9.211 +
   9.212 +    ret = num_pages;
   9.213 +    credit -= num_pages;
   9.214 +
   9.215 + cleanup:
   9.216 +    kfree(parray);
   9.217 +
   9.218 +    return ret;
   9.219 +}
   9.220 +
   9.221 +static int balloon_write(struct file *file, const char *buffer,
   9.222 +                         u_long count, void *data)
   9.223 +{
   9.224 +    user_balloon_op_t bop;
   9.225 +
   9.226 +    /* Only admin can play with the balloon :) */
   9.227 +    if ( !capable(CAP_SYS_ADMIN) )
   9.228 +        return -EPERM;
   9.229 +
   9.230 +    if ( copy_from_user(&bop, buffer, sizeof(bop)) )
   9.231 +        return -EFAULT;
   9.232 +
   9.233 +    switch ( bop.op )
   9.234 +    {
   9.235 +    case USER_INFLATE_BALLOON:
   9.236 +        if ( inflate_balloon(bop.size) < bop.size )
   9.237 +            return -EAGAIN;
   9.238 +        break;
   9.239 +        
   9.240 +    case USER_DEFLATE_BALLOON:
   9.241 +        deflate_balloon(bop.size);
   9.242 +        break;
   9.243 +
   9.244 +    default:
   9.245 +        printk("Unknown command to balloon driver.");
   9.246 +        return -EFAULT;
   9.247 +    }
   9.248 +
   9.249 +    return sizeof(bop);
   9.250 +}
   9.251 +
   9.252 +/*
   9.253 + * main balloon driver initialization function.
   9.254 + */
   9.255 +static int __init init_module(void)
   9.256 +{
   9.257 +    printk(KERN_ALERT "Starting Xen Balloon driver\n");
   9.258 +
   9.259 +    credit = 0;
   9.260 +
   9.261 +    balloon_pde = create_xen_proc_entry("balloon", 0600);
   9.262 +    if ( balloon_pde == NULL )
   9.263 +    {
   9.264 +        printk(KERN_ALERT "Unable to create balloon driver proc entry!");
   9.265 +        return -1;
   9.266 +    }
   9.267 +
   9.268 +    balloon_pde->write_proc = balloon_write;
   9.269 +
   9.270 +    return 0;
   9.271 +}
   9.272 +
   9.273 +static void __exit cleanup_module(void)
   9.274 +{
   9.275 +    if ( balloon_pde != NULL )
   9.276 +    {
   9.277 +        remove_xen_proc_entry("balloon");
   9.278 +        balloon_pde = NULL;
   9.279 +    }
   9.280 +}
   9.281 +
   9.282 +module_init(init_module);
   9.283 +module_exit(cleanup_module);
   9.284 +
   9.285 +
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/block/Makefile	Tue Mar 23 10:40:28 2004 +0000
    10.3 @@ -0,0 +1,3 @@
    10.4 +O_TARGET := drv.o
    10.5 +obj-y := block.o vbd.o
    10.6 +include $(TOPDIR)/Rules.make
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.c	Tue Mar 23 10:40:28 2004 +0000
    11.3 @@ -0,0 +1,621 @@
    11.4 +/******************************************************************************
    11.5 + * block.c
    11.6 + * 
    11.7 + * Xenolinux virtual block-device driver.
    11.8 + * 
    11.9 + * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
   11.10 + * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
   11.11 + */
   11.12 +
   11.13 +#include "block.h"
   11.14 +#include <linux/blk.h>
   11.15 +#include <linux/cdrom.h>
   11.16 +#include <linux/tqueue.h>
   11.17 +#include <linux/sched.h>
   11.18 +#include <scsi/scsi.h>
   11.19 +
   11.20 +#include <linux/interrupt.h>
   11.21 +
   11.22 +typedef unsigned char byte; /* from linux/ide.h */
   11.23 +
   11.24 +#define XLBLK_RESPONSE_IRQ HYPEREVENT_IRQ(_EVENT_BLKDEV)
   11.25 +#define XLBLK_UPDATE_IRQ   HYPEREVENT_IRQ(_EVENT_VBD_UPD)
   11.26 +#define DEBUG_IRQ          HYPEREVENT_IRQ(_EVENT_DEBUG)
   11.27 +
   11.28 +#define STATE_ACTIVE    0
   11.29 +#define STATE_SUSPENDED 1
   11.30 +#define STATE_CLOSED    2
   11.31 +static unsigned int state = STATE_SUSPENDED;
   11.32 +
   11.33 +static blk_ring_t *blk_ring;
   11.34 +static BLK_RING_IDX resp_cons; /* Response consumer for comms ring. */
   11.35 +static BLK_RING_IDX req_prod;  /* Private request producer.         */
   11.36 +
   11.37 +/* We plug the I/O ring if the driver is suspended or if the ring is full. */
   11.38 +#define RING_PLUGGED (((req_prod - resp_cons) == BLK_RING_SIZE) || \
   11.39 +                      (state != STATE_ACTIVE))
   11.40 +
   11.41 +
   11.42 +/*
   11.43 + * Request queues with outstanding work, but ring is currently full.
   11.44 + * We need no special lock here, as we always access this with the
   11.45 + * io_request_lock held. We only need a small maximum list.
   11.46 + */
   11.47 +#define MAX_PENDING 8
   11.48 +static request_queue_t *pending_queues[MAX_PENDING];
   11.49 +static int nr_pending;
   11.50 +
   11.51 +static kdev_t        sg_dev;
   11.52 +static int           sg_operation = -1;
   11.53 +static unsigned long sg_next_sect;
   11.54 +#define DISABLE_SCATTERGATHER() (sg_operation = -1)
   11.55 +
   11.56 +static inline void signal_requests_to_xen(void)
   11.57 +{
   11.58 +    block_io_op_t op; 
   11.59 +
   11.60 +    DISABLE_SCATTERGATHER();
   11.61 +    blk_ring->req_prod = req_prod;
   11.62 +
   11.63 +    op.cmd = BLOCK_IO_OP_SIGNAL; 
   11.64 +    HYPERVISOR_block_io_op(&op);
   11.65 +    return;
   11.66 +}
   11.67 +
   11.68 +
   11.69 +/*
   11.70 + * xlblk_update_int/update-vbds_task - handle VBD update events from Xen
   11.71 + * 
   11.72 + * Schedule a task for keventd to run, which will update the VBDs and perform 
   11.73 + * the corresponding updates to our view of VBD state, so the XenoLinux will 
   11.74 + * respond to changes / additions / deletions to the set of VBDs automatically.
   11.75 + */
   11.76 +static struct tq_struct update_tq;
   11.77 +static void update_vbds_task(void *unused)
   11.78 +{ 
   11.79 +    xlvbd_update_vbds();
   11.80 +}
   11.81 +static void xlblk_update_int(int irq, void *dev_id, struct pt_regs *ptregs)
   11.82 +{
   11.83 +    update_tq.routine = update_vbds_task;
   11.84 +    schedule_task(&update_tq);
   11.85 +}
   11.86 +
   11.87 +
   11.88 +int xen_block_open(struct inode *inode, struct file *filep)
   11.89 +{
   11.90 +    short xldev = inode->i_rdev; 
   11.91 +    struct gendisk *gd = get_gendisk(xldev);
   11.92 +    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
   11.93 +    short minor = MINOR(xldev); 
   11.94 +
   11.95 +    if ( gd->part[minor].nr_sects == 0 )
   11.96 +    { 
   11.97 +        /*
   11.98 +         * Device either doesn't exist, or has zero capacity; we use a few
   11.99 +         * cheesy heuristics to return the relevant error code
  11.100 +         */
  11.101 +        if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
  11.102 +             ((minor & (gd->max_p - 1)) != 0) )
  11.103 +        { 
  11.104 +            /*
  11.105 +             * We have a real device, but no such partition, or we just have a
  11.106 +             * partition number so guess this is the problem.
  11.107 +             */
  11.108 +            return -ENXIO;     /* no such device or address */
  11.109 +        }
  11.110 +        else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
  11.111 +        {
  11.112 +            /* This is a removable device => assume that media is missing. */ 
  11.113 +            return -ENOMEDIUM; /* media not present (this is a guess) */
  11.114 +        } 
  11.115 +        else
  11.116 +        { 
  11.117 +            /* Just go for the general 'no such device' error. */
  11.118 +            return -ENODEV;    /* no such device */
  11.119 +        }
  11.120 +    }
  11.121 +    
  11.122 +    /* Update of usage count is protected by per-device semaphore. */
  11.123 +    disk->usage++;
  11.124 +
  11.125 +    return 0;
  11.126 +}
  11.127 +
  11.128 +
  11.129 +int xen_block_release(struct inode *inode, struct file *filep)
  11.130 +{
  11.131 +    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
  11.132 +
  11.133 +    /*
  11.134 +     * When usage drops to zero it may allow more VBD updates to occur.
  11.135 +     * Update of usage count is protected by a per-device semaphore.
  11.136 +     */
  11.137 +    if ( --disk->usage == 0 )
  11.138 +    {
  11.139 +        update_tq.routine = update_vbds_task;
  11.140 +        schedule_task(&update_tq);
  11.141 +    }
  11.142 +
  11.143 +    return 0;
  11.144 +}
  11.145 +
  11.146 +
  11.147 +int xen_block_ioctl(struct inode *inode, struct file *filep,
  11.148 +                          unsigned command, unsigned long argument)
  11.149 +{
  11.150 +    kdev_t dev = inode->i_rdev;
  11.151 +    struct hd_geometry *geo = (struct hd_geometry *)argument;
  11.152 +    struct gendisk *gd;     
  11.153 +    struct hd_struct *part; 
  11.154 +    int i;
  11.155 +
  11.156 +    /* NB. No need to check permissions. That is done for us. */
  11.157 +    
  11.158 +    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
  11.159 +                  command, (long) argument, dev); 
  11.160 +  
  11.161 +    gd = get_gendisk(dev);
  11.162 +    part = &gd->part[MINOR(dev)]; 
  11.163 +
  11.164 +    switch ( command )
  11.165 +    {
  11.166 +    case BLKGETSIZE:
  11.167 +        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
  11.168 +        return put_user(part->nr_sects, (unsigned long *) argument);
  11.169 +
  11.170 +    case BLKGETSIZE64:
  11.171 +        DPRINTK_IOCTL("   BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
  11.172 +                      (u64)part->nr_sects * 512);
  11.173 +        return put_user((u64)part->nr_sects * 512, (u64 *) argument);
  11.174 +
  11.175 +    case BLKRRPART:                               /* re-read partition table */
  11.176 +        DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART);
  11.177 +        return xen_block_revalidate(dev);
  11.178 +
  11.179 +    case BLKSSZGET:
  11.180 +        return hardsect_size[MAJOR(dev)][MINOR(dev)]; 
  11.181 +
  11.182 +    case BLKBSZGET:                                        /* get block size */
  11.183 +        DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
  11.184 +        break;
  11.185 +
  11.186 +    case BLKBSZSET:                                        /* set block size */
  11.187 +        DPRINTK_IOCTL("   BLKBSZSET: %x\n", BLKBSZSET);
  11.188 +        break;
  11.189 +
  11.190 +    case BLKRASET:                                         /* set read-ahead */
  11.191 +        DPRINTK_IOCTL("   BLKRASET: %x\n", BLKRASET);
  11.192 +        break;
  11.193 +
  11.194 +    case BLKRAGET:                                         /* get read-ahead */
  11.195 +        DPRINTK_IOCTL("   BLKRAFET: %x\n", BLKRAGET);
  11.196 +        break;
  11.197 +
  11.198 +    case HDIO_GETGEO:
  11.199 +        /* note: these values are complete garbage */
  11.200 +        DPRINTK_IOCTL("   HDIO_GETGEO: %x\n", HDIO_GETGEO);
  11.201 +        if (!argument) return -EINVAL;
  11.202 +        if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
  11.203 +        if (put_user(0xff,  (byte *)&geo->heads)) return -EFAULT;
  11.204 +        if (put_user(0x3f,  (byte *)&geo->sectors)) return -EFAULT;
  11.205 +        if (put_user(0x106, (unsigned short *)&geo->cylinders)) return -EFAULT;
  11.206 +        return 0;
  11.207 +
  11.208 +    case HDIO_GETGEO_BIG: 
  11.209 +        /* note: these values are complete garbage */
  11.210 +        DPRINTK_IOCTL("   HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
  11.211 +        if (!argument) return -EINVAL;
  11.212 +        if (put_user(0x00,  (unsigned long *) &geo->start))  return -EFAULT;
  11.213 +        if (put_user(0xff,  (byte *)&geo->heads))   return -EFAULT;
  11.214 +        if (put_user(0x3f,  (byte *)&geo->sectors)) return -EFAULT;
  11.215 +        if (put_user(0x106, (unsigned int *) &geo->cylinders)) return -EFAULT;
  11.216 +        return 0;
  11.217 +
  11.218 +    case CDROMMULTISESSION:
  11.219 +        DPRINTK("FIXME: support multisession CDs later\n");
  11.220 +        for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
  11.221 +            if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
  11.222 +        return 0;
  11.223 +
  11.224 +    case SCSI_IOCTL_GET_BUS_NUMBER:
  11.225 +        DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in Xen blkdev");
  11.226 +        return -ENOSYS;
  11.227 +
  11.228 +    default:
  11.229 +        printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);
  11.230 +        return -ENOSYS;
  11.231 +    }
  11.232 +    
  11.233 +    return 0;
  11.234 +}
  11.235 +
  11.236 +/* check media change: should probably do something here in some cases :-) */
  11.237 +int xen_block_check(kdev_t dev)
  11.238 +{
  11.239 +    DPRINTK("xen_block_check\n");
  11.240 +    return 0;
  11.241 +}
  11.242 +
  11.243 +int xen_block_revalidate(kdev_t dev)
  11.244 +{
  11.245 +    struct block_device *bd;
  11.246 +    struct gendisk *gd;
  11.247 +    xl_disk_t *disk;
  11.248 +    unsigned long capacity;
  11.249 +    int i, rc = 0;
  11.250 +    
  11.251 +    if ( (bd = bdget(dev)) == NULL )
  11.252 +        return -EINVAL;
  11.253 +
  11.254 +    /*
  11.255 +     * Update of partition info, and check of usage count, is protected
  11.256 +     * by the per-block-device semaphore.
  11.257 +     */
  11.258 +    down(&bd->bd_sem);
  11.259 +
  11.260 +    if ( ((gd = get_gendisk(dev)) == NULL) ||
  11.261 +         ((disk = xldev_to_xldisk(dev)) == NULL) ||
  11.262 +         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
  11.263 +    {
  11.264 +        rc = -EINVAL;
  11.265 +        goto out;
  11.266 +    }
  11.267 +
  11.268 +    if ( disk->usage > 1 )
  11.269 +    {
  11.270 +        rc = -EBUSY;
  11.271 +        goto out;
  11.272 +    }
  11.273 +
  11.274 +    /* Only reread partition table if VBDs aren't mapped to partitions. */
  11.275 +    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
  11.276 +    {
  11.277 +        for ( i = gd->max_p - 1; i >= 0; i-- )
  11.278 +        {
  11.279 +            invalidate_device(dev+i, 1);
  11.280 +            gd->part[MINOR(dev+i)].start_sect = 0;
  11.281 +            gd->part[MINOR(dev+i)].nr_sects   = 0;
  11.282 +            gd->sizes[MINOR(dev+i)]           = 0;
  11.283 +        }
  11.284 +
  11.285 +        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
  11.286 +    }
  11.287 +
  11.288 + out:
  11.289 +    up(&bd->bd_sem);
  11.290 +    bdput(bd);
  11.291 +    return rc;
  11.292 +}
  11.293 +
  11.294 +
  11.295 +/*
  11.296 + * hypervisor_request
  11.297 + *
  11.298 + * request block io 
  11.299 + * 
  11.300 + * id: for guest use only.
  11.301 + * operation: XEN_BLOCK_{READ,WRITE,PROBE,VBD*}
  11.302 + * buffer: buffer to read/write into. this should be a
  11.303 + *   virtual address in the guest os.
  11.304 + */
  11.305 +static int hypervisor_request(unsigned long   id,
  11.306 +                              int             operation,
  11.307 +                              char *          buffer,
  11.308 +                              unsigned long   sector_number,
  11.309 +                              unsigned short  nr_sectors,
  11.310 +                              kdev_t          device)
  11.311 +{
  11.312 +    unsigned long buffer_ma = phys_to_machine(virt_to_phys(buffer)); 
  11.313 +    struct gendisk *gd;
  11.314 +    blk_ring_req_entry_t *req;
  11.315 +    struct buffer_head *bh;
  11.316 +
  11.317 +    if ( unlikely(nr_sectors >= (1<<9)) )
  11.318 +        BUG();
  11.319 +    if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
  11.320 +        BUG();
  11.321 +
  11.322 +    if ( unlikely(state == STATE_CLOSED) )
  11.323 +        return 1;
  11.324 +
  11.325 +    switch ( operation )
  11.326 +    {
  11.327 +
  11.328 +    case XEN_BLOCK_READ:
  11.329 +    case XEN_BLOCK_WRITE:
  11.330 +        gd = get_gendisk(device); 
  11.331 +
  11.332 +        /*
  11.333 +         * Update the sector_number we'll pass down as appropriate; note that
  11.334 +         * we could sanity check that resulting sector will be in this
  11.335 +         * partition, but this will happen in xen anyhow.
  11.336 +         */
  11.337 +        sector_number += gd->part[MINOR(device)].start_sect;
  11.338 +
  11.339 +        /*
  11.340 +         * If this unit doesn't consist of virtual (i.e., Xen-specified)
  11.341 +         * partitions then we clear the partn bits from the device number.
  11.342 +         */
  11.343 +        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 
  11.344 +               GENHD_FL_VIRT_PARTNS) )
  11.345 +            device &= ~(gd->max_p - 1);
  11.346 +
  11.347 +        if ( (sg_operation == operation) &&
  11.348 +             (sg_dev == device) &&
  11.349 +             (sg_next_sect == sector_number) )
  11.350 +        {
  11.351 +            req = &blk_ring->ring[MASK_BLK_IDX(req_prod-1)].req;
  11.352 +            bh = (struct buffer_head *)id;
  11.353 +            bh->b_reqnext = (struct buffer_head *)req->id;
  11.354 +            req->id = id;
  11.355 +            req->buffer_and_sects[req->nr_segments] = buffer_ma | nr_sectors;
  11.356 +            if ( ++req->nr_segments < MAX_BLK_SEGS )
  11.357 +                sg_next_sect += nr_sectors;
  11.358 +            else
  11.359 +                DISABLE_SCATTERGATHER();
  11.360 +            return 0;
  11.361 +        }
  11.362 +        else if ( RING_PLUGGED )
  11.363 +        {
  11.364 +            return 1;
  11.365 +        }
  11.366 +        else
  11.367 +        {
  11.368 +            sg_operation = operation;
  11.369 +            sg_dev       = device;
  11.370 +            sg_next_sect = sector_number + nr_sectors;
  11.371 +        }
  11.372 +        break;
  11.373 +
  11.374 +    default:
  11.375 +        panic("unknown op %d\n", operation);
  11.376 +    }
  11.377 +
  11.378 +    /* Fill out a communications ring structure. */
  11.379 +    req = &blk_ring->ring[MASK_BLK_IDX(req_prod)].req;
  11.380 +    req->id            = id;
  11.381 +    req->operation     = operation;
  11.382 +    req->sector_number = (xen_sector_t)sector_number;
  11.383 +    req->device        = device; 
  11.384 +    req->nr_segments   = 1;
  11.385 +    req->buffer_and_sects[0] = buffer_ma | nr_sectors;
  11.386 +    req_prod++;
  11.387 +
  11.388 +    return 0;
  11.389 +}
  11.390 +
  11.391 +
  11.392 +/*
  11.393 + * do_xlblk_request
  11.394 + *  read a block; request is in a request queue
  11.395 + */
  11.396 +void do_xlblk_request(request_queue_t *rq)
  11.397 +{
  11.398 +    struct request *req;
  11.399 +    struct buffer_head *bh, *next_bh;
  11.400 +    int rw, nsect, full, queued = 0;
  11.401 +
  11.402 +    DPRINTK("xlblk.c::do_xlblk_request\n"); 
  11.403 +
  11.404 +    while ( !rq->plugged && !list_empty(&rq->queue_head))
  11.405 +    {
  11.406 +        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
  11.407 +            goto out;
  11.408 +  
  11.409 +        DPRINTK("do_xlblk_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
  11.410 +                req, req->cmd, req->sector,
  11.411 +                req->current_nr_sectors, req->nr_sectors, req->bh);
  11.412 +
  11.413 +        rw = req->cmd;
  11.414 +        if ( rw == READA )
  11.415 +            rw = READ;
  11.416 +        if ( unlikely((rw != READ) && (rw != WRITE)) )
  11.417 +            panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
  11.418 +
  11.419 +        req->errors = 0;
  11.420 +
  11.421 +        bh = req->bh;
  11.422 +        while ( bh != NULL )
  11.423 +        {
  11.424 +            next_bh = bh->b_reqnext;
  11.425 +            bh->b_reqnext = NULL;
  11.426 +
  11.427 +            full = hypervisor_request(
  11.428 +                (unsigned long)bh,
  11.429 +                (rw == READ) ? XEN_BLOCK_READ : XEN_BLOCK_WRITE, 
  11.430 +                bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
  11.431 +
  11.432 +            if ( full )
  11.433 +            { 
  11.434 +                bh->b_reqnext = next_bh;
  11.435 +                pending_queues[nr_pending++] = rq;
  11.436 +                if ( unlikely(nr_pending >= MAX_PENDING) )
  11.437 +                    BUG();
  11.438 +                goto out; 
  11.439 +            }
  11.440 +
  11.441 +            queued++;
  11.442 +
  11.443 +            /* Dequeue the buffer head from the request. */
  11.444 +            nsect = bh->b_size >> 9;
  11.445 +            bh = req->bh = next_bh;
  11.446 +            
  11.447 +            if ( bh != NULL )
  11.448 +            {
  11.449 +                /* There's another buffer head to do. Update the request. */
  11.450 +                req->hard_sector += nsect;
  11.451 +                req->hard_nr_sectors -= nsect;
  11.452 +                req->sector = req->hard_sector;
  11.453 +                req->nr_sectors = req->hard_nr_sectors;
  11.454 +                req->current_nr_sectors = bh->b_size >> 9;
  11.455 +                req->buffer = bh->b_data;
  11.456 +            }
  11.457 +            else
  11.458 +            {
  11.459 +                /* That was the last buffer head. Finalise the request. */
  11.460 +                if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
  11.461 +                    BUG();
  11.462 +                blkdev_dequeue_request(req);
  11.463 +                end_that_request_last(req);
  11.464 +            }
  11.465 +        }
  11.466 +    }
  11.467 +
  11.468 + out:
  11.469 +    if ( queued != 0 ) signal_requests_to_xen();
  11.470 +}
  11.471 +
  11.472 +
  11.473 +static void kick_pending_request_queues(void)
  11.474 +{
  11.475 +    /* We kick pending request queues if the ring is reasonably empty. */
  11.476 +    if ( (nr_pending != 0) && 
  11.477 +         ((req_prod - resp_cons) < (BLK_RING_SIZE >> 1)) )
  11.478 +    {
  11.479 +        /* Attempt to drain the queue, but bail if the ring becomes full. */
  11.480 +        while ( (nr_pending != 0) && !RING_PLUGGED )
  11.481 +            do_xlblk_request(pending_queues[--nr_pending]);
  11.482 +    }
  11.483 +}
  11.484 +
  11.485 +
  11.486 +static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
  11.487 +{
  11.488 +    BLK_RING_IDX i; 
  11.489 +    unsigned long flags; 
  11.490 +    struct buffer_head *bh, *next_bh;
  11.491 +    
  11.492 +    if ( unlikely(state == STATE_CLOSED) )
  11.493 +        return;
  11.494 +    
  11.495 +    spin_lock_irqsave(&io_request_lock, flags);     
  11.496 +
  11.497 +    for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
  11.498 +    {
  11.499 +        blk_ring_resp_entry_t *bret = &blk_ring->ring[MASK_BLK_IDX(i)].resp;
  11.500 +        switch ( bret->operation )
  11.501 +        {
  11.502 +        case XEN_BLOCK_READ:
  11.503 +        case XEN_BLOCK_WRITE:
  11.504 +            if ( unlikely(bret->status != 0) )
  11.505 +                DPRINTK("Bad return from blkdev data request: %lx\n",
  11.506 +                        bret->status);
  11.507 +            for ( bh = (struct buffer_head *)bret->id; 
  11.508 +                  bh != NULL; 
  11.509 +                  bh = next_bh )
  11.510 +            {
  11.511 +                next_bh = bh->b_reqnext;
  11.512 +                bh->b_reqnext = NULL;
  11.513 +                bh->b_end_io(bh, !bret->status);
  11.514 +            }
  11.515 +            break;
  11.516 +     
  11.517 +        default:
  11.518 +            BUG();
  11.519 +        }
  11.520 +    }
  11.521 +    
  11.522 +    resp_cons = i;
  11.523 +
  11.524 +    kick_pending_request_queues();
  11.525 +
  11.526 +    spin_unlock_irqrestore(&io_request_lock, flags);
  11.527 +}
  11.528 +
  11.529 +
  11.530 +static void reset_xlblk_interface(void)
  11.531 +{
  11.532 +    block_io_op_t op; 
  11.533 +
  11.534 +    nr_pending = 0;
  11.535 +
  11.536 +    op.cmd = BLOCK_IO_OP_RESET;
  11.537 +    if ( HYPERVISOR_block_io_op(&op) != 0 )
  11.538 +        printk(KERN_ALERT "Possible blkdev trouble: couldn't reset ring\n");
  11.539 +
  11.540 +    op.cmd = BLOCK_IO_OP_RING_ADDRESS;
  11.541 +    (void)HYPERVISOR_block_io_op(&op);
  11.542 +
  11.543 +    set_fixmap(FIX_BLKRING_BASE, op.u.ring_mfn << PAGE_SHIFT);
  11.544 +    blk_ring = (blk_ring_t *)fix_to_virt(FIX_BLKRING_BASE);
  11.545 +    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
  11.546 +
  11.547 +    wmb();
  11.548 +    state = STATE_ACTIVE;
  11.549 +}
  11.550 +
  11.551 +
  11.552 +int __init xlblk_init(void)
  11.553 +{
  11.554 +    int error; 
  11.555 +
  11.556 +    reset_xlblk_interface();
  11.557 +
  11.558 +    error = request_irq(XLBLK_RESPONSE_IRQ, xlblk_response_int, 
  11.559 +                        SA_SAMPLE_RANDOM, "blkdev", NULL);
  11.560 +    if ( error )
  11.561 +    {
  11.562 +        printk(KERN_ALERT "Could not allocate receive interrupt\n");
  11.563 +        goto fail;
  11.564 +    }
  11.565 +
  11.566 +    error = request_irq(XLBLK_UPDATE_IRQ, xlblk_update_int,
  11.567 +                        SA_INTERRUPT, "blkdev", NULL);
  11.568 +
  11.569 +    if ( error )
  11.570 +    {
  11.571 +        printk(KERN_ALERT "Could not allocate block update interrupt\n");
  11.572 +        goto fail;
  11.573 +    }
  11.574 +
  11.575 +    (void)xlvbd_init();
  11.576 +
  11.577 +    return 0;
  11.578 +
  11.579 + fail:
  11.580 +    return error;
  11.581 +}
  11.582 +
  11.583 +
  11.584 +static void __exit xlblk_cleanup(void)
  11.585 +{
  11.586 +    xlvbd_cleanup();
  11.587 +    free_irq(XLBLK_RESPONSE_IRQ, NULL);
  11.588 +    free_irq(XLBLK_UPDATE_IRQ, NULL);
  11.589 +}
  11.590 +
  11.591 +
  11.592 +#ifdef MODULE
  11.593 +module_init(xlblk_init);
  11.594 +module_exit(xlblk_cleanup);
  11.595 +#endif
  11.596 +
  11.597 +
  11.598 +void blkdev_suspend(void)
  11.599 +{
  11.600 +    state = STATE_SUSPENDED;
  11.601 +    wmb();
  11.602 +
  11.603 +    while ( resp_cons != blk_ring->req_prod )
  11.604 +    {
  11.605 +        barrier();
  11.606 +        current->state = TASK_INTERRUPTIBLE;
  11.607 +        schedule_timeout(1);
  11.608 +    }
  11.609 +
  11.610 +    wmb();
  11.611 +    state = STATE_CLOSED;
  11.612 +    wmb();
  11.613 +
  11.614 +    clear_fixmap(FIX_BLKRING_BASE);
  11.615 +}
  11.616 +
  11.617 +
  11.618 +void blkdev_resume(void)
  11.619 +{
  11.620 +    reset_xlblk_interface();
  11.621 +    spin_lock_irq(&io_request_lock);
  11.622 +    kick_pending_request_queues();
  11.623 +    spin_unlock_irq(&io_request_lock);
  11.624 +}
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/block/block.h	Tue Mar 23 10:40:28 2004 +0000
    12.3 @@ -0,0 +1,82 @@
    12.4 +/******************************************************************************
    12.5 + * block.h
    12.6 + * 
    12.7 + * Shared definitions between all levels of XenoLinux Virtual block devices.
    12.8 + */
    12.9 +
   12.10 +#ifndef __XEN_DRIVERS_BLOCK_H__
   12.11 +#define __XEN_DRIVERS_BLOCK_H__
   12.12 +
   12.13 +#include <linux/config.h>
   12.14 +#include <linux/module.h>
   12.15 +
   12.16 +#include <linux/kernel.h>
   12.17 +#include <linux/sched.h>
   12.18 +#include <linux/slab.h>
   12.19 +#include <linux/string.h>
   12.20 +#include <linux/errno.h>
   12.21 +
   12.22 +#include <linux/fs.h>
   12.23 +#include <linux/hdreg.h>
   12.24 +#include <linux/blkdev.h>
   12.25 +#include <linux/major.h>
   12.26 +
   12.27 +#include <asm/hypervisor-ifs/hypervisor-if.h>
   12.28 +#include <asm/hypervisor-ifs/vbd.h>
   12.29 +#include <asm/io.h>
   12.30 +#include <asm/atomic.h>
   12.31 +#include <asm/uaccess.h>
   12.32 +
   12.33 +#if 0
   12.34 +#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
   12.35 +#else
   12.36 +#define DPRINTK(_f, _a...) ((void)0)
   12.37 +#endif
   12.38 +
   12.39 +#if 0
   12.40 +#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
   12.41 +#else
   12.42 +#define DPRINTK_IOCTL(_f, _a...) ((void)0)
   12.43 +#endif
   12.44 +
   12.45 +/* Private gendisk->flags[] values. */
   12.46 +#define GENHD_FL_XEN        2 /* Is unit a Xen block device?  */
   12.47 +#define GENHD_FL_VIRT_PARTNS 4 /* Are unit partitions virtual? */
   12.48 +
   12.49 +/*
   12.50 + * We have one of these per vbd, whether ide, scsi or 'other'.
   12.51 + * They hang in an array off the gendisk structure. We may end up putting
   12.52 + * all kinds of interesting stuff here :-)
   12.53 + */
   12.54 +typedef struct xl_disk {
   12.55 +    int usage;
   12.56 +} xl_disk_t;
   12.57 +
   12.58 +extern int xen_control_msg(int operration, char *buffer, int size);
   12.59 +extern int xen_block_open(struct inode *inode, struct file *filep);
   12.60 +extern int xen_block_release(struct inode *inode, struct file *filep);
   12.61 +extern int xen_block_ioctl(struct inode *inode, struct file *filep,
   12.62 +                                 unsigned command, unsigned long argument);
   12.63 +extern int xen_block_check(kdev_t dev);
   12.64 +extern int xen_block_revalidate(kdev_t dev);
   12.65 +extern void do_xlblk_request (request_queue_t *rq); 
   12.66 +
   12.67 +extern void xlvbd_update_vbds(void);
   12.68 +
   12.69 +static inline xl_disk_t *xldev_to_xldisk(kdev_t xldev)
   12.70 +{
   12.71 +    struct gendisk *gd = get_gendisk(xldev);
   12.72 +    
   12.73 +    if ( gd == NULL ) 
   12.74 +        return NULL;
   12.75 +    
   12.76 +    return (xl_disk_t *)gd->real_devices + 
   12.77 +        (MINOR(xldev) >> gd->minor_shift);
   12.78 +}
   12.79 +
   12.80 +
   12.81 +/* Virtual block-device subsystem. */
   12.82 +extern int  xlvbd_init(void);
   12.83 +extern void xlvbd_cleanup(void); 
   12.84 +
   12.85 +#endif /* __XEN_DRIVERS_BLOCK_H__ */
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/block/vbd.c	Tue Mar 23 10:40:28 2004 +0000
    13.3 @@ -0,0 +1,561 @@
    13.4 +/******************************************************************************
    13.5 + * vbd.c
    13.6 + * 
    13.7 + * Xenolinux virtual block-device driver (xvd).
    13.8 + * 
    13.9 + * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
   13.10 + * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
   13.11 + */
   13.12 +
   13.13 +#include "block.h"
   13.14 +#include <linux/blk.h>
   13.15 +
   13.16 +/*
   13.17 + * For convenience we distinguish between ide, scsi and 'other' (i.e.
   13.18 + * potentially combinations of the two) in the naming scheme and in a few 
   13.19 + * other places (like default readahead, etc).
   13.20 + */
   13.21 +#define XLIDE_MAJOR_NAME  "hd"
   13.22 +#define XLSCSI_MAJOR_NAME "sd"
   13.23 +#define XLVBD_MAJOR_NAME "xvd"
   13.24 +
   13.25 +#define XLIDE_DEVS_PER_MAJOR   2
   13.26 +#define XLSCSI_DEVS_PER_MAJOR 16
   13.27 +#define XLVBD_DEVS_PER_MAJOR  16
   13.28 +
   13.29 +#define XLIDE_PARTN_SHIFT  6    /* amount to shift minor to get 'real' minor */
   13.30 +#define XLIDE_MAX_PART    (1 << XLIDE_PARTN_SHIFT)     /* minors per ide vbd */
   13.31 +
   13.32 +#define XLSCSI_PARTN_SHIFT 4    /* amount to shift minor to get 'real' minor */
   13.33 +#define XLSCSI_MAX_PART   (1 << XLSCSI_PARTN_SHIFT)   /* minors per scsi vbd */
   13.34 +
   13.35 +#define XLVBD_PARTN_SHIFT  4    /* amount to shift minor to get 'real' minor */
   13.36 +#define XLVBD_MAX_PART    (1 << XLVBD_PARTN_SHIFT) /* minors per 'other' vbd */
   13.37 +
   13.38 +/* The below are for the generic drivers/block/ll_rw_block.c code. */
   13.39 +static int xlide_blksize_size[256];
   13.40 +static int xlide_hardsect_size[256];
   13.41 +static int xlide_max_sectors[256];
   13.42 +static int xlscsi_blksize_size[256];
   13.43 +static int xlscsi_hardsect_size[256];
   13.44 +static int xlscsi_max_sectors[256];
   13.45 +static int xlvbd_blksize_size[256];
   13.46 +static int xlvbd_hardsect_size[256];
   13.47 +static int xlvbd_max_sectors[256];
   13.48 +
   13.49 +/* Information from Xen about our VBDs. */
   13.50 +#define MAX_VBDS 64
   13.51 +static int nr_vbds;
   13.52 +static xen_disk_t *vbd_info;
   13.53 +
   13.54 +static struct block_device_operations xlvbd_block_fops = 
   13.55 +{
   13.56 +    open:               xen_block_open,
   13.57 +    release:            xen_block_release,
   13.58 +    ioctl:              xen_block_ioctl,
   13.59 +    check_media_change: xen_block_check,
   13.60 +    revalidate:         xen_block_revalidate,
   13.61 +};
   13.62 +
   13.63 +static int xlvbd_get_vbd_info(xen_disk_t *disk_info)
   13.64 +{
   13.65 +    int error;
   13.66 +    block_io_op_t op; 
   13.67 +
   13.68 +    /* Probe for disk information. */
   13.69 +    memset(&op, 0, sizeof(op)); 
   13.70 +    op.cmd = BLOCK_IO_OP_VBD_PROBE; 
   13.71 +    op.u.probe_params.domain    = 0; 
   13.72 +    op.u.probe_params.xdi.max   = MAX_VBDS;
   13.73 +    op.u.probe_params.xdi.disks = disk_info;
   13.74 +    op.u.probe_params.xdi.count = 0;
   13.75 +
   13.76 +    if ( (error = HYPERVISOR_block_io_op(&op)) != 0 )
   13.77 +    {
   13.78 +        printk(KERN_ALERT "Could not probe disks (%d)\n", error);
   13.79 +        return -1;
   13.80 +    }
   13.81 +
   13.82 +    return op.u.probe_params.xdi.count;
   13.83 +}
   13.84 +
/*
 * xlvbd_init_device - initialise a VBD device
 * @xd:                a xen_disk_t describing the VBD
 *
 * Takes a xen_disk_t * that describes a VBD the domain has access to.
 * Performs appropriate initialisation and registration of the device.
 *
 * Care needs to be taken when making re-entrant calls to ensure that
 * corruption does not occur.  Also, devices that are in use should not have
 * their details updated.  This is the caller's responsibility.
 *
 * Returns 0 on success, -1 on failure (device in use, or bdget() failed).
 */
static int xlvbd_init_device(xen_disk_t *xd)
{
    int device = xd->device;
    int major  = MAJOR(device); 
    int minor  = MINOR(device);
    int is_ide = IDE_DISK_MAJOR(major);  /* is this an ide device? */
    int is_scsi= SCSI_BLK_MAJOR(major);  /* is this a scsi device? */
    char *major_name;
    struct gendisk *gd;
    struct block_device *bd;
    xl_disk_t *disk;
    int i, rc = 0, max_part, partno;
    unsigned long capacity;

    unsigned char buf[64];  /* scratch space for disk_name() */

    if ( (bd = bdget(device)) == NULL )
        return -1;

    /*
     * Update of partition info, and check of usage count, is protected
     * by the per-block-device semaphore.
     */
    down(&bd->bd_sem);

    /* Refuse to update a device that is currently open. */
    if ( ((disk = xldev_to_xldisk(device)) != NULL) && (disk->usage != 0) )
    {
        printk(KERN_ALERT "VBD update failed - in use [dev=%x]\n", device);
        rc = -1;
        goto out;
    }

    /* Choose the major name and partition granularity by device class. */
    if ( is_ide ) {

        major_name = XLIDE_MAJOR_NAME; 
        max_part   = XLIDE_MAX_PART;

    } else if ( is_scsi ) {

        major_name = XLSCSI_MAJOR_NAME;
        max_part   = XLSCSI_MAX_PART;

    } else if (XD_VIRTUAL(xd->info)) {

        major_name = XLVBD_MAJOR_NAME;
        max_part   = XLVBD_MAX_PART;

    } else { 

        /* SMH: hmm - probably a CCISS driver or sim; assume CCISS for now */
        printk(KERN_ALERT "Assuming device %02x:%02x is CCISS/SCSI\n", 
               major, minor);
        is_scsi    = 1; 
        major_name = "cciss"; 
        max_part   = XLSCSI_MAX_PART;

    }
    
    /* max_part is a power of two; this extracts the partition index. */
    partno = minor & (max_part - 1); 
    
    /* First VBD on this major? Then register the major and build a gendisk. */
    if ( (gd = get_gendisk(device)) == NULL )
    {
        rc = register_blkdev(major, major_name, &xlvbd_block_fops);
        if ( rc < 0 )
        {
            printk(KERN_ALERT "XL VBD: can't get major %d\n", major);
            goto out;
        }

        /* Install the per-class parameter tables for ll_rw_block.c. */
        if ( is_ide )
        { 
            blksize_size[major]  = xlide_blksize_size;
            hardsect_size[major] = xlide_hardsect_size;
            max_sectors[major]   = xlide_max_sectors;
            read_ahead[major]    = 8; /* from drivers/ide/ide-probe.c */
        } 
        else if ( is_scsi )
        { 
            blksize_size[major]  = xlscsi_blksize_size;
            hardsect_size[major] = xlscsi_hardsect_size;
            max_sectors[major]   = xlscsi_max_sectors;
            read_ahead[major]    = 0; /* XXX 8; -- guessing */
        }
        else
        { 
            blksize_size[major]  = xlvbd_blksize_size;
            hardsect_size[major] = xlvbd_hardsect_size;
            max_sectors[major]   = xlvbd_max_sectors;
            read_ahead[major]    = 8;
        }

        blk_init_queue(BLK_DEFAULT_QUEUE(major), do_xlblk_request);

        /*
         * Turn off barking 'headactive' mode. We dequeue buffer heads as
         * soon as we pass them down to Xen.
         */
        blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);

        /* Construct an appropriate gendisk structure. */
        /* NOTE(review): none of the kmalloc() results below are checked for
         * NULL; an allocation failure here would oops. */
        gd             = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
        gd->major      = major;
        gd->major_name = major_name; 
    
        gd->max_p      = max_part; 
        if ( is_ide )
        { 
            gd->minor_shift  = XLIDE_PARTN_SHIFT; 
            gd->nr_real      = XLIDE_DEVS_PER_MAJOR; 
        } 
        else if ( is_scsi )
        { 
            gd->minor_shift  = XLSCSI_PARTN_SHIFT; 
            gd->nr_real      = XLSCSI_DEVS_PER_MAJOR; 
        }
        else
        { 
            gd->minor_shift  = XLVBD_PARTN_SHIFT; 
            gd->nr_real      = XLVBD_DEVS_PER_MAJOR; 
        }

        /* 
        ** The sizes[] and part[] arrays hold the sizes and other 
        ** information about every partition with this 'major' (i.e. 
        ** every disk sharing the 8 bit prefix * max partns per disk) 
        */
        gd->sizes = kmalloc(max_part*gd->nr_real*sizeof(int), GFP_KERNEL);
        gd->part  = kmalloc(max_part*gd->nr_real*sizeof(struct hd_struct), 
                            GFP_KERNEL);
        memset(gd->sizes, 0, max_part * gd->nr_real * sizeof(int));
        memset(gd->part,  0, max_part * gd->nr_real 
               * sizeof(struct hd_struct));


        gd->real_devices = kmalloc(gd->nr_real * sizeof(xl_disk_t), 
                                   GFP_KERNEL);
        memset(gd->real_devices, 0, gd->nr_real * sizeof(xl_disk_t));

        gd->next   = NULL;            
        gd->fops   = &xlvbd_block_fops;

        gd->de_arr = kmalloc(gd->nr_real * sizeof(*gd->de_arr), 
                             GFP_KERNEL);
        gd->flags  = kmalloc(gd->nr_real * sizeof(*gd->flags), GFP_KERNEL);
    
        memset(gd->de_arr, 0, gd->nr_real * sizeof(*gd->de_arr));
        memset(gd->flags, 0, gd->nr_real *  sizeof(*gd->flags));

        add_gendisk(gd);

        blk_size[major] = gd->sizes;
    }

    if ( XD_READONLY(xd->info) )
        set_device_ro(device, 1); 

    /* Mark the whole unit as Xen-managed. */
    gd->flags[minor >> gd->minor_shift] |= GENHD_FL_XEN;

    /* NB. Linux 2.4 only handles 32-bit sector offsets and capacities. */
    capacity = (unsigned long)xd->capacity;

    if ( partno != 0 )
    {
        /*
         * If this was previously set up as a real disc we will have set 
         * up partition-table information. Virtual partitions override 
         * 'real' partitions, and the two cannot coexist on a device.
         */
        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
             (gd->sizes[minor & ~(max_part-1)] != 0) )
        {
            /*
             * Any non-zero sub-partition entries must be cleaned out before
             * installing 'virtual' partition entries. The two types cannot
             * coexist, and virtual partitions are favoured.
             */
            kdev_t dev = device & ~(max_part-1);
            for ( i = max_part - 1; i > 0; i-- )
            {
                invalidate_device(dev+i, 1);
                gd->part[MINOR(dev+i)].start_sect = 0;
                gd->part[MINOR(dev+i)].nr_sects   = 0;
                gd->sizes[MINOR(dev+i)]           = 0;
            }
            printk(KERN_ALERT
                   "Virtual partitions found for /dev/%s - ignoring any "
                   "real partition information we may have found.\n",
                   disk_name(gd, MINOR(device), buf));
        }

        /* Need to skankily setup 'partition' information */
        gd->part[minor].start_sect = 0; 
        gd->part[minor].nr_sects   = capacity; 
        gd->sizes[minor]           = capacity; 

        gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
    }
    else
    {
        gd->part[minor].nr_sects = capacity;
        /* Whole-unit size in blk_size[] is in 1kB units, not 512B sectors. */
        gd->sizes[minor] = capacity>>(BLOCK_SIZE_BITS-9);
        
        /* Some final fix-ups depending on the device type */
        switch ( XD_TYPE(xd->info) )
        { 
        case XD_TYPE_CDROM:
        case XD_TYPE_FLOPPY: 
        case XD_TYPE_TAPE:
            gd->flags[minor >> gd->minor_shift] |= GENHD_FL_REMOVABLE; 
            printk(KERN_ALERT 
                   "Skipping partition check on %s /dev/%s\n", 
                   XD_TYPE(xd->info)==XD_TYPE_CDROM ? "cdrom" : 
                   (XD_TYPE(xd->info)==XD_TYPE_TAPE ? "tape" : 
                    "floppy"), disk_name(gd, MINOR(device), buf)); 
            break; 

        case XD_TYPE_DISK:
            /* Only check partitions on real discs (not virtual!). */
            if ( gd->flags[minor>>gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
            {
                printk(KERN_ALERT
                       "Skipping partition check on virtual /dev/%s\n",
                       disk_name(gd, MINOR(device), buf));
                break;
            }
            register_disk(gd, device, gd->max_p, &xlvbd_block_fops, capacity);
            break; 

        default:
            printk(KERN_ALERT "XenoLinux: unknown device type %d\n", 
                   XD_TYPE(xd->info)); 
            break; 
        }
    }

 out:
    up(&bd->bd_sem);
    bdput(bd);    
    return rc;
}
  13.336 +
  13.337 +
/*
 * xlvbd_remove_device - remove a device node if possible
 * @device:       numeric device ID
 *
 * Updates the gendisk structure and invalidates devices.
 *
 * This is OK for now but in future, should perhaps consider where this should
 * deallocate gendisks / unregister devices.
 *
 * Returns 0 on success, -1 on failure (device in use, or bdget() failed).
 */
static int xlvbd_remove_device(int device)
{
    int i, rc = 0, minor = MINOR(device);
    struct gendisk *gd;
    struct block_device *bd;
    xl_disk_t *disk = NULL;

    if ( (bd = bdget(device)) == NULL )
        return -1;

    /*
     * Update of partition info, and check of usage count, is protected
     * by the per-block-device semaphore.
     */
    down(&bd->bd_sem);

    /* Removal is only requested for devices we previously initialised, so
     * missing gendisk/disk state here indicates driver corruption. */
    if ( ((gd = get_gendisk(device)) == NULL) ||
         ((disk = xldev_to_xldisk(device)) == NULL) )
        BUG();

    if ( disk->usage != 0 )
    {
        printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
        rc = -1;
        goto out;
    }
 
    if ( (minor & (gd->max_p-1)) != 0 )
    {
        /* 1: The VBD is mapped to a partition rather than a whole unit. */
        invalidate_device(device, 1);
        gd->part[minor].start_sect = 0;
        gd->part[minor].nr_sects   = 0;
        gd->sizes[minor]           = 0;

        /* Clear the consists-of-virtual-partitions flag if possible. */
        gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
        for ( i = 1; i < gd->max_p; i++ )
            if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
                gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;

        /*
         * If all virtual partitions are now gone, and a 'whole unit' VBD is
         * present, then we can try to grok the unit's real partition table.
         */
        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
             (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
             !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
        {
            register_disk(gd,
                          device&~(gd->max_p-1), 
                          gd->max_p, 
                          &xlvbd_block_fops,
                          gd->part[minor&~(gd->max_p-1)].nr_sects);
        }
    }
    else
    {
        /*
         * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
         * NB. The partition entries are only cleared if there are no VBDs
         * mapped to individual partitions on this unit.
         */
        i = gd->max_p - 1; /* Default: clear subpartitions as well. */
        if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
            i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
        while ( i >= 0 )
        {
            invalidate_device(device+i, 1);
            gd->part[minor+i].start_sect = 0;
            gd->part[minor+i].nr_sects   = 0;
            gd->sizes[minor+i]           = 0;
            i--;
        }
    }

 out:
    up(&bd->bd_sem);
    bdput(bd);
    return rc;
}
  13.428 +
  13.429 +/*
  13.430 + * xlvbd_update_vbds - reprobes the VBD status and performs updates driver
  13.431 + * state. The VBDs need to be updated in this way when the domain is
  13.432 + * initialised and also each time we receive an XLBLK_UPDATE event.
  13.433 + */
  13.434 +void xlvbd_update_vbds(void)
  13.435 +{
  13.436 +    int i, j, k, old_nr, new_nr;
  13.437 +    xen_disk_t *old_info, *new_info, *merged_info;
  13.438 +
  13.439 +    old_info = vbd_info;
  13.440 +    old_nr   = nr_vbds;
  13.441 +
  13.442 +    new_info = kmalloc(MAX_VBDS * sizeof(xen_disk_t), GFP_KERNEL);
  13.443 +    if ( unlikely(new_nr = xlvbd_get_vbd_info(new_info)) < 0 )
  13.444 +    {
  13.445 +        kfree(new_info);
  13.446 +        return;
  13.447 +    }
  13.448 +
  13.449 +    /*
  13.450 +     * Final list maximum size is old list + new list. This occurs only when
  13.451 +     * old list and new list do not overlap at all, and we cannot yet destroy
  13.452 +     * VBDs in the old list because the usage counts are busy.
  13.453 +     */
  13.454 +    merged_info = kmalloc((old_nr + new_nr) * sizeof(xen_disk_t), GFP_KERNEL);
  13.455 +
  13.456 +    /* @i tracks old list; @j tracks new list; @k tracks merged list. */
  13.457 +    i = j = k = 0;
  13.458 +
  13.459 +    while ( (i < old_nr) && (j < new_nr) )
  13.460 +    {
  13.461 +        if ( old_info[i].device < new_info[j].device )
  13.462 +        {
  13.463 +            if ( xlvbd_remove_device(old_info[i].device) != 0 )
  13.464 +                memcpy(&merged_info[k++], &old_info[i], sizeof(xen_disk_t));
  13.465 +            i++;
  13.466 +        }
  13.467 +        else if ( old_info[i].device > new_info[j].device )
  13.468 +        {
  13.469 +            if ( xlvbd_init_device(&new_info[j]) == 0 )
  13.470 +                memcpy(&merged_info[k++], &new_info[j], sizeof(xen_disk_t));
  13.471 +            j++;
  13.472 +        }
  13.473 +        else
  13.474 +        {
  13.475 +            if ( ((old_info[i].capacity == new_info[j].capacity) &&
  13.476 +                  (old_info[i].info == new_info[j].info)) ||
  13.477 +                 (xlvbd_remove_device(old_info[i].device) != 0) )
  13.478 +                memcpy(&merged_info[k++], &old_info[i], sizeof(xen_disk_t));
  13.479 +            else if ( xlvbd_init_device(&new_info[j]) == 0 )
  13.480 +                memcpy(&merged_info[k++], &new_info[j], sizeof(xen_disk_t));
  13.481 +            i++; j++;
  13.482 +        }
  13.483 +    }
  13.484 +
  13.485 +    for ( ; i < old_nr; i++ )
  13.486 +    {
  13.487 +        if ( xlvbd_remove_device(old_info[i].device) != 0 )
  13.488 +            memcpy(&merged_info[k++], &old_info[i], sizeof(xen_disk_t));
  13.489 +    }
  13.490 +
  13.491 +    for ( ; j < new_nr; j++ )
  13.492 +    {
  13.493 +        if ( xlvbd_init_device(&new_info[j]) == 0 )
  13.494 +            memcpy(&merged_info[k++], &new_info[j], sizeof(xen_disk_t));
  13.495 +    }
  13.496 +
  13.497 +    vbd_info = merged_info;
  13.498 +    nr_vbds  = k;
  13.499 +
  13.500 +    kfree(old_info);
  13.501 +    kfree(new_info);
  13.502 +}
  13.503 +
  13.504 +
  13.505 +/*
  13.506 + * Set up all the linux device goop for the virtual block devices (vbd's) that 
  13.507 + * xen tells us about. Note that although from xen's pov VBDs are addressed 
  13.508 + * simply an opaque 16-bit device number, the domain creation tools 
  13.509 + * conventionally allocate these numbers to correspond to those used by 'real' 
  13.510 + * linux -- this is just for convenience as it means e.g. that the same 
  13.511 + * /etc/fstab can be used when booting with or without xen.
  13.512 + */
  13.513 +int __init xlvbd_init(void)
  13.514 +{
  13.515 +    int i;
  13.516 +    
  13.517 +    /*
  13.518 +     * If compiled as a module, we don't support unloading yet. We therefore 
  13.519 +     * permanently increment the reference count to disallow it.
  13.520 +     */
  13.521 +    SET_MODULE_OWNER(&xlvbd_block_fops);
  13.522 +    MOD_INC_USE_COUNT;
  13.523 +
  13.524 +    /* Initialize the global arrays. */
  13.525 +    for ( i = 0; i < 256; i++ ) 
  13.526 +    {
  13.527 +        /* from the generic ide code (drivers/ide/ide-probe.c, etc) */
  13.528 +        xlide_blksize_size[i]  = 1024;
  13.529 +        xlide_hardsect_size[i] = 512;
  13.530 +        xlide_max_sectors[i]   = 128;  /* 'hwif->rqsize' if we knew it */
  13.531 +
  13.532 +        /* from the generic scsi disk code (drivers/scsi/sd.c) */
  13.533 +        xlscsi_blksize_size[i]  = 1024; /* XXX 512; */
  13.534 +        xlscsi_hardsect_size[i] = 512;
  13.535 +        xlscsi_max_sectors[i]   = 128*8; /* XXX 128; */
  13.536 +
  13.537 +        /* we don't really know what to set these too since it depends */
  13.538 +        xlvbd_blksize_size[i]  = 512;
  13.539 +        xlvbd_hardsect_size[i] = 512;
  13.540 +        xlvbd_max_sectors[i]   = 128;
  13.541 +    }
  13.542 +
  13.543 +    vbd_info = kmalloc(MAX_VBDS * sizeof(xen_disk_t), GFP_KERNEL);
  13.544 +    nr_vbds  = xlvbd_get_vbd_info(vbd_info);
  13.545 +
  13.546 +    if ( nr_vbds < 0 )
  13.547 +    {
  13.548 +        kfree(vbd_info);
  13.549 +        vbd_info = NULL;
  13.550 +        nr_vbds  = 0;
  13.551 +    }
  13.552 +    else
  13.553 +    {
  13.554 +        for ( i = 0; i < nr_vbds; i++ )
  13.555 +            xlvbd_init_device(&vbd_info[i]);
  13.556 +    }
  13.557 +
  13.558 +    return 0;
  13.559 +}
  13.560 +
  13.561 +
#ifdef MODULE
/* When built as a module, run xlvbd_init() at load time. */
module_init(xlvbd_init);
#endif
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/console/Makefile	Tue Mar 23 10:40:28 2004 +0000
    14.3 @@ -0,0 +1,3 @@
# Build the Xen virtual console driver (console.o) into the composite drv.o.
O_TARGET := drv.o
obj-$(CONFIG_XEN_CONSOLE) := console.o
include $(TOPDIR)/Rules.make
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c	Tue Mar 23 10:40:28 2004 +0000
    15.3 @@ -0,0 +1,508 @@
    15.4 +/******************************************************************************
    15.5 + * console.c
    15.6 + * 
    15.7 + * Virtual console driver.
    15.8 + * 
    15.9 + * Copyright (c) 2002-2004, K A Fraser.
   15.10 + */
   15.11 +
   15.12 +#include <linux/config.h>
   15.13 +#include <linux/module.h>
   15.14 +#include <linux/errno.h>
   15.15 +#include <linux/signal.h>
   15.16 +#include <linux/sched.h>
   15.17 +#include <linux/interrupt.h>
   15.18 +#include <linux/tty.h>
   15.19 +#include <linux/tty_flip.h>
   15.20 +#include <linux/serial.h>
   15.21 +#include <linux/major.h>
   15.22 +#include <linux/ptrace.h>
   15.23 +#include <linux/ioport.h>
   15.24 +#include <linux/mm.h>
   15.25 +#include <linux/slab.h>
   15.26 +#include <linux/init.h>
   15.27 +#include <linux/console.h>
   15.28 +#include <asm/evtchn.h>
   15.29 +#include <asm/io.h>
   15.30 +#include <asm/irq.h>
   15.31 +#include <asm/uaccess.h>
   15.32 +#include <asm/hypervisor.h>
   15.33 +#include <asm/hypervisor-ifs/event_channel.h>
   15.34 +#include <asm/control_if.h>
   15.35 +
/* Serialises all console I/O: the control rings and the wbuf indices. */
static spinlock_t xen_console_lock = SPIN_LOCK_UNLOCKED;

/* Minor number of the tty this console binds to (see xen_console_device). */
#define XEN_TTY_MINOR 123
   15.39 +
   15.40 +/******************** Kernel console driver ********************************/
   15.41 +
/*
 * Send console output from a non-privileged domain: copy @s into
 * CMSG_CONSOLE messages on the control-interface transmit ring and kick
 * the controller via event channel 0. Called with xen_console_lock held
 * (see xen_console_write).
 */
static void nonpriv_conwrite(const char *s, unsigned int count)
{
    control_if_t *ctrl_if;
    evtchn_op_t   evtchn_op;
    int           src, dst, p;

    /* The control interface occupies the second 2kB of the shared-info page. */
    ctrl_if = (control_if_t *)((char *)HYPERVISOR_shared_info + 2048);

    while ( count != 0 )
    {
        /* Wait for the request ring to drain. */
        while ( ctrl_if->tx_resp_prod != ctrl_if->tx_req_prod )
            barrier();

        p = MASK_CONTROL_IDX(ctrl_if->tx_req_prod);
        
        ctrl_if->tx_ring[p].type    = CMSG_CONSOLE;
        ctrl_if->tx_ring[p].subtype = CMSG_CONSOLE_DATA;
        ctrl_if->tx_ring[p].id      = 0xaa;
        src = dst = 0;
        /* Copy as much as fits in one message, expanding '\n' to "\n\r"
         * (hence the -1: leave room for a trailing '\r'). */
        while ( (src < count) && (dst < (sizeof(ctrl_if->tx_ring[p].msg)-1)) )
        {
            if ( (ctrl_if->tx_ring[p].msg[dst++] = s[src++]) == '\n' )
                ctrl_if->tx_ring[p].msg[dst++] = '\r';
        }
        ctrl_if->tx_ring[p].length = dst;
        
        /* Publish the message, then notify via event channel 0. */
        ctrl_if->tx_req_prod++;
        evtchn_op.cmd = EVTCHNOP_send;
        evtchn_op.u.send.local_port = 0;
        (void)HYPERVISOR_event_channel_op(&evtchn_op);
        
        s     += src;
        count -= src;
    }
}
   15.78 +
/*
 * Send console output from the privileged (initial) domain via the direct
 * console-I/O hypercall, looping until the whole buffer is accepted.
 */
static void priv_conwrite(const char *s, unsigned int count)
{
    int rc;

    while ( count > 0 )
    {
        if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write, count, s)) > 0 )
        {
            count -= rc;
            s += rc;
        }
        /* NOTE(review): a persistent error (rc <= 0) spins here forever --
         * presumably the hypercall cannot fail for domain 0; confirm. */
    }
}
   15.92 +
   15.93 +static void xen_console_write(struct console *co, const char *s, 
   15.94 +                              unsigned int count)
   15.95 +{
   15.96 +    unsigned long flags;
   15.97 +    spin_lock_irqsave(&xen_console_lock, flags);
   15.98 +    if ( !(start_info.flags & SIF_INITDOMAIN) )
   15.99 +        nonpriv_conwrite(s, count);
  15.100 +    else
  15.101 +        priv_conwrite(s, count);
  15.102 +    spin_unlock_irqrestore(&xen_console_lock, flags);
  15.103 +}
  15.104 +
  15.105 +static kdev_t xen_console_device(struct console *c)
  15.106 +{
  15.107 +    /*
  15.108 +     * This is the magic that binds our "struct console" to our
  15.109 +     * "tty_struct", defined below.
  15.110 +     */
  15.111 +    return MKDEV(TTY_MAJOR, XEN_TTY_MINOR);
  15.112 +}
  15.113 +
/* Kernel console descriptor registered in xen_console_init(). */
static struct console xen_console_info = {
    name:               "xencons", /* Used to be xen_console, but we're only
                                      actually allowed 8 characters including
                                      the terminator... */
    write:              xen_console_write,
    device:             xen_console_device,
    flags:              CON_PRINTBUFFER,
    index:              -1,
};
  15.123 +
/* Register the Xen virtual console with the kernel's console subsystem. */
void xen_console_init(void)
{
    register_console(&xen_console_info);
}
  15.128 +
  15.129 +
  15.130 +/*** Useful function for console debugging -- goes straight to Xen ****/
  15.131 +asmlinkage int xprintk(const char *fmt, ...)
  15.132 +{
  15.133 +    va_list args;
  15.134 +    int printk_len;
  15.135 +    static char printk_buf[1024];
  15.136 +    
  15.137 +    /* Emit the output into the temporary buffer */
  15.138 +    va_start(args, fmt);
  15.139 +    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  15.140 +    va_end(args);
  15.141 +    
  15.142 +    /* Send the processed output directly to Xen. */
  15.143 +    xen_console_write(NULL, printk_buf, printk_len);
  15.144 +
  15.145 +    return 0;
  15.146 +}
  15.147 +
  15.148 +
/******************** User-space console driver (/dev/console) ************/

/* State for the single-tty driver registered for user-space access. */
static struct tty_driver xen_console_driver;
static int xen_console_refcount;
static struct tty_struct *xen_console_table[1];
static struct termios *xen_console_termios[1];
static struct termios *xen_console_termios_locked[1];
static struct tty_struct *xen_console_tty;   /* non-NULL while tty is open */

/* Output ring buffer: wc/wp are free-running consumer/producer indices,
 * reduced modulo the power-of-two WBUF_SIZE by WBUF_MASK(). x_char holds a
 * pending high-priority flow-control byte (XON/XOFF). */
#define WBUF_SIZE     1024
#define WBUF_MASK(_i) ((_i)&(WBUF_SIZE-1))
static char wbuf[WBUF_SIZE], x_char;
static unsigned int wc, wp; /* write_cons, write_prod */
  15.162 +
/*
 * Core console I/O pump: drain received characters into the tty flip
 * buffer and transmit queued output from wbuf[]. Called with
 * xen_console_lock held. Domain 0 talks to Xen directly via
 * HYPERVISOR_console_io; other domains use the shared control rings.
 */
static void __do_console_io(void)
{
    control_if_t    *ctrl_if;
    control_msg_t   *msg;
    evtchn_op_t      evtchn_op;
    CONTROL_RING_IDX c;
    int              i, l, work_done = 0;
    static char      rbuf[16];

    /* No tty open yet: nowhere to deliver input, nothing queued to send. */
    if ( xen_console_tty == NULL )
        return;

    /* Special-case I/O handling for domain 0. */
    if ( start_info.flags & SIF_INITDOMAIN )
    {
        /* Receive work. */
        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
            for ( i = 0; i < l; i++ )
                tty_insert_flip_char(xen_console_tty, rbuf[i], 0);
        if ( xen_console_tty->flip.count != 0 )
            tty_flip_buffer_push(xen_console_tty);

        /* Transmit work. */
        while ( wc != wp )
        {
            /* Write contiguous chunks, splitting at the ring wrap point. */
            l = wp - wc;
            if ( l > (WBUF_SIZE - WBUF_MASK(wc)) )
                l = WBUF_SIZE - WBUF_MASK(wc);
            priv_conwrite(&wbuf[WBUF_MASK(wc)], l);
            wc += l;
            wake_up_interruptible(&xen_console_tty->write_wait);
            if ( (xen_console_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
                 (xen_console_tty->ldisc.write_wakeup != NULL) )
                (xen_console_tty->ldisc.write_wakeup)(xen_console_tty);
        }

        return;
    }

    /* Acknowledge the notification. */
    evtchn_clear_port(0);

    /* The control interface occupies the second 2kB of the shared-info page. */
    ctrl_if = (control_if_t *)((char *)HYPERVISOR_shared_info + 2048);
    
    /* Receive work: consume console messages from the rx ring. */
    for ( c = ctrl_if->rx_resp_prod; c != ctrl_if->rx_req_prod; c++ )
    {
        msg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(c)];
        if ( (msg->type == CMSG_CONSOLE) &&
             (msg->subtype == CMSG_CONSOLE_DATA) )
        {
            for ( i = 0; i < msg->length; i++ )
                tty_insert_flip_char(xen_console_tty, msg->msg[i], 0);
        }
        msg->length = 0;
    }
    if ( ctrl_if->rx_resp_prod != c )
    {
        ctrl_if->rx_resp_prod = c;
        work_done = 1;
        tty_flip_buffer_push(xen_console_tty);
    }
    
    /* Transmit work: fill free tx-ring slots from x_char and wbuf[]. */
    for ( c = ctrl_if->tx_req_prod; 
          (c - ctrl_if->tx_resp_prod) != CONTROL_RING_SIZE; 
          c++ )
    {
        if ( (wc == wp) && (x_char == 0) )
            break;
        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(c)];
        msg->type    = CMSG_CONSOLE;
        msg->subtype = CMSG_CONSOLE_DATA;
        msg->id      = 0xaa;
        l = 0;
        if ( x_char != 0 ) /* Handle XON/XOFF urgently. */
        {
            msg->msg[l++] = x_char;
            x_char = 0;
        }
        while ( (l < sizeof(msg->msg)) && (wc != wp) )
            msg->msg[l++] = wbuf[WBUF_MASK(wc++)];
        msg->length = l;
    }
    if ( ctrl_if->tx_req_prod != c )
    {
        ctrl_if->tx_req_prod = c;
        work_done = 1;
        /* There might be something for waiters to do. */
        wake_up_interruptible(&xen_console_tty->write_wait);
        if ( (xen_console_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
             (xen_console_tty->ldisc.write_wakeup != NULL) )
            (xen_console_tty->ldisc.write_wakeup)(xen_console_tty);
    }

    if ( work_done )
    {
        /* Send a notification to the controller. */
        evtchn_op.cmd = EVTCHNOP_send;
        evtchn_op.u.send.local_port = 0;
        (void)HYPERVISOR_event_channel_op(&evtchn_op);
    }
}
  15.266 +
/* This is the callback entry point for domains != 0. */
static void control_event(unsigned int port)
{
    unsigned long flags;
    /* __do_console_io() must run under xen_console_lock. */
    spin_lock_irqsave(&xen_console_lock, flags);
    __do_console_io();
    spin_unlock_irqrestore(&xen_console_lock, flags);
}
  15.275 +
/* This is the callback entry point for domain 0. */
static void control_irq(int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long flags;
    /* __do_console_io() must run under xen_console_lock. */
    spin_lock_irqsave(&xen_console_lock, flags);
    __do_console_io();
    spin_unlock_irqrestore(&xen_console_lock, flags);
}
  15.284 +
  15.285 +static int xen_console_write_room(struct tty_struct *tty)
  15.286 +{
  15.287 +    return WBUF_SIZE - (wp - wc);
  15.288 +}
  15.289 +
  15.290 +static int xen_console_chars_in_buffer(struct tty_struct *tty)
  15.291 +{
  15.292 +    return wp - wc;
  15.293 +}
  15.294 +
  15.295 +static void xen_console_send_xchar(struct tty_struct *tty, char ch)
  15.296 +{
  15.297 +    unsigned long flags;
  15.298 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.299 +    x_char = ch;
  15.300 +    __do_console_io();
  15.301 +    spin_unlock_irqrestore(&xen_console_lock, flags);
  15.302 +}
  15.303 +
  15.304 +static void xen_console_throttle(struct tty_struct *tty)
  15.305 +{
  15.306 +    if ( I_IXOFF(tty) )
  15.307 +        xen_console_send_xchar(tty, STOP_CHAR(tty));
  15.308 +}
  15.309 +
  15.310 +static void xen_console_unthrottle(struct tty_struct *tty)
  15.311 +{
  15.312 +    if ( I_IXOFF(tty) )
  15.313 +    {
  15.314 +        if ( x_char != 0 )
  15.315 +            x_char = 0;
  15.316 +        else
  15.317 +            xen_console_send_xchar(tty, START_CHAR(tty));
  15.318 +    }
  15.319 +}
  15.320 +
  15.321 +static void xen_console_flush_buffer(struct tty_struct *tty)
  15.322 +{
  15.323 +    unsigned long flags;
  15.324 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.325 +    wc = wp = 0;
  15.326 +    spin_unlock_irqrestore(&xen_console_lock, flags);
  15.327 +}
  15.328 +
  15.329 +static inline int __xen_console_put_char(int ch)
  15.330 +{
  15.331 +    char _ch = (char)ch;
  15.332 +    if ( (wp - wc) == WBUF_SIZE )
  15.333 +        return 0;
  15.334 +    wbuf[WBUF_MASK(wp++)] = _ch;
  15.335 +    return 1;
  15.336 +}
  15.337 +
  15.338 +static int xen_console_write(struct tty_struct *tty, int from_user,
  15.339 +                       const u_char * buf, int count)
  15.340 +{
  15.341 +    int i;
  15.342 +    unsigned long flags;
  15.343 +
  15.344 +    if ( from_user && verify_area(VERIFY_READ, buf, count) )
  15.345 +        return -EINVAL;
  15.346 +
  15.347 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.348 +
  15.349 +    for ( i = 0; i < count; i++ )
  15.350 +    {
  15.351 +        char ch;
  15.352 +        if ( from_user )
  15.353 +            __get_user(ch, buf + i);
  15.354 +        else
  15.355 +            ch = buf[i];
  15.356 +        if ( !__xen_console_put_char(ch) )
  15.357 +            break;
  15.358 +    }
  15.359 +
  15.360 +    if ( i != 0 )
  15.361 +        __do_console_io();
  15.362 +
  15.363 +    spin_unlock_irqrestore(&xen_console_lock, flags);
  15.364 +
  15.365 +    return i;
  15.366 +}
  15.367 +
  15.368 +static void xen_console_put_char(struct tty_struct *tty, u_char ch)
  15.369 +{
  15.370 +    unsigned long flags;
  15.371 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.372 +    (void)__xen_console_put_char(ch);
  15.373 +    spin_unlock_irqrestore(&xen_console_lock, flags);
  15.374 +}
  15.375 +
  15.376 +static void xen_console_flush_chars(struct tty_struct *tty)
  15.377 +{
  15.378 +    unsigned long flags;
  15.379 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.380 +    __do_console_io();
  15.381 +    spin_unlock_irqrestore(&xen_console_lock, flags);    
  15.382 +}
  15.383 +
  15.384 +static void xen_console_wait_until_sent(struct tty_struct *tty, int timeout)
  15.385 +{
  15.386 +    unsigned long orig_jiffies = jiffies;
  15.387 +
  15.388 +    while ( tty->driver.chars_in_buffer(tty) )
  15.389 +    {
  15.390 +        set_current_state(TASK_INTERRUPTIBLE);
  15.391 +        schedule_timeout(1);
  15.392 +        if ( signal_pending(current) )
  15.393 +            break;
  15.394 +        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
  15.395 +            break;
  15.396 +    }
  15.397 +    
  15.398 +    set_current_state(TASK_RUNNING);
  15.399 +}
  15.400 +
  15.401 +static int xen_console_open(struct tty_struct *tty, struct file *filp)
  15.402 +{
  15.403 +    int line;
  15.404 +    unsigned long flags;
  15.405 +
  15.406 +    MOD_INC_USE_COUNT;
  15.407 +    line = MINOR(tty->device) - tty->driver.minor_start;
  15.408 +    if ( line != 0 )
  15.409 +    {
  15.410 +        MOD_DEC_USE_COUNT;
  15.411 +        return -ENODEV;
  15.412 +    }
  15.413 +
  15.414 +    spin_lock_irqsave(&xen_console_lock, flags);
  15.415 +    tty->driver_data = NULL;
  15.416 +    if ( xen_console_tty == NULL )
  15.417 +        xen_console_tty = tty;
  15.418 +    __do_console_io();
  15.419 +    spin_unlock_irqrestore(&xen_console_lock, flags);    
  15.420 +
  15.421 +    return 0;
  15.422 +}
  15.423 +
  15.424 +static void xen_console_close(struct tty_struct *tty, struct file *filp)
  15.425 +{
  15.426 +    unsigned long flags;
  15.427 +
  15.428 +    if ( tty->count == 1 )
  15.429 +    {
  15.430 +        tty->closing = 1;
  15.431 +        tty_wait_until_sent(tty, 0);
  15.432 +        if ( tty->driver.flush_buffer != NULL )
  15.433 +            tty->driver.flush_buffer(tty);
  15.434 +        if ( tty->ldisc.flush_buffer != NULL )
  15.435 +            tty->ldisc.flush_buffer(tty);
  15.436 +        tty->closing = 0;
  15.437 +        spin_lock_irqsave(&xen_console_lock, flags);
  15.438 +        xen_console_tty = NULL;
  15.439 +        spin_unlock_irqrestore(&xen_console_lock, flags);    
  15.440 +    }
  15.441 +
  15.442 +    MOD_DEC_USE_COUNT;
  15.443 +}
  15.444 +
  15.445 +int __init xen_con_init(void)
  15.446 +{
  15.447 +    memset(&xen_console_driver, 0, sizeof(struct tty_driver));
  15.448 +    xen_console_driver.magic           = TTY_DRIVER_MAGIC;
  15.449 +    xen_console_driver.name            = "xencons";
  15.450 +    xen_console_driver.major           = TTY_MAJOR;
  15.451 +    xen_console_driver.minor_start     = XEN_TTY_MINOR;
  15.452 +    xen_console_driver.num             = 1;
  15.453 +    xen_console_driver.type            = TTY_DRIVER_TYPE_SERIAL;
  15.454 +    xen_console_driver.subtype         = SERIAL_TYPE_NORMAL;
  15.455 +    xen_console_driver.init_termios    = tty_std_termios;
  15.456 +    xen_console_driver.flags           = 
  15.457 +        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
  15.458 +    xen_console_driver.refcount        = &xen_console_refcount;
  15.459 +    xen_console_driver.table           = xen_console_table;
  15.460 +    xen_console_driver.termios         = xen_console_termios;
  15.461 +    xen_console_driver.termios_locked  = xen_console_termios_locked;
  15.462 +
  15.463 +    xen_console_driver.open            = xen_console_open;
  15.464 +    xen_console_driver.close           = xen_console_close;
  15.465 +    xen_console_driver.write           = xen_console_write;
  15.466 +    xen_console_driver.write_room      = xen_console_write_room;
  15.467 +    xen_console_driver.put_char        = xen_console_put_char;
  15.468 +    xen_console_driver.flush_chars     = xen_console_flush_chars;
  15.469 +    xen_console_driver.chars_in_buffer = xen_console_chars_in_buffer;
  15.470 +    xen_console_driver.send_xchar      = xen_console_send_xchar;
  15.471 +    xen_console_driver.flush_buffer    = xen_console_flush_buffer;
  15.472 +    xen_console_driver.throttle        = xen_console_throttle;
  15.473 +    xen_console_driver.unthrottle      = xen_console_unthrottle;
  15.474 +    xen_console_driver.wait_until_sent = xen_console_wait_until_sent;
  15.475 +
  15.476 +    if ( tty_register_driver(&xen_console_driver) )
  15.477 +        panic("Couldn't register Xen virtual console driver\n");
  15.478 +
  15.479 +    if ( !(start_info.flags & SIF_INITDOMAIN) )
  15.480 +    {
  15.481 +        if ( evtchn_request_port(0, control_event) != 0 )
  15.482 +            BUG();
  15.483 +        control_event(0); /* kickstart the console */
  15.484 +    }
  15.485 +    else
  15.486 +    {
  15.487 +        request_irq(HYPEREVENT_IRQ(_EVENT_CONSOLE), 
  15.488 +                    control_irq, 0, "console", NULL);
  15.489 +        control_irq(0, NULL, NULL); /* kickstart the console */
  15.490 +    }
  15.491 +
  15.492 +    printk("Xen virtual console successfully installed\n");
  15.493 +    
  15.494 +    return 0;
  15.495 +}
  15.496 +
  15.497 +void __exit xen_con_fini(void)
  15.498 +{
  15.499 +    int ret;
  15.500 +
  15.501 +    ret = tty_unregister_driver(&xen_console_driver);
  15.502 +    if ( ret != 0 )
  15.503 +        printk(KERN_ERR "Unable to unregister Xen console driver: %d\n", ret);
  15.504 +
  15.505 +    if ( !(start_info.flags & SIF_INITDOMAIN) )
  15.506 +        (void)evtchn_free_port(0);
  15.507 +}
  15.508 +
  15.509 +module_init(xen_con_init);
  15.510 +module_exit(xen_con_fini);
  15.511 +
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/Makefile	Tue Mar 23 10:40:28 2004 +0000
    16.3 @@ -0,0 +1,3 @@
    16.4 +O_TARGET := drv.o
    16.5 +obj-y := core.o vfr.o
    16.6 +include $(TOPDIR)/Rules.make
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/core.c	Tue Mar 23 10:40:28 2004 +0000
    17.3 @@ -0,0 +1,104 @@
    17.4 +/******************************************************************************
    17.5 + * core.c
    17.6 + * 
    17.7 + * Interface to privileged domain-0 commands.
    17.8 + * 
    17.9 + * Copyright (c) 2002-2004, K A Fraser, B Dragovic
   17.10 + */
   17.11 +
   17.12 +#include <linux/config.h>
   17.13 +#include <linux/module.h>
   17.14 +#include <linux/kernel.h>
   17.15 +#include <linux/sched.h>
   17.16 +#include <linux/slab.h>
   17.17 +#include <linux/string.h>
   17.18 +#include <linux/errno.h>
   17.19 +#include <linux/mm.h>
   17.20 +#include <linux/mman.h>
   17.21 +#include <linux/swap.h>
   17.22 +#include <linux/smp_lock.h>
   17.23 +#include <linux/swapctl.h>
   17.24 +#include <linux/iobuf.h>
   17.25 +#include <linux/highmem.h>
   17.26 +#include <linux/pagemap.h>
   17.27 +#include <linux/seq_file.h>
   17.28 +
   17.29 +#include <asm/pgalloc.h>
   17.30 +#include <asm/pgtable.h>
   17.31 +#include <asm/uaccess.h>
   17.32 +#include <asm/tlb.h>
   17.33 +#include <asm/proc_cmd.h>
   17.34 +#include <asm/hypervisor-ifs/dom0_ops.h>
   17.35 +#include <asm/xen_proc.h>
   17.36 +
   17.37 +static struct proc_dir_entry *privcmd_intf;
   17.38 +
   17.39 +static int privcmd_ioctl(struct inode *inode, struct file *file,
   17.40 +                         unsigned int cmd, unsigned long data)
   17.41 +{
   17.42 +    int ret = 0;
   17.43 +
   17.44 +    switch ( cmd )
   17.45 +    {
   17.46 +    case IOCTL_PRIVCMD_HYPERCALL:
   17.47 +    {
   17.48 +        privcmd_hypercall_t hypercall;
   17.49 +  
   17.50 +        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
   17.51 +            return -EFAULT;
   17.52 +
   17.53 +        __asm__ __volatile__ (
   17.54 +            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
   17.55 +            "movl  4(%%eax),%%ebx ;"
   17.56 +            "movl  8(%%eax),%%ecx ;"
   17.57 +            "movl 12(%%eax),%%edx ;"
   17.58 +            "movl 16(%%eax),%%esi ;"
   17.59 +            "movl 20(%%eax),%%edi ;"
   17.60 +            "movl   (%%eax),%%eax ;"
   17.61 +            TRAP_INSTR "; "
   17.62 +            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
   17.63 +            : "=a" (ret) : "0" (&hypercall) : "memory" );
   17.64 +
   17.65 +    }
   17.66 +    break;
   17.67 +
   17.68 +    default:
   17.69 +        ret = -EINVAL;
   17.70 +    	break;
   17.71 +	}
   17.72 +    return ret;
   17.73 +}
   17.74 +
   17.75 +
   17.76 +static struct file_operations privcmd_file_ops = {
   17.77 +  ioctl : privcmd_ioctl
   17.78 +};
   17.79 +
   17.80 +
   17.81 +static int __init init_module(void)
   17.82 +{
   17.83 +    if ( !(start_info.flags & SIF_PRIVILEGED) )
   17.84 +        return 0;
   17.85 +
   17.86 +    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
   17.87 +    if ( privcmd_intf != NULL )
   17.88 +    {
   17.89 +        privcmd_intf->owner      = THIS_MODULE;
   17.90 +        privcmd_intf->nlink      = 1;
   17.91 +	privcmd_intf->proc_fops  = &privcmd_file_ops;
   17.92 +    }
   17.93 +
   17.94 +    return 0;
   17.95 +}
   17.96 +
   17.97 +
   17.98 +static void __exit cleanup_module(void)
   17.99 +{
  17.100 +    if ( privcmd_intf == NULL ) return;
  17.101 +    remove_xen_proc_entry("privcmd");
  17.102 +    privcmd_intf = NULL;
  17.103 +}
  17.104 +
  17.105 +
  17.106 +module_init(init_module);
  17.107 +module_exit(cleanup_module);
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/dom0/vfr.c	Tue Mar 23 10:40:28 2004 +0000
    18.3 @@ -0,0 +1,343 @@
    18.4 +/******************************************************************************
    18.5 + * vfr.c
    18.6 + *
    18.7 + * Interface to the virtual firewall/router.
    18.8 + *
    18.9 + */
   18.10 +
   18.11 +#include <linux/config.h>
   18.12 +#include <linux/module.h>
   18.13 +#include <linux/kernel.h>
   18.14 +#include <linux/sched.h>
   18.15 +#include <linux/slab.h>
   18.16 +#include <linux/string.h>
   18.17 +#include <linux/errno.h>
   18.18 +#include <asm/xen_proc.h>
   18.19 +#include <asm/hypervisor-ifs/network.h>
   18.20 +
   18.21 +static struct proc_dir_entry *proc_vfr;
   18.22 +
   18.23 +static unsigned char readbuf[1024];
   18.24 +
   18.25 +/* Helpers, implemented at the bottom. */
   18.26 +u32 getipaddr(const char *buff, unsigned int len);
   18.27 +u16 antous(const char *buff, int len);
   18.28 +u64 antoull(const char *buff, int len);
   18.29 +int anton(const char *buff, int len);
   18.30 +
   18.31 +static int vfr_read_proc(char *page, char **start, off_t off,
   18.32 +                         int count, int *eof, void *data)
   18.33 +{   
   18.34 +    strcpy(page, readbuf);
   18.35 +    *readbuf = '\0';
   18.36 +    *eof = 1;
   18.37 +    *start = page;
   18.38 +    return strlen(page);
   18.39 +}
   18.40 +
   18.41 +/* The format for the vfr interface is as follows:
   18.42 + *
   18.43 + *  COMMAND <field>=<val> [<field>=<val> [...]]
   18.44 + *
   18.45 + *  where:
   18.46 + *
   18.47 + *  COMMAND = { ACCEPT | COUNT }
   18.48 + *
   18.49 + *  field=val pairs are as follows:
   18.50 + *
   18.51 + *  field = { srcaddr | dstaddr }
   18.52 + *      val is a dot seperated, numeric IP address.
   18.53 + *
   18.54 + *  field = { srcport | dstport }
   18.55 + *      val is a (16-bit) unsigned int
   18.56 + *
   18.57 + *  field = { proto }
   18.58 + *      val = { IP | TCP | UDP | ARP }
   18.59 + *
   18.60 + */
   18.61 +
   18.62 +#define isspace(_x) ( ((_x)==' ')  || ((_x)=='\t') || ((_x)=='\v') || \
   18.63 +		      ((_x)=='\f') || ((_x)=='\r') || ((_x)=='\n') )
   18.64 +
   18.65 +static int vfr_write_proc(struct file *file, const char *buffer,
   18.66 +                          u_long count, void *data)
   18.67 +{
   18.68 +    network_op_t op;
   18.69 +    int ret, len;
   18.70 +    int ts, te, tl; // token start, end, and length
   18.71 +    int fs, fe, fl; // field.
   18.72 +
   18.73 +    len = count;
   18.74 +    ts = te = 0;
   18.75 +
   18.76 +    memset(&op, 0, sizeof(network_op_t));
   18.77 +
   18.78 +    // get the command:
   18.79 +    while ( count && isspace(buffer[ts]) ) { ts++; count--; } // skip spaces.
   18.80 +    te = ts;
   18.81 +    while ( count && !isspace(buffer[te]) ) { te++; count--; } // command end
   18.82 +    if ( te <= ts ) goto bad;
   18.83 +    tl = te - ts;
   18.84 +  
   18.85 +    if ( strncmp(&buffer[ts], "ADD", tl) == 0 )
   18.86 +    {
   18.87 +        op.cmd = NETWORK_OP_ADDRULE;
   18.88 +    }
   18.89 +    else if ( strncmp(&buffer[ts], "DELETE", tl) == 0 )
   18.90 +    {
   18.91 +        op.cmd = NETWORK_OP_DELETERULE;
   18.92 +    }
   18.93 +    else if ( strncmp(&buffer[ts], "PRINT", tl) == 0 )
   18.94 +    {
   18.95 +        op.cmd = NETWORK_OP_GETRULELIST;
   18.96 +        goto doneparsing;
   18.97 +    }
   18.98 +        
   18.99 +    ts = te;
  18.100 +  
  18.101 +    // get the action
  18.102 +    while ( count && (buffer[ts] == ' ') ) { ts++; count--; } // skip spaces.
  18.103 +    te = ts;
  18.104 +    while ( count && (buffer[te] != ' ') ) { te++; count--; } // command end
  18.105 +    if ( te <= ts ) goto bad;
  18.106 +    tl = te - ts;
  18.107 +
  18.108 +    if ( strncmp(&buffer[ts], "ACCEPT", tl) == 0 ) 
  18.109 +    {
  18.110 +        op.u.net_rule.action = NETWORK_ACTION_ACCEPT;
  18.111 +        goto keyval;
  18.112 +    }
  18.113 +    if ( strncmp(&buffer[ts], "COUNT", tl) == 0 ) 
  18.114 +    {
  18.115 +        op.u.net_rule.action = NETWORK_ACTION_COUNT;
  18.116 +        goto keyval;
  18.117 +    }
  18.118 +   
  18.119 +    // default case;
  18.120 +    return (len);
  18.121 +  
  18.122 +
  18.123 +    // get the key=val pairs.
  18.124 + keyval:
  18.125 +    while (count)
  18.126 +    {
  18.127 +        //get field
  18.128 +        ts = te; while ( count && isspace(buffer[ts]) ) { ts++; count--; }
  18.129 +        te = ts;
  18.130 +        while ( count && !isspace(buffer[te]) && (buffer[te] != '=') ) 
  18.131 +        { te++; count--; }
  18.132 +        if ( te <= ts )
  18.133 +            goto doneparsing;
  18.134 +        tl = te - ts;
  18.135 +        fs = ts; fe = te; fl = tl; // save the field markers.
  18.136 +        // skip "   =   " (ignores extra equals.)
  18.137 +        while ( count && (isspace(buffer[te]) || (buffer[te] == '=')) ) 
  18.138 +        { te++; count--; }
  18.139 +        ts = te;
  18.140 +        while ( count && !isspace(buffer[te]) ) { te++; count--; }
  18.141 +        tl = te - ts;
  18.142 +
  18.143 +        if ( (fl <= 0) || (tl <= 0) ) goto bad;
  18.144 +
  18.145 +        /* NB. Prefix matches must go first! */
  18.146 +        if (strncmp(&buffer[fs], "src", fl) == 0)
  18.147 +        {
  18.148 +            op.u.net_rule.src_dom = VIF_SPECIAL;
  18.149 +            op.u.net_rule.src_idx = VIF_ANY_INTERFACE;
  18.150 +        }
  18.151 +        else if (strncmp(&buffer[fs], "dst", fl) == 0)
  18.152 +        {
  18.153 +            op.u.net_rule.dst_dom = VIF_SPECIAL;
  18.154 +            op.u.net_rule.dst_idx = VIF_PHYSICAL_INTERFACE;
  18.155 +        }
  18.156 +        else if (strncmp(&buffer[fs], "srcaddr", fl) == 0) 
  18.157 +        {  
  18.158 +            op.u.net_rule.src_addr = getipaddr(&buffer[ts], tl);
  18.159 +        }
  18.160 +        else if (strncmp(&buffer[fs], "dstaddr", fl) == 0)
  18.161 +        {    
  18.162 +            op.u.net_rule.dst_addr = getipaddr(&buffer[ts], tl);
  18.163 +        }
  18.164 +        else if (strncmp(&buffer[fs], "srcaddrmask", fl) == 0) 
  18.165 +        {
  18.166 +            op.u.net_rule.src_addr_mask = getipaddr(&buffer[ts], tl);
  18.167 +        }
  18.168 +        else if (strncmp(&buffer[fs], "dstaddrmask", fl) == 0)
  18.169 +        {
  18.170 +            op.u.net_rule.dst_addr_mask = getipaddr(&buffer[ts], tl);
  18.171 +        }
  18.172 +        else if (strncmp(&buffer[fs], "srcport", fl) == 0)
  18.173 +        {
  18.174 +            op.u.net_rule.src_port = antous(&buffer[ts], tl);
  18.175 +        }
  18.176 +        else if (strncmp(&buffer[fs], "dstport", fl) == 0)
  18.177 +        {
  18.178 +            op.u.net_rule.dst_port = antous(&buffer[ts], tl);
  18.179 +        }
  18.180 +        else if (strncmp(&buffer[fs], "srcportmask", fl) == 0)
  18.181 +        {
  18.182 +            op.u.net_rule.src_port_mask = antous(&buffer[ts], tl);
  18.183 +        }
  18.184 +        else if (strncmp(&buffer[fs], "dstportmask", fl) == 0)
  18.185 +        {
  18.186 +            op.u.net_rule.dst_port_mask = antous(&buffer[ts], tl);
  18.187 +        }
  18.188 +        else if (strncmp(&buffer[fs], "srcdom", fl) == 0)
  18.189 +        {
  18.190 +            op.u.net_rule.src_dom = antoull(&buffer[ts], tl);
  18.191 +        }
  18.192 +        else if (strncmp(&buffer[fs], "srcidx", fl) == 0)
  18.193 +        {
  18.194 +            op.u.net_rule.src_idx = anton(&buffer[ts], tl);
  18.195 +        }
  18.196 +        else if (strncmp(&buffer[fs], "dstdom", fl) == 0)
  18.197 +        {
  18.198 +            op.u.net_rule.dst_dom = antoull(&buffer[ts], tl);
  18.199 +        }
  18.200 +        else if (strncmp(&buffer[fs], "dstidx", fl) == 0)
  18.201 +        {
  18.202 +            op.u.net_rule.dst_idx = anton(&buffer[ts], tl);
  18.203 +        }
  18.204 +        else if ( (strncmp(&buffer[fs], "proto", fl) == 0))
  18.205 +        {	
  18.206 +            if (strncmp(&buffer[ts], "any", tl) == 0) 
  18.207 +                op.u.net_rule.proto = NETWORK_PROTO_ANY; 
  18.208 +            if (strncmp(&buffer[ts], "ip", tl) == 0)
  18.209 +                op.u.net_rule.proto = NETWORK_PROTO_IP;
  18.210 +            if (strncmp(&buffer[ts], "tcp", tl) == 0) 
  18.211 +                op.u.net_rule.proto = NETWORK_PROTO_TCP;
  18.212 +            if (strncmp(&buffer[ts], "udp", tl) == 0)
  18.213 +                op.u.net_rule.proto = NETWORK_PROTO_UDP;
  18.214 +            if (strncmp(&buffer[ts], "arp", tl) == 0)
  18.215 +                op.u.net_rule.proto = NETWORK_PROTO_ARP;
  18.216 +        }
  18.217 +    }
  18.218 +
  18.219 + doneparsing:  
  18.220 +    ret = HYPERVISOR_network_op(&op);
  18.221 +    return(len);
  18.222 +
  18.223 + bad:
  18.224 +    return(len);
  18.225 +    
  18.226 +    
  18.227 +}
  18.228 +
  18.229 +static int __init init_module(void)
  18.230 +{
  18.231 +    if ( !(start_info.flags & SIF_PRIVILEGED) )
  18.232 +        return 0;
  18.233 +
  18.234 +    *readbuf = '\0';
  18.235 +    proc_vfr = create_xen_proc_entry("vfr", 0600);
  18.236 +    if ( proc_vfr != NULL )
  18.237 +    {
  18.238 +        proc_vfr->owner      = THIS_MODULE;
  18.239 +        proc_vfr->nlink      = 1;
  18.240 +        proc_vfr->read_proc  = vfr_read_proc;
  18.241 +        proc_vfr->write_proc = vfr_write_proc;
  18.242 +        printk("Successfully installed virtual firewall/router interface\n");
  18.243 +    }
  18.244 +    return 0;
  18.245 +}
  18.246 +
  18.247 +static void __exit cleanup_module(void)
  18.248 +{
  18.249 +    if ( proc_vfr == NULL ) return;
  18.250 +    remove_xen_proc_entry("vfr");
  18.251 +    proc_vfr = NULL;
  18.252 +}
  18.253 +
  18.254 +module_init(init_module);
  18.255 +module_exit(cleanup_module);
  18.256 +
  18.257 +/* Helper functions start here: */
  18.258 +
  18.259 +int anton(const char *buff, int len)
  18.260 +{
  18.261 +    int ret;
  18.262 +    char c;
  18.263 +    int sign = 1;
  18.264 +    
  18.265 +    ret = 0;
  18.266 +
  18.267 +    if (len == 0) return 0;
  18.268 +    if (*buff == '-') { sign = -1; buff++; len--; }
  18.269 +
  18.270 +    while ( (len) && ((c = *buff) >= '0') && (c <= '9') )
  18.271 +    {
  18.272 +        ret *= 10;
  18.273 +        ret += c - '0';
  18.274 +        buff++; len--;
  18.275 +    }
  18.276 +
  18.277 +    ret *= sign;
  18.278 +    return ret;
  18.279 +}
  18.280 +    
  18.281 +u16 antous(const char *buff, int len)
  18.282 +{
  18.283 +    u16 ret;
  18.284 +    char c;
  18.285 +
  18.286 +    ret = 0;
  18.287 +
  18.288 +    while ( (len) && ((c = *buff) >= '0') && (c <= '9') )
  18.289 +    {
  18.290 +        ret *= 10;
  18.291 +        ret += c - '0';
  18.292 +        buff++; len--;
  18.293 +    }
  18.294 +
  18.295 +    return ret;
  18.296 +}
  18.297 +
  18.298 +u64 antoull(const char *buff, int len)
  18.299 +{
  18.300 +    u64 ret;
  18.301 +    char c;
  18.302 +
  18.303 +    ret = 0;
  18.304 +
  18.305 +    while ( (len) && ((c = *buff) >= '0') && (c <= '9') )
  18.306 +    {
  18.307 +        ret *= 10;
  18.308 +        ret += c - '0';
  18.309 +        buff++; len--;
  18.310 +    }
  18.311 +
  18.312 +    return ret;
  18.313 +}
  18.314 +
  18.315 +u32 getipaddr(const char *buff, unsigned int len)
  18.316 +{
  18.317 +    char c;
  18.318 +    u32 ret, val;
  18.319 +
  18.320 +    ret = 0; val = 0;
  18.321 +
  18.322 +    while ( len )
  18.323 +    {
  18.324 +        if (!((((c = *buff) >= '0') && ( c <= '9')) || ( c == '.' ) ) ) 
  18.325 +        {
  18.326 +            return(0); // malformed.
  18.327 +        }
  18.328 +
  18.329 +        if ( c == '.' ) {
  18.330 +            if (val > 255) return (0); //malformed.
  18.331 +            ret = ret << 8; 
  18.332 +            ret += val;
  18.333 +            val = 0;
  18.334 +            len--; buff++;
  18.335 +            continue;
  18.336 +        }
  18.337 +        val *= 10;
  18.338 +        val += c - '0';
  18.339 +        buff++; len--;
  18.340 +    }
  18.341 +    ret = ret << 8;
  18.342 +    ret += val;
  18.343 +
  18.344 +    return (ret);
  18.345 +}
  18.346 +
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/Makefile	Tue Mar 23 10:40:28 2004 +0000
    19.3 @@ -0,0 +1,3 @@
    19.4 +O_TARGET := drv.o
    19.5 +obj-y := evtchn.o
    19.6 +include $(TOPDIR)/Rules.make
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/evtchn/evtchn.c	Tue Mar 23 10:40:28 2004 +0000
    20.3 @@ -0,0 +1,481 @@
    20.4 +/******************************************************************************
    20.5 + * evtchn.c
    20.6 + * 
    20.7 + * Xenolinux driver for receiving and demuxing event-channel signals.
    20.8 + * 
    20.9 + * Copyright (c) 2004, K A Fraser
   20.10 + */
   20.11 +
   20.12 +#include <linux/config.h>
   20.13 +#include <linux/module.h>
   20.14 +#include <linux/kernel.h>
   20.15 +#include <linux/sched.h>
   20.16 +#include <linux/slab.h>
   20.17 +#include <linux/string.h>
   20.18 +#include <linux/errno.h>
   20.19 +#include <linux/fs.h>
   20.20 +#include <linux/errno.h>
   20.21 +#include <linux/miscdevice.h>
   20.22 +#include <linux/major.h>
   20.23 +#include <linux/proc_fs.h>
   20.24 +#include <linux/devfs_fs_kernel.h>
   20.25 +#include <linux/stat.h>
   20.26 +#include <linux/poll.h>
   20.27 +#include <linux/irq.h>
   20.28 +#include <asm/evtchn.h>
   20.29 +
   20.30 +/* NB. This must be shared amongst drivers if more things go in /dev/xen */
   20.31 +static devfs_handle_t xen_dev_dir;
   20.32 +
   20.33 +/* Only one process may open /dev/xen/evtchn at any time. */
   20.34 +static unsigned long evtchn_dev_inuse;
   20.35 +
   20.36 +/* Notification ring, accessed via /dev/xen/evtchn. */
   20.37 +#define RING_SIZE     2048  /* 2048 16-bit entries */
   20.38 +#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
   20.39 +static u16 *ring;
   20.40 +static unsigned int ring_cons, ring_prod, ring_overflow;
   20.41 +
   20.42 +/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
   20.43 +static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
   20.44 +static struct fasync_struct *evtchn_async_queue;
   20.45 +
   20.46 +static evtchn_receiver_t rx_fns[1024];
   20.47 +
   20.48 +static u32 pend_outstanding[32];
   20.49 +static u32 disc_outstanding[32];
   20.50 +
   20.51 +static spinlock_t lock;
   20.52 +
   20.53 +int evtchn_request_port(unsigned int port, evtchn_receiver_t rx_fn)
   20.54 +{
   20.55 +    unsigned long flags;
   20.56 +    int rc;
   20.57 +
   20.58 +    spin_lock_irqsave(&lock, flags);
   20.59 +
   20.60 +    if ( rx_fns[port] != NULL )
   20.61 +    {
   20.62 +        printk(KERN_ALERT "Event channel port %d already in use.\n", port);
   20.63 +        rc = -EINVAL;
   20.64 +    }
   20.65 +    else
   20.66 +    {
   20.67 +        rx_fns[port] = rx_fn;
   20.68 +        rc = 0;
   20.69 +    }
   20.70 +
   20.71 +    spin_unlock_irqrestore(&lock, flags);
   20.72 +
   20.73 +    return rc;
   20.74 +}
   20.75 +
   20.76 +int evtchn_free_port(unsigned int port)
   20.77 +{
   20.78 +    unsigned long flags;
   20.79 +    int rc;
   20.80 +
   20.81 +    spin_lock_irqsave(&lock, flags);
   20.82 +
   20.83 +    if ( rx_fns[port] == NULL )
   20.84 +    {
   20.85 +        printk(KERN_ALERT "Event channel port %d not in use.\n", port);
   20.86 +        rc = -EINVAL;
   20.87 +    }
   20.88 +    else
   20.89 +    {
   20.90 +        rx_fns[port] = NULL;
   20.91 +        rc = 0;
   20.92 +    }
   20.93 +
   20.94 +    spin_unlock_irqrestore(&lock, flags);
   20.95 +
   20.96 +    return rc;
   20.97 +}
   20.98 +
   20.99 +/*
  20.100 + * NB. Clearing port can race a notification from remote end. Caller must
  20.101 + * therefore recheck notification status on return to avoid missing events.
  20.102 + */
  20.103 +void evtchn_clear_port(unsigned int port)
  20.104 +{
  20.105 +    unsigned int p = port & PORTIDX_MASK;
  20.106 +    unsigned long flags;
  20.107 +
  20.108 +    spin_lock_irqsave(&lock, flags);
  20.109 +
  20.110 +    if ( unlikely(port & PORT_DISCONNECT) )
  20.111 +    {
  20.112 +        clear_bit(p, &disc_outstanding[0]);
  20.113 +        clear_bit(p, &HYPERVISOR_shared_info->event_channel_disc[0]);
  20.114 +    }
  20.115 +    else
  20.116 +    {
  20.117 +        clear_bit(p, &pend_outstanding[0]);
  20.118 +        clear_bit(p, &HYPERVISOR_shared_info->event_channel_pend[0]);
  20.119 +    }
  20.120 +
  20.121 +    spin_unlock_irqrestore(&lock, flags);
  20.122 +}
  20.123 +
  20.124 +static inline void process_bitmask(u32 *sel, 
  20.125 +                                   u32 *mask,
  20.126 +                                   u32 *outstanding,
  20.127 +                                   unsigned int port_subtype)
  20.128 +{
  20.129 +    unsigned long l1, l2;
  20.130 +    unsigned int  l1_idx, l2_idx, port;
  20.131 +
  20.132 +    l1 = xchg(sel, 0);
  20.133 +    while ( (l1_idx = ffs(l1)) != 0 )
  20.134 +    {
  20.135 +        l1_idx--;
  20.136 +        l1 &= ~(1 << l1_idx);
  20.137 +
  20.138 +        l2 = mask[l1_idx] & ~outstanding[l1_idx];
  20.139 +        outstanding[l1_idx] |= l2;
  20.140 +        while ( (l2_idx = ffs(l2)) != 0 )
  20.141 +        {
  20.142 +            l2_idx--;
  20.143 +            l2 &= ~(1 << l2_idx);
  20.144 +
  20.145 +            port = (l1_idx * 32) + l2_idx;
  20.146 +            if ( rx_fns[port] != NULL )
  20.147 +            {
  20.148 +                (*rx_fns[port])(port | port_subtype);
  20.149 +            }
  20.150 +            else if ( ring != NULL )
  20.151 +            {
  20.152 +                if ( (ring_prod - ring_cons) < RING_SIZE )
  20.153 +                {
  20.154 +                    ring[RING_MASK(ring_prod)] = (u16)(port | port_subtype);
  20.155 +                    if ( ring_cons == ring_prod++ )
  20.156 +                    {
  20.157 +                        wake_up_interruptible(&evtchn_wait);
  20.158 +                        kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
  20.159 +                    }
  20.160 +                }
  20.161 +                else
  20.162 +                {
  20.163 +                    ring_overflow = 1;
  20.164 +                }
  20.165 +            }
  20.166 +        }
  20.167 +    }
  20.168 +}
  20.169 +
/*
 * IRQ handler for hypervisor event-channel notifications.
 * Folds both the "pending" and "disconnect" selector/bitmap pairs from the
 * shared-info page into the driver's outstanding sets (and thence to
 * registered rx handlers or the userland ring) under 'lock'.
 */
static void evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    shared_info_t *si = HYPERVISOR_shared_info;
    unsigned long flags;

    spin_lock_irqsave(&lock, flags);

    process_bitmask(&si->event_channel_pend_sel, 
                    &si->event_channel_pend[0],
                    &pend_outstanding[0],
                    PORT_NORMAL);
        
    process_bitmask(&si->event_channel_disc_sel,
                    &si->event_channel_disc[0],
                    &disc_outstanding[0],
                    PORT_DISCONNECT);
        
    spin_unlock_irqrestore(&lock, flags);
}
  20.189 +
/*
 * Reset the userland notification ring and repopulate it with every port
 * currently marked outstanding (normal and disconnect) that has no in-kernel
 * rx handler. Caller must hold 'lock'.
 */
static void __evtchn_reset_buffer_ring(void)
{
    u32          m;
    unsigned int i, j;

    /* Initialise the ring with currently outstanding notifications. */
    ring_cons = ring_prod = ring_overflow = 0;

    for ( i = 0; i < 32; i++ )
    {
        m = pend_outstanding[i];
        while ( (j = ffs(m)) != 0 )
        {
            m &= ~(1 << --j);  /* clear lowest set bit; j is now 0-based */
            if ( rx_fns[(i * 32) + j] == NULL )
                ring[ring_prod++] = (u16)(((i * 32) + j) | PORT_NORMAL);
        }

        m = disc_outstanding[i];
        while ( (j = ffs(m)) != 0 )
        {
            m &= ~(1 << --j);
            if ( rx_fns[(i * 32) + j] == NULL )
                ring[ring_prod++] = (u16)(((i * 32) + j) | PORT_DISCONNECT);
        }
    }
}
  20.217 +
/*
 * Read pending notifications as an array of u16 port ids (with subtype bits).
 * Blocks until at least one notification is queued, unless O_NONBLOCK.
 * Returns -EFBIG once the ring has overflowed; only an EVTCHN_RESET ioctl
 * clears that condition.
 */
static ssize_t evtchn_read(struct file *file, char *buf,
                           size_t count, loff_t *ppos)
{
    int rc;
    unsigned int c, p, bytes1 = 0, bytes2 = 0;
    DECLARE_WAITQUEUE(wait, current);

    add_wait_queue(&evtchn_wait, &wait);

    count &= ~1; /* even number of bytes */

    if ( count == 0 )
    {
        rc = 0;
        goto out;
    }

    if ( count > PAGE_SIZE )
        count = PAGE_SIZE;

    for ( ; ; )
    {
        set_current_state(TASK_INTERRUPTIBLE);

        /* NOTE(review): ring indices are read without taking 'lock';
         * presumably safe against the single IRQ producer -- confirm. */
        if ( (c = ring_cons) != (p = ring_prod) )
            break;

        if ( ring_overflow )
        {
            rc = -EFBIG;
            goto out;
        }

        if ( file->f_flags & O_NONBLOCK )
        {
            rc = -EAGAIN;
            goto out;
        }

        if ( signal_pending(current) )
        {
            rc = -ERESTARTSYS;
            goto out;
        }

        schedule();
    }

    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
    if ( ((c ^ p) & RING_SIZE) != 0 )
    {
        /* Producer is in a different lap: copy tail of ring, then head. */
        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
        bytes2 = RING_MASK(p) * sizeof(u16);
    }
    else
    {
        bytes1 = (p - c) * sizeof(u16);
        bytes2 = 0;
    }

    /* Truncate chunks according to caller's maximum byte count. */
    if ( bytes1 > count )
    {
        bytes1 = count;
        bytes2 = 0;
    }
    else if ( (bytes1 + bytes2) > count )
    {
        bytes2 = count - bytes1;
    }

    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
    {
        rc = -EFAULT;
        goto out;
    }

    ring_cons += (bytes1 + bytes2) / sizeof(u16);

    rc = bytes1 + bytes2;

 out:
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&evtchn_wait, &wait);
    return rc;
}
  20.305 +
  20.306 +static ssize_t evtchn_write(struct file *file, const char *buf,
  20.307 +                            size_t count, loff_t *ppos)
  20.308 +{
  20.309 +    int  rc, i;
  20.310 +    u16 *kbuf = (u16 *)get_free_page(GFP_KERNEL);
  20.311 +
  20.312 +    if ( kbuf == NULL )
  20.313 +        return -ENOMEM;
  20.314 +
  20.315 +    count &= ~1; /* even number of bytes */
  20.316 +
  20.317 +    if ( count == 0 )
  20.318 +    {
  20.319 +        rc = 0;
  20.320 +        goto out;
  20.321 +    }
  20.322 +
  20.323 +    if ( count > PAGE_SIZE )
  20.324 +        count = PAGE_SIZE;
  20.325 +
  20.326 +    if ( copy_from_user(kbuf, buf, count) != 0 )
  20.327 +    {
  20.328 +        rc = -EFAULT;
  20.329 +        goto out;
  20.330 +    }
  20.331 +
  20.332 +    for ( i = 0; i < (count/2); i++ )
  20.333 +        evtchn_clear_port(kbuf[i]);
  20.334 +
  20.335 +    rc = count;
  20.336 +
  20.337 + out:
  20.338 +    free_page((unsigned long)kbuf);
  20.339 +    return rc;
  20.340 +}
  20.341 +
  20.342 +static int evtchn_ioctl(struct inode *inode, struct file *file,
  20.343 +                        unsigned int cmd, unsigned long arg)
  20.344 +{
  20.345 +    if ( cmd != EVTCHN_RESET )
  20.346 +        return -EINVAL;
  20.347 +
  20.348 +    spin_lock_irq(&lock);
  20.349 +    __evtchn_reset_buffer_ring();
  20.350 +    spin_unlock_irq(&lock);   
  20.351 +
  20.352 +    return 0;
  20.353 +}
  20.354 +
  20.355 +static unsigned int evtchn_poll(struct file *file, poll_table *wait)
  20.356 +{
  20.357 +    unsigned int mask = POLLOUT | POLLWRNORM;
  20.358 +    poll_wait(file, &evtchn_wait, wait);
  20.359 +    if ( ring_cons != ring_prod )
  20.360 +        mask |= POLLIN | POLLRDNORM;
  20.361 +    if ( ring_overflow )
  20.362 +        mask = POLLERR;
  20.363 +    return mask;
  20.364 +}
  20.365 +
  20.366 +static int evtchn_fasync(int fd, struct file *filp, int on)
  20.367 +{
  20.368 +    return fasync_helper(fd, filp, on, &evtchn_async_queue);
  20.369 +}
  20.370 +
  20.371 +static int evtchn_open(struct inode *inode, struct file *filp)
  20.372 +{
  20.373 +    u16 *_ring;
  20.374 +
  20.375 +    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
  20.376 +        return -EBUSY;
  20.377 +
  20.378 +    /* Allocate outside locked region so that we can use GFP_KERNEL. */
  20.379 +    if ( (_ring = (u16 *)get_free_page(GFP_KERNEL)) == NULL )
  20.380 +        return -ENOMEM;
  20.381 +
  20.382 +    spin_lock_irq(&lock);
  20.383 +    ring = _ring;
  20.384 +    __evtchn_reset_buffer_ring();
  20.385 +    spin_unlock_irq(&lock);
  20.386 +
  20.387 +    MOD_INC_USE_COUNT;
  20.388 +
  20.389 +    return 0;
  20.390 +}
  20.391 +
/* Last close: free the notification ring and mark the device available. */
static int evtchn_release(struct inode *inode, struct file *filp)
{
    spin_lock_irq(&lock);
    if ( ring != NULL )
    {
        free_page((unsigned long)ring);
        ring = NULL;  /* IRQ handler checks for NULL before queuing */
    }
    spin_unlock_irq(&lock);

    evtchn_dev_inuse = 0;

    MOD_DEC_USE_COUNT;

    return 0;
}
  20.408 +
/* File operations for the evtchn misc device (GNU labelled-initialiser
 * style, as used throughout this 2.4 tree). */
static struct file_operations evtchn_fops = {
    owner:    THIS_MODULE,
    read:     evtchn_read,
    write:    evtchn_write,
    ioctl:    evtchn_ioctl,
    poll:     evtchn_poll,
    fasync:   evtchn_fasync,
    open:     evtchn_open,
    release:  evtchn_release
};
  20.419 +
/* Registers as /dev/misc/evtchn with the fixed minor EVTCHN_MINOR. */
static struct miscdevice evtchn_miscdev = {
    minor:    EVTCHN_MINOR,
    name:     "evtchn",
    fops:     &evtchn_fops
};
  20.425 +
  20.426 +static int __init init_module(void)
  20.427 +{
  20.428 +    devfs_handle_t symlink_handle;
  20.429 +    int            err, pos;
  20.430 +    char           link_dest[64];
  20.431 +
  20.432 +    /* (DEVFS) create '/dev/misc/evtchn'. */
  20.433 +    err = misc_register(&evtchn_miscdev);
  20.434 +    if ( err != 0 )
  20.435 +    {
  20.436 +        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  20.437 +        return err;
  20.438 +    }
  20.439 +
  20.440 +    /* (DEVFS) create directory '/dev/xen'. */
  20.441 +    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
  20.442 +
  20.443 +    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
  20.444 +    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
  20.445 +                              &link_dest[3], 
  20.446 +                              sizeof(link_dest) - 3);
  20.447 +    if ( pos >= 0 )
  20.448 +        strncpy(&link_dest[pos], "../", 3);
  20.449 +
  20.450 +    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
  20.451 +    (void)devfs_mk_symlink(xen_dev_dir, 
  20.452 +                           "evtchn", 
  20.453 +                           DEVFS_FL_DEFAULT, 
  20.454 +                           &link_dest[pos],
  20.455 +                           &symlink_handle, 
  20.456 +                           NULL);
  20.457 +
  20.458 +    /* (DEVFS) automatically destroy the symlink with its destination. */
  20.459 +    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
  20.460 +
  20.461 +    err = request_irq(HYPEREVENT_IRQ(_EVENT_EVTCHN),
  20.462 +                      evtchn_interrupt, 0, "evtchn", NULL);
  20.463 +    if ( err != 0 )
  20.464 +    {
  20.465 +        printk(KERN_ALERT "Could not allocate evtchn receive interrupt\n");
  20.466 +        return err;
  20.467 +    }
  20.468 +
  20.469 +    /* Kickstart servicing of notifications. */
  20.470 +    evtchn_interrupt(0, NULL, NULL);
  20.471 +
  20.472 +    printk("Event-channel driver installed.\n");
  20.473 +
  20.474 +    return 0;
  20.475 +}
  20.476 +
/* Module unload: release the notification IRQ and remove the misc device. */
static void cleanup_module(void)
{
    free_irq(HYPEREVENT_IRQ(_EVENT_EVTCHN), NULL);
    misc_deregister(&evtchn_miscdev);
}

module_init(init_module);
module_exit(cleanup_module);
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/network/Makefile	Tue Mar 23 10:40:28 2004 +0000
    21.3 @@ -0,0 +1,3 @@
    21.4 +O_TARGET := drv.o
    21.5 +obj-y := network.o
    21.6 +include $(TOPDIR)/Rules.make
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/network/network.c	Tue Mar 23 10:40:28 2004 +0000
    22.3 @@ -0,0 +1,631 @@
    22.4 +/******************************************************************************
    22.5 + * network.c
    22.6 + * 
    22.7 + * Virtual network driver for XenoLinux.
    22.8 + * 
    22.9 + * Copyright (c) 2002-2003, K A Fraser
   22.10 + */
   22.11 +
   22.12 +#include <linux/config.h>
   22.13 +#include <linux/module.h>
   22.14 +
   22.15 +#include <linux/kernel.h>
   22.16 +#include <linux/sched.h>
   22.17 +#include <linux/slab.h>
   22.18 +#include <linux/string.h>
   22.19 +#include <linux/errno.h>
   22.20 +
   22.21 +#include <linux/netdevice.h>
   22.22 +#include <linux/inetdevice.h>
   22.23 +#include <linux/etherdevice.h>
   22.24 +#include <linux/skbuff.h>
   22.25 +#include <linux/init.h>
   22.26 +
   22.27 +#include <asm/io.h>
   22.28 +#include <net/sock.h>
   22.29 +#include <net/pkt_sched.h>
   22.30 +
/* Half a page plus one byte: makes the slab allocator hand back a whole,
 * page-aligned page per receive buffer (needed for page flipping). */
#define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */

static void network_interrupt(int irq, void *dev_id, struct pt_regs *ptregs);
static void network_tx_buf_gc(struct net_device *dev);
static void network_alloc_rx_buffers(struct net_device *dev);
static void cleanup_module(void);

/* All probed VIFs, linked through net_private.list. */
static struct list_head dev_list;

/* Per-interface driver state, hung off net_device.priv. */
struct net_private
{
    struct list_head list;    /* entry in dev_list */
    struct net_device *dev;   /* back-pointer to the owning net_device */

    struct net_device_stats stats;
    NET_RING_IDX rx_resp_cons, tx_resp_cons;  /* next responses to consume */
    unsigned int net_ring_fixmap_idx, tx_full;
    net_ring_t  *net_ring;    /* shared descriptor rings (fixmapped) */
    net_idx_t   *net_idx;     /* producer/event indices in shared info */
    spinlock_t   tx_lock;     /* serialises xmit path vs tx garbage collect */
    unsigned int idx; /* Domain-specific index of this VIF. */

    unsigned int rx_bufs_to_notify;  /* rx buffers posted since last push */

#define STATE_ACTIVE    0
#define STATE_SUSPENDED 1
#define STATE_CLOSED    2
    unsigned int state;

    /*
     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     * array is an index into a chain of free entries.
     */
    struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
    struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
};
   22.67 +
   22.68 +/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
   22.69 +#define ADD_ID_TO_FREELIST(_list, _id)             \
   22.70 +    (_list)[(_id)] = (_list)[0];                   \
   22.71 +    (_list)[0]     = (void *)(unsigned long)(_id);
   22.72 +#define GET_ID_FROM_FREELIST(_list)                \
   22.73 + ({ unsigned long _id = (unsigned long)(_list)[0]; \
   22.74 +    (_list)[0]  = (_list)[_id];                    \
   22.75 +    (unsigned short)_id; })
   22.76 +
   22.77 +
/* Dump one interface's ring indices and queue state to the kernel log.
 * No-op for interfaces that have been closed. */
static void _dbg_network_int(struct net_device *dev)
{
    struct net_private *np = dev->priv;

    if ( np->state == STATE_CLOSED )
        return;
    
    printk(KERN_ALERT "net: tx_full=%d, tx_resp_cons=0x%08x,"
           " tx_req_prod=0x%08x\nnet: tx_resp_prod=0x%08x,"
           " tx_event=0x%08x, state=%d\n",
           np->tx_full, np->tx_resp_cons, 
           np->net_idx->tx_req_prod, np->net_idx->tx_resp_prod, 
           np->net_idx->tx_event,
           test_bit(__LINK_STATE_XOFF, &dev->state));
    printk(KERN_ALERT "net: rx_resp_cons=0x%08x,"
           " rx_req_prod=0x%08x\nnet: rx_resp_prod=0x%08x, rx_event=0x%08x\n",
           np->rx_resp_cons, np->net_idx->rx_req_prod,
           np->net_idx->rx_resp_prod, np->net_idx->rx_event);
}
   22.97 +
   22.98 +
   22.99 +static void dbg_network_int(int irq, void *unused, struct pt_regs *ptregs)
  22.100 +{
  22.101 +    struct list_head *ent;
  22.102 +    struct net_private *np;
  22.103 +    list_for_each ( ent, &dev_list )
  22.104 +    {
  22.105 +        np = list_entry(ent, struct net_private, list);
  22.106 +        _dbg_network_int(np->dev);
  22.107 +    }
  22.108 +}
  22.109 +
  22.110 +
/*
 * Bring the interface up: reset the shared rings via hypercall, query the
 * VIF's MAC and ring machine-frame, map the ring through a fixmap slot,
 * and re-initialise all per-device state before enabling the queue.
 */
static int network_open(struct net_device *dev)
{
    struct net_private *np = dev->priv;
    netop_t netop;
    int i, ret;

    netop.cmd = NETOP_RESET_RINGS;
    netop.vif = np->idx;
    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
    {
        printk(KERN_ALERT "Possible net trouble: couldn't reset ring idxs\n");
        return ret;
    }

    netop.cmd = NETOP_GET_VIF_INFO;
    netop.vif = np->idx;
    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
    {
        printk(KERN_ALERT "Couldn't get info for vif %d\n", np->idx);
        return ret;
    }

    memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);

    /* Map the shared descriptor ring page into our address space. */
    set_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx, 
               netop.u.get_vif_info.ring_mfn << PAGE_SHIFT);
    np->net_ring = (net_ring_t *)fix_to_virt(
        FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
    np->net_idx  = &HYPERVISOR_shared_info->net_idx[np->idx];

    np->rx_bufs_to_notify = 0;
    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
    memset(&np->stats, 0, sizeof(np->stats));
    spin_lock_init(&np->tx_lock);
    memset(np->net_ring, 0, sizeof(*np->net_ring));
    memset(np->net_idx, 0, sizeof(*np->net_idx));

    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
        np->tx_skbs[i] = (void *)(i+1);
    for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
        np->rx_skbs[i] = (void *)(i+1);

    /* All state must be visible before the interrupt path sees ACTIVE. */
    wmb();
    np->state = STATE_ACTIVE;

    network_alloc_rx_buffers(dev);

    netif_start_queue(dev);

    MOD_INC_USE_COUNT;

    return 0;
}
  22.165 +
  22.166 +
/*
 * Reap completed transmit responses: free each acknowledged skb, return
 * its slot to the tx free chain, and re-arm the tx_event trigger. Caller
 * must hold np->tx_lock. Wakes the queue if a prior full condition cleared.
 */
static void network_tx_buf_gc(struct net_device *dev)
{
    NET_RING_IDX i, prod;
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    tx_entry_t *tx_ring = np->net_ring->tx_ring;

    do {
        prod = np->net_idx->tx_resp_prod;

        for ( i = np->tx_resp_cons; i != prod; i++ )
        {
            id  = tx_ring[MASK_NET_TX_IDX(i)].resp.id;
            skb = np->tx_skbs[id];
            ADD_ID_TO_FREELIST(np->tx_skbs, id);
            dev_kfree_skb_any(skb);
        }
        
        np->tx_resp_cons = prod;
        
        /*
         * Set a new event, then check for race with update of tx_cons. Note
         * that it is essential to schedule a callback, no matter how few
         * buffers are pending. Even if there is space in the transmit ring,
         * higher layers may be blocked because too much data is outstanding:
         * in such cases notification from Xen is likely to be the only kick
         * that we'll get.
         */
        np->net_idx->tx_event = 
            prod + ((np->net_idx->tx_req_prod - prod) >> 1) + 1;
        mb();  /* event write must be visible before re-reading tx_resp_prod */
    }
    while ( prod != np->net_idx->tx_resp_prod );

    if ( np->tx_full && 
         ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
    {
        np->tx_full = 0;
        if ( np->state == STATE_ACTIVE )
            netif_wake_queue(dev);
    }
}
  22.210 +
  22.211 +
  22.212 +static inline pte_t *get_ppte(void *addr)
  22.213 +{
  22.214 +    pgd_t *pgd; pmd_t *pmd; pte_t *pte;
  22.215 +    pgd = pgd_offset_k(   (unsigned long)addr);
  22.216 +    pmd = pmd_offset(pgd, (unsigned long)addr);
  22.217 +    pte = pte_offset(pmd, (unsigned long)addr);
  22.218 +    return pte;
  22.219 +}
  22.220 +
  22.221 +
/*
 * Refill the receive ring: post page-aligned skb buffers (as machine PTEs,
 * for page flipping) until the ring is full. Xen notifications are batched
 * and only pushed once a quarter of the ring has been replenished.
 */
static void network_alloc_rx_buffers(struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    netop_t netop;
    NET_RING_IDX i = np->net_idx->rx_req_prod;

    if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) || 
         unlikely(np->state != STATE_ACTIVE) )
        return;

    do {
        skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(skb == NULL) )
            break;

        skb->dev = dev;

        /* Page flipping swaps whole pages, so alignment is mandatory. */
        if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
            panic("alloc_skb needs to provide us page-aligned buffers.");

        id = GET_ID_FROM_FREELIST(np->rx_skbs);
        np->rx_skbs[id] = skb;

        np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].req.id   = id;
        np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].req.addr = 
            virt_to_machine(get_ppte(skb->head));

        np->rx_bufs_to_notify++;
    }
    while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );

    /*
     * We may have allocated buffers which have entries outstanding in the page
     * update queue -- make sure we flush those first!
     */
    flush_page_update_queue();

    np->net_idx->rx_req_prod = i;
    np->net_idx->rx_event    = np->rx_resp_cons + 1;
        
    /* Batch Xen notifications. */
    if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
    {
        netop.cmd = NETOP_PUSH_BUFFERS;
        netop.vif = np->idx;
        (void)HYPERVISOR_net_io_op(&netop);
        np->rx_bufs_to_notify = 0;
    }
}
  22.273 +
  22.274 +
/*
 * hard_start_xmit: queue one skb on the shared tx ring. Frames that cross
 * a page boundary are first copied into a fresh page-bounded skb, because
 * the ring describes each packet with a single (machine address, size)
 * pair. Returns 1 (requeue, 2.4 convention) if that copy cannot be made.
 */
static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = (struct net_private *)dev->priv;
    tx_req_entry_t *tx;
    netop_t netop;
    NET_RING_IDX i;

    if ( unlikely(np->tx_full) )
    {
        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
        netif_stop_queue(dev);
        return -ENOBUFS;
    }

    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
                  PAGE_SIZE) )
    {
        /* Linearise into a single page; the original skb is released. */
        struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(new_skb == NULL) )
            return 1;
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb(skb);
        skb = new_skb;
    }   
    
    spin_lock_irq(&np->tx_lock);

    i = np->net_idx->tx_req_prod;

    id = GET_ID_FROM_FREELIST(np->tx_skbs);
    np->tx_skbs[id] = skb;

    tx = &np->net_ring->tx_ring[MASK_NET_TX_IDX(i)].req;

    tx->id   = id;
    tx->addr = phys_to_machine(virt_to_phys(skb->data));
    tx->size = skb->len;

    /* Descriptor must be visible before the producer index advances. */
    wmb();
    np->net_idx->tx_req_prod = i + 1;

    network_tx_buf_gc(dev);

    if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
    {
        np->tx_full = 1;
        netif_stop_queue(dev);
    }

    spin_unlock_irq(&np->tx_lock);

    np->stats.tx_bytes += skb->len;
    np->stats.tx_packets++;

    /* Only notify Xen if there are no outstanding responses. */
    mb();
    if ( np->net_idx->tx_resp_prod == i )
    {
        netop.cmd = NETOP_PUSH_BUFFERS;
        netop.vif = np->idx;
        (void)HYPERVISOR_net_io_op(&netop);
    }

    return 0;
}
  22.342 +
  22.343 +
/*
 * Per-device interrupt work: garbage-collect completed transmits, then
 * consume rx responses — remapping each flipped page back into our
 * pseudo-physical space, rebuilding the skb around it, and passing it up
 * the stack. Loops until no new responses race in.
 */
static inline void _network_interrupt(struct net_device *dev)
{
    struct net_private *np = dev->priv;
    unsigned long flags;
    struct sk_buff *skb;
    rx_resp_entry_t *rx;
    NET_RING_IDX i;

    if ( unlikely(np->state == STATE_CLOSED) )
        return;
    
    spin_lock_irqsave(&np->tx_lock, flags);
    network_tx_buf_gc(dev);
    spin_unlock_irqrestore(&np->tx_lock, flags);

 again:
    for ( i = np->rx_resp_cons; i != np->net_idx->rx_resp_prod; i++ )
    {
        rx = &np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].resp;

        skb = np->rx_skbs[rx->id];
        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);

        if ( unlikely(rx->status != RING_STATUS_OK) )
        {
            /* Gate this error. We get a (valid) slew of them on suspend. */
            if ( np->state == STATE_ACTIVE )
                printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
            dev_kfree_skb_any(skb);
            continue;
        }

        /*
         * Set up shinfo -- from alloc_skb This was particularily nasty:  the
         * shared info is hidden at the back of the data area (presumably so it
         * can be shared), but on page flip it gets very spunked.
         */
        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;
                                
        /* Record the new machine frame backing this pseudo-physical page. */
        phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] =
            (*(unsigned long *)get_ppte(skb->head)) >> PAGE_SHIFT;

        skb->data = skb->tail = skb->head + rx->offset;
        skb_put(skb, rx->size);
        skb->protocol = eth_type_trans(skb, dev);

        np->stats.rx_packets++;

        np->stats.rx_bytes += rx->size;
        netif_rx(skb);
        dev->last_rx = jiffies;
    }

    np->rx_resp_cons = i;

    network_alloc_rx_buffers(dev);
    
    /* Deal with hypervisor racing our resetting of rx_event. */
    mb();
    if ( np->net_idx->rx_resp_prod != i )
        goto again;
}
  22.408 +
  22.409 +
  22.410 +static void network_interrupt(int irq, void *unused, struct pt_regs *ptregs)
  22.411 +{
  22.412 +    struct list_head *ent;
  22.413 +    struct net_private *np;
  22.414 +    list_for_each ( ent, &dev_list )
  22.415 +    {
  22.416 +        np = list_entry(ent, struct net_private, list);
  22.417 +        _network_interrupt(np->dev);
  22.418 +    }
  22.419 +}
  22.420 +
  22.421 +
/*
 * Bring the interface down: suspend it, ask Xen to flush/return all posted
 * buffers, wait for every outstanding request to be answered, then mark
 * the device CLOSED and tear down the ring mapping.
 */
static int network_close(struct net_device *dev)
{
    struct net_private *np = dev->priv;
    netop_t netop;

    np->state = STATE_SUSPENDED;
    wmb();

    netif_stop_queue(np->dev);

    netop.cmd = NETOP_FLUSH_BUFFERS;
    netop.vif = np->idx;
    (void)HYPERVISOR_net_io_op(&netop);

    /* Poll until Xen has responded to everything we posted. */
    while ( (np->rx_resp_cons != np->net_idx->rx_req_prod) ||
            (np->tx_resp_cons != np->net_idx->tx_req_prod) )
    {
        barrier();
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(1);
    }

    wmb();
    np->state = STATE_CLOSED;
    wmb();

    /* Now no longer safe to take interrupts for this device. */
    clear_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx);

    MOD_DEC_USE_COUNT;

    return 0;
}
  22.455 +
  22.456 +
  22.457 +static struct net_device_stats *network_get_stats(struct net_device *dev)
  22.458 +{
  22.459 +    struct net_private *np = (struct net_private *)dev->priv;
  22.460 +    return &np->stats;
  22.461 +}
  22.462 +
  22.463 +
  22.464 +/*
  22.465 + * This notifier is installed for domain 0 only.
  22.466 + * All other domains have VFR rules installed on their behalf by domain 0
  22.467 + * when they are created. For bootstrap, Xen creates wildcard rules for
  22.468 + * domain 0 -- this notifier is used to detect when we find our proper
  22.469 + * IP address, so we can poke down proper rules and remove the wildcards.
  22.470 + */
  22.471 +static int inetdev_notify(struct notifier_block *this, 
  22.472 +                          unsigned long event, 
  22.473 +                          void *ptr)
  22.474 +{
  22.475 +    struct in_ifaddr  *ifa  = (struct in_ifaddr *)ptr; 
  22.476 +    struct net_device *dev = ifa->ifa_dev->dev;
  22.477 +    struct list_head  *ent;
  22.478 +    struct net_private *np;
  22.479 +    int idx = -1;
  22.480 +    network_op_t op;
  22.481 +
  22.482 +    list_for_each ( ent, &dev_list )
  22.483 +    {
  22.484 +        np = list_entry(dev_list.next, struct net_private, list);
  22.485 +        if ( np->dev == dev )
  22.486 +            idx = np->idx;
  22.487 +    }
  22.488 +
  22.489 +    if ( idx == -1 )
  22.490 +        goto out;
  22.491 +    
  22.492 +    memset(&op, 0, sizeof(op));
  22.493 +    op.u.net_rule.proto         = NETWORK_PROTO_ANY;
  22.494 +    op.u.net_rule.action        = NETWORK_ACTION_ACCEPT;
  22.495 +
  22.496 +    if ( event == NETDEV_UP )
  22.497 +        op.cmd = NETWORK_OP_ADDRULE;
  22.498 +    else if ( event == NETDEV_DOWN )
  22.499 +        op.cmd = NETWORK_OP_DELETERULE;
  22.500 +    else
  22.501 +        goto out;
  22.502 +
  22.503 +    op.u.net_rule.src_dom       = 0;
  22.504 +    op.u.net_rule.src_idx       = idx;
  22.505 +    op.u.net_rule.dst_dom       = VIF_SPECIAL;
  22.506 +    op.u.net_rule.dst_idx       = VIF_PHYSICAL_INTERFACE;
  22.507 +    op.u.net_rule.src_addr      = ntohl(ifa->ifa_address);
  22.508 +    op.u.net_rule.src_addr_mask = ~0UL;
  22.509 +    op.u.net_rule.dst_addr      = 0;
  22.510 +    op.u.net_rule.dst_addr_mask = 0;
  22.511 +    (void)HYPERVISOR_network_op(&op);
  22.512 +    
  22.513 +    op.u.net_rule.src_dom       = VIF_SPECIAL;
  22.514 +    op.u.net_rule.src_idx       = VIF_ANY_INTERFACE;
  22.515 +    op.u.net_rule.dst_dom       = 0;
  22.516 +    op.u.net_rule.dst_idx       = idx;
  22.517 +    op.u.net_rule.src_addr      = 0;
  22.518 +    op.u.net_rule.src_addr_mask = 0;    
  22.519 +    op.u.net_rule.dst_addr      = ntohl(ifa->ifa_address);
  22.520 +    op.u.net_rule.dst_addr_mask = ~0UL;
  22.521 +    (void)HYPERVISOR_network_op(&op);
  22.522 +    
  22.523 + out:
  22.524 +    return NOTIFY_DONE;
  22.525 +}
  22.526 +
/* Hooked into the inetaddr notifier chain (domain 0 only; see init_module). */
static struct notifier_block notifier_inetdev = {
    .notifier_call  = inetdev_notify,
    .next           = NULL,
    .priority       = 0
};
  22.532 +
  22.533 +
/*
 * Module init: probe every domain VIF via hypercall, register a net_device
 * for each (up to 4 fixmap slots), and hook the network and debug IRQs.
 * NOTE(review): the failure path goes to cleanup_module(), whose visible
 * portion frees devices only -- confirm it also releases the IRQs and the
 * inetaddr notifier registered here.
 */
static int __init init_module(void)
{
    int i, fixmap_idx=-1, err;
    struct net_device *dev;
    struct net_private *np;
    netop_t netop;

    INIT_LIST_HEAD(&dev_list);

    /*
     * Domain 0 must poke its own network rules as it discovers its IP
     * addresses. All other domains have a privileged "parent" to do this for
     * them at start of day.
     */
    if ( start_info.flags & SIF_INITDOMAIN )
        (void)register_inetaddr_notifier(&notifier_inetdev);

    err = request_irq(HYPEREVENT_IRQ(_EVENT_NET), network_interrupt, 
                      SA_SAMPLE_RANDOM, "network", NULL);
    if ( err )
    {
        printk(KERN_WARNING "Could not allocate network interrupt\n");
        goto fail;
    }
    
    err = request_irq(HYPEREVENT_IRQ(_EVENT_DEBUG), dbg_network_int, 
                      SA_SHIRQ, "net_dbg", &dbg_network_int);
    if ( err )
        printk(KERN_WARNING "Non-fatal error -- no debug interrupt\n");

    for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
    {
        /* If the VIF is invalid then the query hypercall will fail. */
        netop.cmd = NETOP_GET_VIF_INFO;
        netop.vif = i;
        if ( HYPERVISOR_net_io_op(&netop) != 0 )
            continue;

        /* We actually only support up to 4 vifs right now. */
        if ( ++fixmap_idx == 4 )
            break;

        dev = alloc_etherdev(sizeof(struct net_private));
        if ( dev == NULL )
        {
            err = -ENOMEM;
            goto fail;
        }

        np = dev->priv;
        np->state               = STATE_CLOSED;
        np->net_ring_fixmap_idx = fixmap_idx;
        np->idx                 = i;

        SET_MODULE_OWNER(dev);
        dev->open            = network_open;
        dev->hard_start_xmit = network_start_xmit;
        dev->stop            = network_close;
        dev->get_stats       = network_get_stats;

        memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);

        if ( (err = register_netdev(dev)) != 0 )
        {
            kfree(dev);
            goto fail;
        }

        np->dev = dev;
        list_add(&np->list, &dev_list);
    }

    return 0;

 fail:
    cleanup_module();
    return err;
}
  22.612 +
  22.613 +
  22.614 +static void cleanup_module(void)
  22.615 +{
  22.616 +    struct net_private *np;
  22.617 +    struct net_device *dev;
  22.618 +
  22.619 +    while ( !list_empty(&dev_list) )
  22.620 +    {
  22.621 +        np = list_entry(dev_list.next, struct net_private, list);
  22.622 +        list_del(&np->list);
  22.623 +        dev = np->dev;
  22.624 +        unregister_netdev(dev);
  22.625 +        kfree(dev);
  22.626 +    }
  22.627 +
  22.628 +    if ( start_info.flags & SIF_INITDOMAIN )
  22.629 +        (void)unregister_inetaddr_notifier(&notifier_inetdev);
  22.630 +}
  22.631 +
  22.632 +
  22.633 +module_init(init_module);
  22.634 +module_exit(cleanup_module);
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/Makefile	Tue Mar 23 10:40:28 2004 +0000
    23.3 @@ -0,0 +1,3 @@
    23.4 +O_TARGET := drv.o
    23.5 +obj-y := vnetif.o
    23.6 +include $(TOPDIR)/Rules.make
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/vnetif/vnetif.c	Tue Mar 23 10:40:28 2004 +0000
    24.3 @@ -0,0 +1,553 @@
    24.4 +/******************************************************************************
    24.5 + * vnetif.c
    24.6 + * 
    24.7 + * Virtual network driver for XenoLinux.
    24.8 + * 
    24.9 + * Copyright (c) 2002-2004, K A Fraser
   24.10 + */
   24.11 +
   24.12 +#include <linux/config.h>
   24.13 +#include <linux/module.h>
   24.14 +
   24.15 +#include <linux/kernel.h>
   24.16 +#include <linux/sched.h>
   24.17 +#include <linux/slab.h>
   24.18 +#include <linux/string.h>
   24.19 +#include <linux/errno.h>
   24.20 +
   24.21 +#include <linux/netdevice.h>
   24.22 +#include <linux/inetdevice.h>
   24.23 +#include <linux/etherdevice.h>
   24.24 +#include <linux/skbuff.h>
   24.25 +#include <linux/init.h>
   24.26 +
   24.27 +#include <asm/io.h>
   24.28 +#include <net/sock.h>
   24.29 +#include <net/pkt_sched.h>
   24.30 +
   24.31 +#define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */
   24.32 +
   24.33 +static void network_interrupt(int irq, void *dev_id, struct pt_regs *ptregs);
   24.34 +static void network_tx_buf_gc(struct net_device *dev);
   24.35 +static void network_alloc_rx_buffers(struct net_device *dev);
   24.36 +static void cleanup_module(void);
   24.37 +
   24.38 +static struct list_head dev_list;
   24.39 +
   24.40 +struct net_private
   24.41 +{
   24.42 +    struct list_head list;
   24.43 +    struct net_device *dev;
   24.44 +
   24.45 +    struct net_device_stats stats;
   24.46 +    NET_RING_IDX rx_resp_cons, tx_resp_cons;
   24.47 +    unsigned int net_ring_fixmap_idx, tx_full;
   24.48 +    net_ring_t  *net_ring;
   24.49 +    net_idx_t   *net_idx;
   24.50 +    spinlock_t   tx_lock;
   24.51 +    unsigned int idx; /* Domain-specific index of this VIF. */
   24.52 +
   24.53 +    unsigned int rx_bufs_to_notify;
   24.54 +
   24.55 +#define STATE_ACTIVE    0
   24.56 +#define STATE_SUSPENDED 1
   24.57 +#define STATE_CLOSED    2
   24.58 +    unsigned int state;
   24.59 +
   24.60 +    /*
   24.61 +     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   24.62 +     * array is an index into a chain of free entries.
   24.63 +     */
   24.64 +    struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
   24.65 +    struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
   24.66 +};
   24.67 +
   24.68 +/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
   24.69 +#define ADD_ID_TO_FREELIST(_list, _id)             \
   24.70 +    (_list)[(_id)] = (_list)[0];                   \
   24.71 +    (_list)[0]     = (void *)(unsigned long)(_id);
   24.72 +#define GET_ID_FROM_FREELIST(_list)                \
   24.73 + ({ unsigned long _id = (unsigned long)(_list)[0]; \
   24.74 +    (_list)[0]  = (_list)[_id];                    \
   24.75 +    (unsigned short)_id; })
   24.76 +
   24.77 +
   24.78 +static void _dbg_network_int(struct net_device *dev)
   24.79 +{
   24.80 +    struct net_private *np = dev->priv;
   24.81 +
   24.82 +    if ( np->state == STATE_CLOSED )
   24.83 +        return;
   24.84 +    
   24.85 +    printk(KERN_ALERT "net: tx_full=%d, tx_resp_cons=0x%08x,"
   24.86 +           " tx_req_prod=0x%08x\nnet: tx_resp_prod=0x%08x,"
   24.87 +           " tx_event=0x%08x, state=%d\n",
   24.88 +           np->tx_full, np->tx_resp_cons, 
   24.89 +           np->net_idx->tx_req_prod, np->net_idx->tx_resp_prod, 
   24.90 +           np->net_idx->tx_event,
   24.91 +           test_bit(__LINK_STATE_XOFF, &dev->state));
   24.92 +    printk(KERN_ALERT "net: rx_resp_cons=0x%08x,"
   24.93 +           " rx_req_prod=0x%08x\nnet: rx_resp_prod=0x%08x, rx_event=0x%08x\n",
   24.94 +           np->rx_resp_cons, np->net_idx->rx_req_prod,
   24.95 +           np->net_idx->rx_resp_prod, np->net_idx->rx_event);
   24.96 +}
   24.97 +
   24.98 +
   24.99 +static void dbg_network_int(int irq, void *unused, struct pt_regs *ptregs)
  24.100 +{
  24.101 +    struct list_head *ent;
  24.102 +    struct net_private *np;
  24.103 +    list_for_each ( ent, &dev_list )
  24.104 +    {
  24.105 +        np = list_entry(ent, struct net_private, list);
  24.106 +        _dbg_network_int(np->dev);
  24.107 +    }
  24.108 +}
  24.109 +
  24.110 +
  24.111 +static int network_open(struct net_device *dev)
  24.112 +{
  24.113 +    struct net_private *np = dev->priv;
  24.114 +    netop_t netop;
  24.115 +    int i, ret;
  24.116 +
  24.117 +    netop.cmd = NETOP_RESET_RINGS;
  24.118 +    netop.vif = np->idx;
  24.119 +    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
  24.120 +    {
  24.121 +        printk(KERN_ALERT "Possible net trouble: couldn't reset ring idxs\n");
  24.122 +        return ret;
  24.123 +    }
  24.124 +
  24.125 +    netop.cmd = NETOP_GET_VIF_INFO;
  24.126 +    netop.vif = np->idx;
  24.127 +    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
  24.128 +    {
  24.129 +        printk(KERN_ALERT "Couldn't get info for vif %d\n", np->idx);
  24.130 +        return ret;
  24.131 +    }
  24.132 +
  24.133 +    memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);
  24.134 +
  24.135 +    set_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx, 
  24.136 +               netop.u.get_vif_info.ring_mfn << PAGE_SHIFT);
  24.137 +    np->net_ring = (net_ring_t *)fix_to_virt(
  24.138 +        FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
  24.139 +    np->net_idx  = &HYPERVISOR_shared_info->net_idx[np->idx];
  24.140 +
  24.141 +    np->rx_bufs_to_notify = 0;
  24.142 +    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  24.143 +    memset(&np->stats, 0, sizeof(np->stats));
  24.144 +    spin_lock_init(&np->tx_lock);
  24.145 +    memset(np->net_ring, 0, sizeof(*np->net_ring));
  24.146 +    memset(np->net_idx, 0, sizeof(*np->net_idx));
  24.147 +
  24.148 +    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
  24.149 +    for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
  24.150 +        np->tx_skbs[i] = (void *)(i+1);
  24.151 +    for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
  24.152 +        np->rx_skbs[i] = (void *)(i+1);
  24.153 +
  24.154 +    wmb();
  24.155 +    np->state = STATE_ACTIVE;
  24.156 +
  24.157 +    network_alloc_rx_buffers(dev);
  24.158 +
  24.159 +    netif_start_queue(dev);
  24.160 +
  24.161 +    MOD_INC_USE_COUNT;
  24.162 +
  24.163 +    return 0;
  24.164 +}
  24.165 +
  24.166 +
  24.167 +static void network_tx_buf_gc(struct net_device *dev)
  24.168 +{
  24.169 +    NET_RING_IDX i, prod;
  24.170 +    unsigned short id;
  24.171 +    struct net_private *np = dev->priv;
  24.172 +    struct sk_buff *skb;
  24.173 +    tx_entry_t *tx_ring = np->net_ring->tx_ring;
  24.174 +
  24.175 +    do {
  24.176 +        prod = np->net_idx->tx_resp_prod;
  24.177 +
  24.178 +        for ( i = np->tx_resp_cons; i != prod; i++ )
  24.179 +        {
  24.180 +            id  = tx_ring[MASK_NET_TX_IDX(i)].resp.id;
  24.181 +            skb = np->tx_skbs[id];
  24.182 +            ADD_ID_TO_FREELIST(np->tx_skbs, id);
  24.183 +            dev_kfree_skb_any(skb);
  24.184 +        }
  24.185 +        
  24.186 +        np->tx_resp_cons = prod;
  24.187 +        
  24.188 +        /*
  24.189 +         * Set a new event, then check for race with update of tx_cons. Note
  24.190 +         * that it is essential to schedule a callback, no matter how few
  24.191 +         * buffers are pending. Even if there is space in the transmit ring,
  24.192 +         * higher layers may be blocked because too much data is outstanding:
  24.193 +         * in such cases notification from Xen is likely to be the only kick
  24.194 +         * that we'll get.
  24.195 +         */
  24.196 +        np->net_idx->tx_event = 
  24.197 +            prod + ((np->net_idx->tx_req_prod - prod) >> 1) + 1;
  24.198 +        mb();
  24.199 +    }
  24.200 +    while ( prod != np->net_idx->tx_resp_prod );
  24.201 +
  24.202 +    if ( np->tx_full && 
  24.203 +         ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
  24.204 +    {
  24.205 +        np->tx_full = 0;
  24.206 +        if ( np->state == STATE_ACTIVE )
  24.207 +            netif_wake_queue(dev);
  24.208 +    }
  24.209 +}
  24.210 +
  24.211 +
  24.212 +static inline pte_t *get_ppte(void *addr)
  24.213 +{
  24.214 +    pgd_t *pgd; pmd_t *pmd; pte_t *pte;
  24.215 +    pgd = pgd_offset_k(   (unsigned long)addr);
  24.216 +    pmd = pmd_offset(pgd, (unsigned long)addr);
  24.217 +    pte = pte_offset(pmd, (unsigned long)addr);
  24.218 +    return pte;
  24.219 +}
  24.220 +
  24.221 +
  24.222 +static void network_alloc_rx_buffers(struct net_device *dev)
  24.223 +{
  24.224 +    unsigned short id;
  24.225 +    struct net_private *np = dev->priv;
  24.226 +    struct sk_buff *skb;
  24.227 +    netop_t netop;
  24.228 +    NET_RING_IDX i = np->net_idx->rx_req_prod;
  24.229 +
  24.230 +    if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) || 
  24.231 +         unlikely(np->state != STATE_ACTIVE) )
  24.232 +        return;
  24.233 +
  24.234 +    do {
  24.235 +        skb = dev_alloc_skb(RX_BUF_SIZE);
  24.236 +        if ( unlikely(skb == NULL) )
  24.237 +            break;
  24.238 +
  24.239 +        skb->dev = dev;
  24.240 +
  24.241 +        if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
  24.242 +            panic("alloc_skb needs to provide us page-aligned buffers.");
  24.243 +
  24.244 +        id = GET_ID_FROM_FREELIST(np->rx_skbs);
  24.245 +        np->rx_skbs[id] = skb;
  24.246 +
  24.247 +        np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].req.id   = id;
  24.248 +        np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].req.addr = 
  24.249 +            virt_to_machine(get_ppte(skb->head));
  24.250 +
  24.251 +        np->rx_bufs_to_notify++;
  24.252 +    }
  24.253 +    while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );
  24.254 +
  24.255 +    /*
  24.256 +     * We may have allocated buffers which have entries outstanding in the page
  24.257 +     * update queue -- make sure we flush those first!
  24.258 +     */
  24.259 +    flush_page_update_queue();
  24.260 +
  24.261 +    np->net_idx->rx_req_prod = i;
  24.262 +    np->net_idx->rx_event    = np->rx_resp_cons + 1;
  24.263 +        
  24.264 +    /* Batch Xen notifications. */
  24.265 +    if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
  24.266 +    {
  24.267 +        netop.cmd = NETOP_PUSH_BUFFERS;
  24.268 +        netop.vif = np->idx;
  24.269 +        (void)HYPERVISOR_net_io_op(&netop);
  24.270 +        np->rx_bufs_to_notify = 0;
  24.271 +    }
  24.272 +}
  24.273 +
  24.274 +
  24.275 +static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
  24.276 +{
  24.277 +    unsigned short id;
  24.278 +    struct net_private *np = (struct net_private *)dev->priv;
  24.279 +    tx_req_entry_t *tx;
  24.280 +    netop_t netop;
  24.281 +    NET_RING_IDX i;
  24.282 +
  24.283 +    if ( unlikely(np->tx_full) )
  24.284 +    {
  24.285 +        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
  24.286 +        netif_stop_queue(dev);
  24.287 +        return -ENOBUFS;
  24.288 +    }
  24.289 +
  24.290 +    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
  24.291 +                  PAGE_SIZE) )
  24.292 +    {
  24.293 +        struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
  24.294 +        if ( unlikely(new_skb == NULL) )
  24.295 +            return 1;
  24.296 +        skb_put(new_skb, skb->len);
  24.297 +        memcpy(new_skb->data, skb->data, skb->len);
  24.298 +        dev_kfree_skb(skb);
  24.299 +        skb = new_skb;
  24.300 +    }   
  24.301 +    
  24.302 +    spin_lock_irq(&np->tx_lock);
  24.303 +
  24.304 +    i = np->net_idx->tx_req_prod;
  24.305 +
  24.306 +    id = GET_ID_FROM_FREELIST(np->tx_skbs);
  24.307 +    np->tx_skbs[id] = skb;
  24.308 +
  24.309 +    tx = &np->net_ring->tx_ring[MASK_NET_TX_IDX(i)].req;
  24.310 +
  24.311 +    tx->id   = id;
  24.312 +    tx->addr = phys_to_machine(virt_to_phys(skb->data));
  24.313 +    tx->size = skb->len;
  24.314 +
  24.315 +    wmb();
  24.316 +    np->net_idx->tx_req_prod = i + 1;
  24.317 +
  24.318 +    network_tx_buf_gc(dev);
  24.319 +
  24.320 +    if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
  24.321 +    {
  24.322 +        np->tx_full = 1;
  24.323 +        netif_stop_queue(dev);
  24.324 +    }
  24.325 +
  24.326 +    spin_unlock_irq(&np->tx_lock);
  24.327 +
  24.328 +    np->stats.tx_bytes += skb->len;
  24.329 +    np->stats.tx_packets++;
  24.330 +
  24.331 +    /* Only notify Xen if there are no outstanding responses. */
  24.332 +    mb();
  24.333 +    if ( np->net_idx->tx_resp_prod == i )
  24.334 +    {
  24.335 +        netop.cmd = NETOP_PUSH_BUFFERS;
  24.336 +        netop.vif = np->idx;
  24.337 +        (void)HYPERVISOR_net_io_op(&netop);
  24.338 +    }
  24.339 +
  24.340 +    return 0;
  24.341 +}
  24.342 +
  24.343 +
  24.344 +static inline void _network_interrupt(struct net_device *dev)
  24.345 +{
  24.346 +    struct net_private *np = dev->priv;
  24.347 +    unsigned long flags;
  24.348 +    struct sk_buff *skb;
  24.349 +    rx_resp_entry_t *rx;
  24.350 +    NET_RING_IDX i;
  24.351 +
  24.352 +    if ( unlikely(np->state == STATE_CLOSED) )
  24.353 +        return;
  24.354 +    
  24.355 +    spin_lock_irqsave(&np->tx_lock, flags);
  24.356 +    network_tx_buf_gc(dev);
  24.357 +    spin_unlock_irqrestore(&np->tx_lock, flags);
  24.358 +
  24.359 + again:
  24.360 +    for ( i = np->rx_resp_cons; i != np->net_idx->rx_resp_prod; i++ )
  24.361 +    {
  24.362 +        rx = &np->net_ring->rx_ring[MASK_NET_RX_IDX(i)].resp;
  24.363 +
  24.364 +        skb = np->rx_skbs[rx->id];
  24.365 +        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
  24.366 +
  24.367 +        if ( unlikely(rx->status != RING_STATUS_OK) )
  24.368 +        {
  24.369 +            /* Gate this error. We get a (valid) slew of them on suspend. */
  24.370 +            if ( np->state == STATE_ACTIVE )
  24.371 +                printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
  24.372 +            dev_kfree_skb_any(skb);
  24.373 +            continue;
  24.374 +        }
  24.375 +
  24.376 +        /*
   24.377 +         * Set up shinfo -- from alloc_skb. This was particularly nasty: the
  24.378 +         * shared info is hidden at the back of the data area (presumably so it
   24.379 +         * can be shared), but on page flip it gets clobbered.
  24.380 +         */
  24.381 +        atomic_set(&(skb_shinfo(skb)->dataref), 1);
  24.382 +        skb_shinfo(skb)->nr_frags = 0;
  24.383 +        skb_shinfo(skb)->frag_list = NULL;
  24.384 +                                
  24.385 +        phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] =
  24.386 +            (*(unsigned long *)get_ppte(skb->head)) >> PAGE_SHIFT;
  24.387 +
  24.388 +        skb->data = skb->tail = skb->head + rx->offset;
  24.389 +        skb_put(skb, rx->size);
  24.390 +        skb->protocol = eth_type_trans(skb, dev);
  24.391 +
  24.392 +        np->stats.rx_packets++;
  24.393 +
  24.394 +        np->stats.rx_bytes += rx->size;
  24.395 +        netif_rx(skb);
  24.396 +        dev->last_rx = jiffies;
  24.397 +    }
  24.398 +
  24.399 +    np->rx_resp_cons = i;
  24.400 +
  24.401 +    network_alloc_rx_buffers(dev);
  24.402 +    
  24.403 +    /* Deal with hypervisor racing our resetting of rx_event. */
  24.404 +    mb();
  24.405 +    if ( np->net_idx->rx_resp_prod != i )
  24.406 +        goto again;
  24.407 +}
  24.408 +
  24.409 +
  24.410 +static void network_interrupt(int irq, void *unused, struct pt_regs *ptregs)
  24.411 +{
  24.412 +    struct list_head *ent;
  24.413 +    struct net_private *np;
  24.414 +    list_for_each ( ent, &dev_list )
  24.415 +    {
  24.416 +        np = list_entry(ent, struct net_private, list);
  24.417 +        _network_interrupt(np->dev);
  24.418 +    }
  24.419 +}
  24.420 +
  24.421 +
  24.422 +static int network_close(struct net_device *dev)
  24.423 +{
  24.424 +    struct net_private *np = dev->priv;
  24.425 +    netop_t netop;
  24.426 +
  24.427 +    np->state = STATE_SUSPENDED;
  24.428 +    wmb();
  24.429 +
  24.430 +    netif_stop_queue(np->dev);
  24.431 +
  24.432 +    netop.cmd = NETOP_FLUSH_BUFFERS;
  24.433 +    netop.vif = np->idx;
  24.434 +    (void)HYPERVISOR_net_io_op(&netop);
  24.435 +
  24.436 +    while ( (np->rx_resp_cons != np->net_idx->rx_req_prod) ||
  24.437 +            (np->tx_resp_cons != np->net_idx->tx_req_prod) )
  24.438 +    {
  24.439 +        barrier();
  24.440 +        current->state = TASK_INTERRUPTIBLE;
  24.441 +        schedule_timeout(1);
  24.442 +    }
  24.443 +
  24.444 +    wmb();
  24.445 +    np->state = STATE_CLOSED;
  24.446 +    wmb();
  24.447 +
  24.448 +    /* Now no longer safe to take interrupts for this device. */
  24.449 +    clear_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
  24.450 +
  24.451 +    MOD_DEC_USE_COUNT;
  24.452 +
  24.453 +    return 0;
  24.454 +}
  24.455 +
  24.456 +
  24.457 +static struct net_device_stats *network_get_stats(struct net_device *dev)
  24.458 +{
  24.459 +    struct net_private *np = (struct net_private *)dev->priv;
  24.460 +    return &np->stats;
  24.461 +}
  24.462 +
  24.463 +
  24.464 +static int __init init_module(void)
  24.465 +{
  24.466 +#if 0
  24.467 +    int i, fixmap_idx=-1, err;
  24.468 +    struct net_device *dev;
  24.469 +    struct net_private *np;
  24.470 +    netop_t netop;
  24.471 +
  24.472 +    INIT_LIST_HEAD(&dev_list);
  24.473 +
  24.474 +    err = request_irq(HYPEREVENT_IRQ(_EVENT_NET), network_interrupt, 
  24.475 +                      SA_SAMPLE_RANDOM, "network", NULL);
  24.476 +    if ( err )
  24.477 +    {
  24.478 +        printk(KERN_WARNING "Could not allocate network interrupt\n");
  24.479 +        goto fail;
  24.480 +    }
  24.481 +    
  24.482 +    err = request_irq(HYPEREVENT_IRQ(_EVENT_DEBUG), dbg_network_int, 
  24.483 +                      SA_SHIRQ, "net_dbg", &dbg_network_int);
  24.484 +    if ( err )
  24.485 +        printk(KERN_WARNING "Non-fatal error -- no debug interrupt\n");
  24.486 +
  24.487 +    for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
  24.488 +    {
  24.489 +        /* If the VIF is invalid then the query hypercall will fail. */
  24.490 +        netop.cmd = NETOP_GET_VIF_INFO;
  24.491 +        netop.vif = i;
  24.492 +        if ( HYPERVISOR_net_io_op(&netop) != 0 )
  24.493 +            continue;
  24.494 +
  24.495 +        /* We actually only support up to 4 vifs right now. */
  24.496 +        if ( ++fixmap_idx == 4 )
  24.497 +            break;
  24.498 +
  24.499 +        dev = alloc_etherdev(sizeof(struct net_private));
  24.500 +        if ( dev == NULL )
  24.501 +        {
  24.502 +            err = -ENOMEM;
  24.503 +            goto fail;
  24.504 +        }
  24.505 +
  24.506 +        np = dev->priv;
  24.507 +        np->state               = STATE_CLOSED;
  24.508 +        np->net_ring_fixmap_idx = fixmap_idx;
  24.509 +        np->idx                 = i;
  24.510 +
  24.511 +        SET_MODULE_OWNER(dev);
  24.512 +        dev->open            = network_open;
  24.513 +        dev->hard_start_xmit = network_start_xmit;
  24.514 +        dev->stop            = network_close;
  24.515 +        dev->get_stats       = network_get_stats;
  24.516 +
  24.517 +        memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);
  24.518 +
  24.519 +        if ( (err = register_netdev(dev)) != 0 )
  24.520 +        {
  24.521 +            kfree(dev);
  24.522 +            goto fail;
  24.523 +        }
  24.524 +
  24.525 +        np->dev = dev;
  24.526 +        list_add(&np->list, &dev_list);
  24.527 +    }
  24.528 +
  24.529 +    return 0;
  24.530 +
  24.531 + fail:
  24.532 +    cleanup_module();
  24.533 +    return err;
  24.534 +#endif
  24.535 +    return 0;
  24.536 +}
  24.537 +
  24.538 +
  24.539 +static void cleanup_module(void)
  24.540 +{
  24.541 +    struct net_private *np;
  24.542 +    struct net_device *dev;
  24.543 +
  24.544 +    while ( !list_empty(&dev_list) )
  24.545 +    {
  24.546 +        np = list_entry(dev_list.next, struct net_private, list);
  24.547 +        list_del(&np->list);
  24.548 +        dev = np->dev;
  24.549 +        unregister_netdev(dev);
  24.550 +        kfree(dev);
  24.551 +    }
  24.552 +}
  24.553 +
  24.554 +
  24.555 +module_init(init_module);
  24.556 +module_exit(cleanup_module);
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/Makefile	Tue Mar 23 10:40:28 2004 +0000
    25.3 @@ -0,0 +1,19 @@
    25.4 +
    25.5 +.S.o:
    25.6 +	$(CC) $(AFLAGS) -traditional -c $< -o $*.o
    25.7 +
    25.8 +all: kernel.o head.o init_task.o
    25.9 +
   25.10 +O_TARGET := kernel.o
   25.11 +
   25.12 +export-objs     := i386_ksyms.o 
   25.13 +
   25.14 +obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o  \
   25.15 +		ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o \
   25.16 +		i386_ksyms.o i387.o hypervisor.o physirq.o pci-dma.o
   25.17 +
   25.18 +ifdef CONFIG_PCI
   25.19 +obj-y	+= pci-i386.o pci-pc.o pci-irq.o
   25.20 +endif
   25.21 +
   25.22 +include $(TOPDIR)/Rules.make
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S	Tue Mar 23 10:40:28 2004 +0000
    26.3 @@ -0,0 +1,781 @@
    26.4 +/*
    26.5 + *  linux/arch/i386/entry.S
    26.6 + *
    26.7 + *  Copyright (C) 1991, 1992  Linus Torvalds
    26.8 + */
    26.9 +
   26.10 +/*
   26.11 + * entry.S contains the system-call and fault low-level handling routines.
   26.12 + * This also contains the timer-interrupt handler, as well as all interrupts
   26.13 + * and faults that can result in a task-switch.
   26.14 + *
   26.15 + * NOTE: This code handles signal-recognition, which happens every time
   26.16 + * after a timer-interrupt and after each system call.
   26.17 + *
   26.18 + * I changed all the .align's to 4 (16 byte alignment), as that's faster
   26.19 + * on a 486.
   26.20 + *
   26.21 + * Stack layout in 'ret_from_system_call':
   26.22 + * 	ptrace needs to have all regs on the stack.
   26.23 + *	if the order here is changed, it needs to be
   26.24 + *	updated in fork.c:copy_process, signal.c:do_signal,
   26.25 + *	ptrace.c and ptrace.h
   26.26 + *
   26.27 + *	 0(%esp) - %ebx
   26.28 + *	 4(%esp) - %ecx
   26.29 + *	 8(%esp) - %edx
   26.30 + *       C(%esp) - %esi
   26.31 + *	10(%esp) - %edi
   26.32 + *	14(%esp) - %ebp
   26.33 + *	18(%esp) - %eax
   26.34 + *	1C(%esp) - %ds
   26.35 + *	20(%esp) - %es
   26.36 + *	24(%esp) - orig_eax
   26.37 + *	28(%esp) - %eip
   26.38 + *	2C(%esp) - %cs
   26.39 + *	30(%esp) - %eflags
   26.40 + *	34(%esp) - %oldesp
   26.41 + *	38(%esp) - %oldss
   26.42 + *
   26.43 + * "current" is in register %ebx during any slow entries.
   26.44 + */
   26.45 +
   26.46 +#include <linux/config.h>
   26.47 +#include <linux/sys.h>
   26.48 +#include <linux/linkage.h>
   26.49 +#include <asm/segment.h>
   26.50 +#include <asm/smp.h>
   26.51 +
   26.52 +EBX		= 0x00
   26.53 +ECX		= 0x04
   26.54 +EDX		= 0x08
   26.55 +ESI		= 0x0C
   26.56 +EDI		= 0x10
   26.57 +EBP		= 0x14
   26.58 +EAX		= 0x18
   26.59 +DS		= 0x1C
   26.60 +ES		= 0x20
   26.61 +ORIG_EAX	= 0x24
   26.62 +EIP		= 0x28
   26.63 +CS		= 0x2C
   26.64 +EFLAGS		= 0x30
   26.65 +OLDESP		= 0x34
   26.66 +OLDSS		= 0x38
   26.67 +
   26.68 +CF_MASK		= 0x00000001
   26.69 +TF_MASK		= 0x00000100
   26.70 +IF_MASK		= 0x00000200
   26.71 +DF_MASK		= 0x00000400
   26.72 +NT_MASK		= 0x00004000
   26.73 +
   26.74 +/*
   26.75 + * these are offsets into the task-struct.
   26.76 + */
   26.77 +state		=  0
   26.78 +flags		=  4
   26.79 +sigpending	=  8
   26.80 +addr_limit	= 12
   26.81 +exec_domain	= 16
   26.82 +need_resched	= 20
   26.83 +tsk_ptrace	= 24
   26.84 +processor	= 52
   26.85 +
   26.86 +ENOSYS = 38
   26.87 +
   26.88 +
   26.89 +#define SAVE_ALL \
   26.90 +	cld; \
   26.91 +	pushl %es; \
   26.92 +	pushl %ds; \
   26.93 +	pushl %eax; \
   26.94 +	pushl %ebp; \
   26.95 +	pushl %edi; \
   26.96 +	pushl %esi; \
   26.97 +	pushl %edx; \
   26.98 +	pushl %ecx; \
   26.99 +	pushl %ebx; \
  26.100 +	movl $(__KERNEL_DS),%edx; \
  26.101 +	movl %edx,%ds; \
  26.102 +	movl %edx,%es;
  26.103 +
  26.104 +#define RESTORE_ALL	\
  26.105 +	popl %ebx;	\
  26.106 +	popl %ecx;	\
  26.107 +	popl %edx;	\
  26.108 +	popl %esi;	\
  26.109 +	popl %edi;	\
  26.110 +	popl %ebp;	\
  26.111 +	popl %eax;	\
  26.112 +1:	popl %ds;	\
  26.113 +2:	popl %es;	\
  26.114 +	addl $4,%esp;	\
  26.115 +3:	iret;		\
  26.116 +.section .fixup,"ax";	\
  26.117 +4:	movl $0,(%esp);	\
  26.118 +	jmp 1b;		\
  26.119 +5:	movl $0,(%esp);	\
  26.120 +	jmp 2b;		\
  26.121 +6:	pushl %ss;	\
  26.122 +	popl %ds;	\
  26.123 +	pushl %ss;	\
  26.124 +	popl %es;	\
  26.125 +	pushl $11;	\
  26.126 +	call do_exit;	\
  26.127 +.previous;		\
  26.128 +.section __ex_table,"a";\
  26.129 +	.align 4;	\
  26.130 +	.long 1b,4b;	\
  26.131 +	.long 2b,5b;	\
  26.132 +	.long 3b,6b;	\
  26.133 +.previous
  26.134 +
  26.135 +#define GET_CURRENT(reg) \
  26.136 +	movl $-8192, reg; \
  26.137 +	andl %esp, reg
  26.138 +
  26.139 +ENTRY(lcall7)
  26.140 +	pushfl			# We get a different stack layout with call
  26.141 +	pushl %eax		# gates, which has to be cleaned up later..
  26.142 +	SAVE_ALL
  26.143 +	movl EIP(%esp),%eax	# due to call gates, this is eflags, not eip..
  26.144 +	movl CS(%esp),%edx	# this is eip..
  26.145 +	movl EFLAGS(%esp),%ecx	# and this is cs..
  26.146 +	movl %eax,EFLAGS(%esp)	#
  26.147 +	andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
  26.148 +	pushl %eax
  26.149 +	popfl
  26.150 +	movl %edx,EIP(%esp)	# Now we move them to their "normal" places
  26.151 +	movl %ecx,CS(%esp)	#
  26.152 +	movl %esp,%ebx
  26.153 +	pushl %ebx
  26.154 +	andl $-8192,%ebx	# GET_CURRENT
  26.155 +	movl exec_domain(%ebx),%edx	# Get the execution domain
  26.156 +	movl 4(%edx),%edx	# Get the lcall7 handler for the domain
  26.157 +	pushl $0x7
  26.158 +	call *%edx
  26.159 +	addl $4, %esp
  26.160 +	popl %eax
  26.161 +	jmp ret_from_sys_call
  26.162 +
  26.163 +ENTRY(lcall27)
  26.164 +	pushfl			# We get a different stack layout with call
  26.165 +	pushl %eax		# gates, which has to be cleaned up later..
  26.166 +	SAVE_ALL
  26.167 +	movl EIP(%esp),%eax	# due to call gates, this is eflags, not eip..
  26.168 +	movl CS(%esp),%edx	# this is eip..
  26.169 +	movl EFLAGS(%esp),%ecx	# and this is cs..
  26.170 +	movl %eax,EFLAGS(%esp)	#
  26.171 +	andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
  26.172 +	pushl %eax
  26.173 +	popfl
  26.174 +	movl %edx,EIP(%esp)	# Now we move them to their "normal" places
  26.175 +	movl %ecx,CS(%esp)	#
  26.176 +	movl %esp,%ebx
  26.177 +	pushl %ebx
  26.178 +	andl $-8192,%ebx	# GET_CURRENT
  26.179 +	movl exec_domain(%ebx),%edx	# Get the execution domain
  26.180 +	movl 4(%edx),%edx	# Get the lcall7 handler for the domain
  26.181 +	pushl $0x27
  26.182 +	call *%edx
  26.183 +	addl $4, %esp
  26.184 +	popl %eax
  26.185 +	jmp ret_from_sys_call
  26.186 +
  26.187 +ENTRY(ret_from_fork)
  26.188 +	pushl %ebx
  26.189 +	call SYMBOL_NAME(schedule_tail)
  26.190 +	addl $4, %esp
  26.191 +	GET_CURRENT(%ebx)
  26.192 +	testb $0x02,tsk_ptrace(%ebx)	# PT_TRACESYS
  26.193 +	jne tracesys_exit
  26.194 +	jmp	ret_from_sys_call
  26.195 +
  26.196 +/*
  26.197 + * Return to user mode is not as complex as all this looks,
  26.198 + * but we want the default path for a system call return to
  26.199 + * go as quickly as possible which is why some of this is
  26.200 + * less clear than it otherwise should be.
  26.201 + */
  26.202 +ENTRY(system_call)
  26.203 +	pushl %eax			# save orig_eax
  26.204 +	SAVE_ALL
  26.205 +	GET_CURRENT(%ebx)
  26.206 +	testb $0x02,tsk_ptrace(%ebx)	# PT_TRACESYS
  26.207 +	jne tracesys
  26.208 +	cmpl $(NR_syscalls),%eax
  26.209 +	jae badsys
  26.210 +	call *SYMBOL_NAME(sys_call_table)(,%eax,4)
  26.211 +	movl %eax,EAX(%esp)		# save the return value
  26.212 +ENTRY(ret_from_sys_call)
  26.213 +        movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
  26.214 +        btrl $EVENTS_MASTER_ENABLE_BIT,4(%esi) # make tests atomic
  26.215 +ret_syscall_tests:
  26.216 +	cmpl $0,need_resched(%ebx)
  26.217 +	jne reschedule
  26.218 +	cmpl $0,sigpending(%ebx)
  26.219 +	je   safesti                    # ensure need_resched updates are seen
  26.220 +signal_return:
  26.221 +	btsl $EVENTS_MASTER_ENABLE_BIT,4(%esi) # reenable event callbacks
  26.222 +	movl %esp,%eax
  26.223 +	xorl %edx,%edx
  26.224 +	call SYMBOL_NAME(do_signal)
  26.225 +	jmp  ret_from_sys_call
  26.226 +
  26.227 +	ALIGN
  26.228 +restore_all:
  26.229 +	RESTORE_ALL
  26.230 +
  26.231 +	ALIGN
  26.232 +tracesys:
  26.233 +	movl $-ENOSYS,EAX(%esp)
  26.234 +	call SYMBOL_NAME(syscall_trace)
  26.235 +	movl ORIG_EAX(%esp),%eax
  26.236 +	cmpl $(NR_syscalls),%eax
  26.237 +	jae tracesys_exit
  26.238 +	call *SYMBOL_NAME(sys_call_table)(,%eax,4)
  26.239 +	movl %eax,EAX(%esp)		# save the return value
  26.240 +tracesys_exit:
  26.241 +	call SYMBOL_NAME(syscall_trace)
  26.242 +	jmp ret_from_sys_call
  26.243 +badsys:
  26.244 +	movl $-ENOSYS,EAX(%esp)
  26.245 +	jmp ret_from_sys_call
  26.246 +
  26.247 +	ALIGN
  26.248 +ENTRY(ret_from_intr)
  26.249 +	GET_CURRENT(%ebx)
  26.250 +ret_from_exception:
  26.251 +	movb CS(%esp),%al
  26.252 +	testl $2,%eax
  26.253 +	jne ret_from_sys_call
  26.254 +	jmp restore_all
  26.255 +
  26.256 +	ALIGN
  26.257 +reschedule:
  26.258 +        btsl $EVENTS_MASTER_ENABLE_BIT,4(%esi) # reenable event callbacks
   26.259 +	call SYMBOL_NAME(schedule)             # then redo return-path tests
  26.260 +	jmp ret_from_sys_call
  26.261 +
  26.262 +ENTRY(divide_error)
  26.263 +	pushl $0		# no error code
  26.264 +	pushl $ SYMBOL_NAME(do_divide_error)
  26.265 +	ALIGN
  26.266 +error_code:
  26.267 +	pushl %ds
  26.268 +	pushl %eax
  26.269 +	xorl %eax,%eax
  26.270 +	pushl %ebp
  26.271 +	pushl %edi
  26.272 +	pushl %esi
  26.273 +	pushl %edx
  26.274 +	decl %eax			# eax = -1
  26.275 +	pushl %ecx
  26.276 +	pushl %ebx
  26.277 +	GET_CURRENT(%ebx)
  26.278 +	cld
  26.279 +	movl %es,%ecx
  26.280 +	movl ORIG_EAX(%esp), %esi	# get the error code
  26.281 +	movl ES(%esp), %edi		# get the function address
  26.282 +	movl %eax, ORIG_EAX(%esp)
  26.283 +	movl %ecx, ES(%esp)
  26.284 +	movl %esp,%edx
  26.285 +	pushl %esi			# push the error code
  26.286 +	pushl %edx			# push the pt_regs pointer
  26.287 +	movl $(__KERNEL_DS),%edx
  26.288 +	movl %edx,%ds
  26.289 +	movl %edx,%es
  26.290 +	call *%edi
  26.291 +	addl $8,%esp
  26.292 +	jmp ret_from_exception
  26.293 +
  26.294 +# A note on the "critical region" in our callback handler.
  26.295 +# We want to avoid stacking callback handlers due to events occurring
  26.296 +# during handling of the last event. To do this, we keep events disabled
  26.297 +# until we've done all processing. HOWEVER, we must enable events before
  26.298 +# popping the stack frame (can't be done atomically) and so it would still
  26.299 +# be possible to get enough handler activations to overflow the stack.
  26.300 +# Although unlikely, bugs of that kind are hard to track down, so we'd
  26.301 +# like to avoid the possibility.
  26.302 +# So, on entry to the handler we detect whether we interrupted an
  26.303 +# existing activation in its critical region -- if so, we pop the current
  26.304 +# activation and restart the handler using the previous one.
  26.305 +ENTRY(hypervisor_callback)
  26.306 +        pushl %eax
  26.307 +        SAVE_ALL
  26.308 +        GET_CURRENT(%ebx)
  26.309 +        movl EIP(%esp),%eax
  26.310 +        cmpl $scrit,%eax
  26.311 +        jb   11f
  26.312 +        cmpl $ecrit,%eax
  26.313 +        jb   critical_region_fixup
  26.314 +11:     push %esp
  26.315 +        call do_hypervisor_callback
  26.316 +        add  $4,%esp
  26.317 +        movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
  26.318 +        movb CS(%esp),%cl
  26.319 +	test $2,%cl          # slow return to ring 2 or 3
  26.320 +	jne  ret_syscall_tests
  26.321 +safesti:btsl $EVENTS_MASTER_ENABLE_BIT,4(%esi) # reenable event callbacks
  26.322 +scrit:  /**** START OF CRITICAL REGION ****/
  26.323 +        cmpl $0,(%esi)
  26.324 +        jne  14f              # process more events if necessary...
  26.325 +        RESTORE_ALL
  26.326 +14:     btrl $EVENTS_MASTER_ENABLE_BIT,4(%esi)
  26.327 +        jmp  11b
  26.328 +ecrit:  /**** END OF CRITICAL REGION ****/
  26.329 +# [How we do the fixup]. We want to merge the current stack frame with the
  26.330 +# just-interrupted frame. How we do this depends on where in the critical
  26.331 +# region the interrupted handler was executing, and so how many saved
  26.332 +# registers are in each frame. We do this quickly using the lookup table
  26.333 +# 'critical_fixup_table'. For each byte offset in the critical region, it
  26.334 +# provides the number of bytes which have already been popped from the
  26.335 +# interrupted stack frame. 
  26.336 +critical_region_fixup:
  26.337 +        addl $critical_fixup_table-scrit,%eax
  26.338 +        movzbl (%eax),%eax    # %eax contains num bytes popped
  26.339 +        mov  %esp,%esi
  26.340 +        add  %eax,%esi        # %esi points at end of src region
  26.341 +        mov  %esp,%edi
  26.342 +        add  $0x34,%edi       # %edi points at end of dst region
  26.343 +        mov  %eax,%ecx
   26.344 +        shr  $2,%ecx          # convert bytes to dwords (loop count)
  26.345 +        je   16f              # skip loop if nothing to copy
  26.346 +15:     subl $4,%esi          # pre-decrementing copy loop
  26.347 +        subl $4,%edi
  26.348 +        movl (%esi),%eax
  26.349 +        movl %eax,(%edi)
  26.350 +        loop 15b
  26.351 +16:     movl %edi,%esp        # final %edi is top of merged stack
  26.352 +        jmp  11b
  26.353 +        
  26.354 +critical_fixup_table:        
  26.355 +        .byte 0x00,0x00,0x00                  # cmpl $0,(%esi)
  26.356 +        .byte 0x00,0x00                       # jne  14f
  26.357 +        .byte 0x00                            # pop  %ebx
  26.358 +        .byte 0x04                            # pop  %ecx
  26.359 +        .byte 0x08                            # pop  %edx
  26.360 +        .byte 0x0c                            # pop  %esi
  26.361 +        .byte 0x10                            # pop  %edi
  26.362 +        .byte 0x14                            # pop  %ebp
  26.363 +        .byte 0x18                            # pop  %eax
  26.364 +        .byte 0x1c                            # pop  %ds
  26.365 +        .byte 0x20                            # pop  %es
  26.366 +        .byte 0x24,0x24,0x24                  # add  $4,%esp
  26.367 +        .byte 0x28                            # iret
  26.368 +        .byte 0x00,0x00,0x00,0x00,0x00        # btrl $31,4(%esi)
  26.369 +        .byte 0x00,0x00                       # jmp  11b
  26.370 +
  26.371 +# Hypervisor uses this for application faults while it executes.
  26.372 +ENTRY(failsafe_callback)
  26.373 +        call SYMBOL_NAME(install_safe_pf_handler)
  26.374 +1:      pop  %ds
  26.375 +2:      pop  %es
  26.376 +3:      pop  %fs
  26.377 +4:      pop  %gs
  26.378 +        call SYMBOL_NAME(install_normal_pf_handler)
  26.379 +5:      iret
  26.380 +.section .fixup,"ax";	\
  26.381 +6:	movl $0,(%esp);	\
  26.382 +	jmp 1b;		\
  26.383 +7:	movl $0,(%esp);	\
  26.384 +	jmp 2b;		\
  26.385 +8:	movl $0,(%esp);	\
  26.386 +	jmp 3b;		\
  26.387 +9:	movl $0,(%esp);	\
  26.388 +	jmp 4b;		\
  26.389 +10:	pushl %ss;	\
  26.390 +	popl %ds;	\
  26.391 +	pushl %ss;	\
  26.392 +	popl %es;	\
  26.393 +	pushl $11;	\
  26.394 +	call do_exit;	\
  26.395 +.previous;		\
  26.396 +.section __ex_table,"a";\
  26.397 +	.align 4;	\
  26.398 +	.long 1b,6b;	\
  26.399 +	.long 2b,7b;	\
  26.400 +	.long 3b,8b;	\
  26.401 +	.long 4b,9b;	\
  26.402 +	.long 5b,10b;	\
  26.403 +.previous
  26.404 +        
  26.405 +ENTRY(coprocessor_error)
  26.406 +	pushl $0
  26.407 +	pushl $ SYMBOL_NAME(do_coprocessor_error)
  26.408 +	jmp error_code
  26.409 +
  26.410 +ENTRY(simd_coprocessor_error)
  26.411 +	pushl $0
  26.412 +	pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
  26.413 +	jmp error_code
  26.414 +
  26.415 +ENTRY(device_not_available)
  26.416 +	pushl $-1		# mark this as an int
  26.417 +	SAVE_ALL
  26.418 +	GET_CURRENT(%ebx)
  26.419 +	call SYMBOL_NAME(math_state_restore)
  26.420 +	jmp ret_from_exception
  26.421 +
  26.422 +ENTRY(debug)
  26.423 +	pushl $0
  26.424 +	pushl $ SYMBOL_NAME(do_debug)
  26.425 +	jmp error_code
  26.426 +
  26.427 +ENTRY(int3)
  26.428 +	pushl $0
  26.429 +	pushl $ SYMBOL_NAME(do_int3)
  26.430 +	jmp error_code
  26.431 +
  26.432 +ENTRY(overflow)
  26.433 +	pushl $0
  26.434 +	pushl $ SYMBOL_NAME(do_overflow)
  26.435 +	jmp error_code
  26.436 +
  26.437 +ENTRY(bounds)
  26.438 +	pushl $0
  26.439 +	pushl $ SYMBOL_NAME(do_bounds)
  26.440 +	jmp error_code
  26.441 +
  26.442 +ENTRY(invalid_op)
  26.443 +	pushl $0
  26.444 +	pushl $ SYMBOL_NAME(do_invalid_op)
  26.445 +	jmp error_code
  26.446 +
  26.447 +ENTRY(coprocessor_segment_overrun)
  26.448 +	pushl $0
  26.449 +	pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
  26.450 +	jmp error_code
  26.451 +
  26.452 +ENTRY(double_fault)
  26.453 +	pushl $ SYMBOL_NAME(do_double_fault)
  26.454 +	jmp error_code
  26.455 +
  26.456 +ENTRY(invalid_TSS)
  26.457 +	pushl $ SYMBOL_NAME(do_invalid_TSS)
  26.458 +	jmp error_code
  26.459 +
  26.460 +ENTRY(segment_not_present)
  26.461 +	pushl $ SYMBOL_NAME(do_segment_not_present)
  26.462 +	jmp error_code
  26.463 +
  26.464 +ENTRY(stack_segment)
  26.465 +	pushl $ SYMBOL_NAME(do_stack_segment)
  26.466 +	jmp error_code
  26.467 +
  26.468 +ENTRY(general_protection)
  26.469 +	pushl $ SYMBOL_NAME(do_general_protection)
  26.470 +	jmp error_code
  26.471 +
  26.472 +ENTRY(alignment_check)
  26.473 +	pushl $ SYMBOL_NAME(do_alignment_check)
  26.474 +	jmp error_code
  26.475 +
  26.476 +# This handler is special, because it gets an extra value on its stack,
  26.477 +# which is the linear faulting address.
  26.478 +#define PAGE_FAULT_STUB(_name1, _name2)                                  \
  26.479 +ENTRY(_name1)                                                            \
  26.480 +	pushl %ds                                                      ; \
  26.481 +	pushl %eax                                                     ; \
  26.482 +	xorl %eax,%eax                                                 ; \
  26.483 +	pushl %ebp                                                     ; \
  26.484 +	pushl %edi                                                     ; \
  26.485 +	pushl %esi                                                     ; \
  26.486 +	pushl %edx                                                     ; \
  26.487 +	decl %eax                      /* eax = -1 */                  ; \
  26.488 +	pushl %ecx                                                     ; \
  26.489 +	pushl %ebx                                                     ; \
  26.490 +	GET_CURRENT(%ebx)                                              ; \
  26.491 +	cld                                                            ; \
  26.492 +	movl %es,%ecx                                                  ; \
  26.493 +	movl ORIG_EAX(%esp), %esi      /* get the error code */        ; \
  26.494 +	movl ES(%esp), %edi            /* get the faulting address */  ; \
  26.495 +	movl %eax, ORIG_EAX(%esp)                                      ; \
  26.496 +	movl %ecx, ES(%esp)                                            ; \
  26.497 +	movl %esp,%edx                                                 ; \
  26.498 +        pushl %edi                     /* push the faulting address */ ; \
  26.499 +	pushl %esi                     /* push the error code */       ; \
  26.500 +	pushl %edx                     /* push the pt_regs pointer */  ; \
  26.501 +	movl $(__KERNEL_DS),%edx                                       ; \
  26.502 +	movl %edx,%ds                                                  ; \
  26.503 +	movl %edx,%es                                                  ; \
  26.504 +	call SYMBOL_NAME(_name2)                                       ; \
  26.505 +	addl $12,%esp                                                  ; \
  26.506 +	jmp ret_from_exception                                         ;
  26.507 +PAGE_FAULT_STUB(page_fault, do_page_fault)
  26.508 +PAGE_FAULT_STUB(safe_page_fault, do_safe_page_fault)
  26.509 +
  26.510 +ENTRY(machine_check)
  26.511 +	pushl $0
  26.512 +	pushl $ SYMBOL_NAME(do_machine_check)
  26.513 +	jmp error_code
  26.514 +
  26.515 +ENTRY(spurious_interrupt_bug)
  26.516 +	pushl $0
  26.517 +	pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
  26.518 +	jmp error_code
  26.519 +
  26.520 +.data
  26.521 +ENTRY(sys_call_table)
  26.522 +	.long SYMBOL_NAME(sys_ni_syscall)	/* 0  -  old "setup()" system call*/
  26.523 +	.long SYMBOL_NAME(sys_exit)
  26.524 +	.long SYMBOL_NAME(sys_fork)
  26.525 +	.long SYMBOL_NAME(sys_read)
  26.526 +	.long SYMBOL_NAME(sys_write)
  26.527 +	.long SYMBOL_NAME(sys_open)		/* 5 */
  26.528 +	.long SYMBOL_NAME(sys_close)
  26.529 +	.long SYMBOL_NAME(sys_waitpid)
  26.530 +	.long SYMBOL_NAME(sys_creat)
  26.531 +	.long SYMBOL_NAME(sys_link)
  26.532 +	.long SYMBOL_NAME(sys_unlink)		/* 10 */
  26.533 +	.long SYMBOL_NAME(sys_execve)
  26.534 +	.long SYMBOL_NAME(sys_chdir)
  26.535 +	.long SYMBOL_NAME(sys_time)
  26.536 +	.long SYMBOL_NAME(sys_mknod)
  26.537 +	.long SYMBOL_NAME(sys_chmod)		/* 15 */
  26.538 +	.long SYMBOL_NAME(sys_lchown16)
  26.539 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old break syscall holder */
  26.540 +	.long SYMBOL_NAME(sys_stat)
  26.541 +	.long SYMBOL_NAME(sys_lseek)
  26.542 +	.long SYMBOL_NAME(sys_getpid)		/* 20 */
  26.543 +	.long SYMBOL_NAME(sys_mount)
  26.544 +	.long SYMBOL_NAME(sys_oldumount)
  26.545 +	.long SYMBOL_NAME(sys_setuid16)
  26.546 +	.long SYMBOL_NAME(sys_getuid16)
  26.547 +	.long SYMBOL_NAME(sys_stime)		/* 25 */
  26.548 +	.long SYMBOL_NAME(sys_ptrace)
  26.549 +	.long SYMBOL_NAME(sys_alarm)
  26.550 +	.long SYMBOL_NAME(sys_fstat)
  26.551 +	.long SYMBOL_NAME(sys_pause)
  26.552 +	.long SYMBOL_NAME(sys_utime)		/* 30 */
  26.553 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old stty syscall holder */
  26.554 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old gtty syscall holder */
  26.555 +	.long SYMBOL_NAME(sys_access)
  26.556 +	.long SYMBOL_NAME(sys_nice)
  26.557 +	.long SYMBOL_NAME(sys_ni_syscall)	/* 35 */		/* old ftime syscall holder */
  26.558 +	.long SYMBOL_NAME(sys_sync)
  26.559 +	.long SYMBOL_NAME(sys_kill)
  26.560 +	.long SYMBOL_NAME(sys_rename)
  26.561 +	.long SYMBOL_NAME(sys_mkdir)
  26.562 +	.long SYMBOL_NAME(sys_rmdir)		/* 40 */
  26.563 +	.long SYMBOL_NAME(sys_dup)
  26.564 +	.long SYMBOL_NAME(sys_pipe)
  26.565 +	.long SYMBOL_NAME(sys_times)
  26.566 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old prof syscall holder */
  26.567 +	.long SYMBOL_NAME(sys_brk)		/* 45 */
  26.568 +	.long SYMBOL_NAME(sys_setgid16)
  26.569 +	.long SYMBOL_NAME(sys_getgid16)
  26.570 +	.long SYMBOL_NAME(sys_signal)
  26.571 +	.long SYMBOL_NAME(sys_geteuid16)
  26.572 +	.long SYMBOL_NAME(sys_getegid16)	/* 50 */
  26.573 +	.long SYMBOL_NAME(sys_acct)
  26.574 +	.long SYMBOL_NAME(sys_umount)					/* recycled never used phys() */
  26.575 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old lock syscall holder */
  26.576 +	.long SYMBOL_NAME(sys_ioctl)
  26.577 +	.long SYMBOL_NAME(sys_fcntl)		/* 55 */
  26.578 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old mpx syscall holder */
  26.579 +	.long SYMBOL_NAME(sys_setpgid)
  26.580 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old ulimit syscall holder */
  26.581 +	.long SYMBOL_NAME(sys_olduname)
  26.582 +	.long SYMBOL_NAME(sys_umask)		/* 60 */
  26.583 +	.long SYMBOL_NAME(sys_chroot)
  26.584 +	.long SYMBOL_NAME(sys_ustat)
  26.585 +	.long SYMBOL_NAME(sys_dup2)
  26.586 +	.long SYMBOL_NAME(sys_getppid)
  26.587 +	.long SYMBOL_NAME(sys_getpgrp)		/* 65 */
  26.588 +	.long SYMBOL_NAME(sys_setsid)
  26.589 +	.long SYMBOL_NAME(sys_sigaction)
  26.590 +	.long SYMBOL_NAME(sys_sgetmask)
  26.591 +	.long SYMBOL_NAME(sys_ssetmask)
  26.592 +	.long SYMBOL_NAME(sys_setreuid16)	/* 70 */
  26.593 +	.long SYMBOL_NAME(sys_setregid16)
  26.594 +	.long SYMBOL_NAME(sys_sigsuspend)
  26.595 +	.long SYMBOL_NAME(sys_sigpending)
  26.596 +	.long SYMBOL_NAME(sys_sethostname)
  26.597 +	.long SYMBOL_NAME(sys_setrlimit)	/* 75 */
  26.598 +	.long SYMBOL_NAME(sys_old_getrlimit)
  26.599 +	.long SYMBOL_NAME(sys_getrusage)
  26.600 +	.long SYMBOL_NAME(sys_gettimeofday)
  26.601 +	.long SYMBOL_NAME(sys_settimeofday)
  26.602 +	.long SYMBOL_NAME(sys_getgroups16)	/* 80 */
  26.603 +	.long SYMBOL_NAME(sys_setgroups16)
  26.604 +	.long SYMBOL_NAME(old_select)
  26.605 +	.long SYMBOL_NAME(sys_symlink)
  26.606 +	.long SYMBOL_NAME(sys_lstat)
  26.607 +	.long SYMBOL_NAME(sys_readlink)		/* 85 */
  26.608 +	.long SYMBOL_NAME(sys_uselib)
  26.609 +	.long SYMBOL_NAME(sys_swapon)
  26.610 +	.long SYMBOL_NAME(sys_reboot)
  26.611 +	.long SYMBOL_NAME(old_readdir)
  26.612 +	.long SYMBOL_NAME(old_mmap)		/* 90 */
  26.613 +	.long SYMBOL_NAME(sys_munmap)
  26.614 +	.long SYMBOL_NAME(sys_truncate)
  26.615 +	.long SYMBOL_NAME(sys_ftruncate)
  26.616 +	.long SYMBOL_NAME(sys_fchmod)
  26.617 +	.long SYMBOL_NAME(sys_fchown16)		/* 95 */
  26.618 +	.long SYMBOL_NAME(sys_getpriority)
  26.619 +	.long SYMBOL_NAME(sys_setpriority)
  26.620 +	.long SYMBOL_NAME(sys_ni_syscall)				/* old profil syscall holder */
  26.621 +	.long SYMBOL_NAME(sys_statfs)
  26.622 +	.long SYMBOL_NAME(sys_fstatfs)		/* 100 */
  26.623 +	.long SYMBOL_NAME(sys_ioperm)
  26.624 +	.long SYMBOL_NAME(sys_socketcall)
  26.625 +	.long SYMBOL_NAME(sys_syslog)
  26.626 +	.long SYMBOL_NAME(sys_setitimer)
  26.627 +	.long SYMBOL_NAME(sys_getitimer)	/* 105 */
  26.628 +	.long SYMBOL_NAME(sys_newstat)
  26.629 +	.long SYMBOL_NAME(sys_newlstat)
  26.630 +	.long SYMBOL_NAME(sys_newfstat)
  26.631 +	.long SYMBOL_NAME(sys_uname)
  26.632 +	.long SYMBOL_NAME(sys_iopl)		/* 110 */
  26.633 +	.long SYMBOL_NAME(sys_vhangup)
  26.634 +	.long SYMBOL_NAME(sys_ni_syscall)	/* old "idle" system call */
  26.635 +	.long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
  26.636 +	.long SYMBOL_NAME(sys_wait4)
  26.637 +	.long SYMBOL_NAME(sys_swapoff)		/* 115 */
  26.638 +	.long SYMBOL_NAME(sys_sysinfo)
  26.639 +	.long SYMBOL_NAME(sys_ipc)
  26.640 +	.long SYMBOL_NAME(sys_fsync)
  26.641 +	.long SYMBOL_NAME(sys_sigreturn)
  26.642 +	.long SYMBOL_NAME(sys_clone)		/* 120 */
  26.643 +	.long SYMBOL_NAME(sys_setdomainname)
  26.644 +	.long SYMBOL_NAME(sys_newuname)
  26.645 +	.long SYMBOL_NAME(sys_modify_ldt)
  26.646 +	.long SYMBOL_NAME(sys_adjtimex)
  26.647 +	.long SYMBOL_NAME(sys_mprotect)		/* 125 */
  26.648 +	.long SYMBOL_NAME(sys_sigprocmask)
  26.649 +	.long SYMBOL_NAME(sys_create_module)
  26.650 +	.long SYMBOL_NAME(sys_init_module)
  26.651 +	.long SYMBOL_NAME(sys_delete_module)
  26.652 +	.long SYMBOL_NAME(sys_get_kernel_syms)	/* 130 */
  26.653 +	.long SYMBOL_NAME(sys_quotactl)
  26.654 +	.long SYMBOL_NAME(sys_getpgid)
  26.655 +	.long SYMBOL_NAME(sys_fchdir)
  26.656 +	.long SYMBOL_NAME(sys_bdflush)
  26.657 +	.long SYMBOL_NAME(sys_sysfs)		/* 135 */
  26.658 +	.long SYMBOL_NAME(sys_personality)
  26.659 +	.long SYMBOL_NAME(sys_ni_syscall)	/* for afs_syscall */
  26.660 +	.long SYMBOL_NAME(sys_setfsuid16)
  26.661 +	.long SYMBOL_NAME(sys_setfsgid16)
  26.662 +	.long SYMBOL_NAME(sys_llseek)		/* 140 */
  26.663 +	.long SYMBOL_NAME(sys_getdents)
  26.664 +	.long SYMBOL_NAME(sys_select)
  26.665 +	.long SYMBOL_NAME(sys_flock)
  26.666 +	.long SYMBOL_NAME(sys_msync)
  26.667 +	.long SYMBOL_NAME(sys_readv)		/* 145 */
  26.668 +	.long SYMBOL_NAME(sys_writev)
  26.669 +	.long SYMBOL_NAME(sys_getsid)
  26.670 +	.long SYMBOL_NAME(sys_fdatasync)
  26.671 +	.long SYMBOL_NAME(sys_sysctl)
  26.672 +	.long SYMBOL_NAME(sys_mlock)		/* 150 */
  26.673 +	.long SYMBOL_NAME(sys_munlock)
  26.674 +	.long SYMBOL_NAME(sys_mlockall)
  26.675 +	.long SYMBOL_NAME(sys_munlockall)
  26.676 +	.long SYMBOL_NAME(sys_sched_setparam)
  26.677 +	.long SYMBOL_NAME(sys_sched_getparam)   /* 155 */
  26.678 +	.long SYMBOL_NAME(sys_sched_setscheduler)
  26.679 +	.long SYMBOL_NAME(sys_sched_getscheduler)
  26.680 +	.long SYMBOL_NAME(sys_sched_yield)
  26.681 +	.long SYMBOL_NAME(sys_sched_get_priority_max)
  26.682 +	.long SYMBOL_NAME(sys_sched_get_priority_min)  /* 160 */
  26.683 +	.long SYMBOL_NAME(sys_sched_rr_get_interval)
  26.684 +	.long SYMBOL_NAME(sys_nanosleep)
  26.685 +	.long SYMBOL_NAME(sys_mremap)
  26.686 +	.long SYMBOL_NAME(sys_setresuid16)
  26.687 +	.long SYMBOL_NAME(sys_getresuid16)	/* 165 */
  26.688 +	.long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
  26.689 +	.long SYMBOL_NAME(sys_query_module)
  26.690 +	.long SYMBOL_NAME(sys_poll)
  26.691 +	.long SYMBOL_NAME(sys_nfsservctl)
  26.692 +	.long SYMBOL_NAME(sys_setresgid16)	/* 170 */
  26.693 +	.long SYMBOL_NAME(sys_getresgid16)
  26.694 +	.long SYMBOL_NAME(sys_prctl)
  26.695 +	.long SYMBOL_NAME(sys_rt_sigreturn)
  26.696 +	.long SYMBOL_NAME(sys_rt_sigaction)
  26.697 +	.long SYMBOL_NAME(sys_rt_sigprocmask)	/* 175 */
  26.698 +	.long SYMBOL_NAME(sys_rt_sigpending)
  26.699 +	.long SYMBOL_NAME(sys_rt_sigtimedwait)
  26.700 +	.long SYMBOL_NAME(sys_rt_sigqueueinfo)
  26.701 +	.long SYMBOL_NAME(sys_rt_sigsuspend)
  26.702 +	.long SYMBOL_NAME(sys_pread)		/* 180 */
  26.703 +	.long SYMBOL_NAME(sys_pwrite)
  26.704 +	.long SYMBOL_NAME(sys_chown16)
  26.705 +	.long SYMBOL_NAME(sys_getcwd)
  26.706 +	.long SYMBOL_NAME(sys_capget)
  26.707 +	.long SYMBOL_NAME(sys_capset)           /* 185 */
  26.708 +	.long SYMBOL_NAME(sys_sigaltstack)
  26.709 +	.long SYMBOL_NAME(sys_sendfile)
  26.710 +	.long SYMBOL_NAME(sys_ni_syscall)		/* streams1 */
  26.711 +	.long SYMBOL_NAME(sys_ni_syscall)		/* streams2 */
  26.712 +	.long SYMBOL_NAME(sys_vfork)            /* 190 */
  26.713 +	.long SYMBOL_NAME(sys_getrlimit)
  26.714 +	.long SYMBOL_NAME(sys_mmap2)
  26.715 +	.long SYMBOL_NAME(sys_truncate64)
  26.716 +	.long SYMBOL_NAME(sys_ftruncate64)
  26.717 +	.long SYMBOL_NAME(sys_stat64)		/* 195 */
  26.718 +	.long SYMBOL_NAME(sys_lstat64)
  26.719 +	.long SYMBOL_NAME(sys_fstat64)
  26.720 +	.long SYMBOL_NAME(sys_lchown)
  26.721 +	.long SYMBOL_NAME(sys_getuid)
  26.722 +	.long SYMBOL_NAME(sys_getgid)		/* 200 */
  26.723 +	.long SYMBOL_NAME(sys_geteuid)
  26.724 +	.long SYMBOL_NAME(sys_getegid)
  26.725 +	.long SYMBOL_NAME(sys_setreuid)
  26.726 +	.long SYMBOL_NAME(sys_setregid)
  26.727 +	.long SYMBOL_NAME(sys_getgroups)	/* 205 */
  26.728 +	.long SYMBOL_NAME(sys_setgroups)
  26.729 +	.long SYMBOL_NAME(sys_fchown)
  26.730 +	.long SYMBOL_NAME(sys_setresuid)
  26.731 +	.long SYMBOL_NAME(sys_getresuid)
  26.732 +	.long SYMBOL_NAME(sys_setresgid)	/* 210 */
  26.733 +	.long SYMBOL_NAME(sys_getresgid)
  26.734 +	.long SYMBOL_NAME(sys_chown)
  26.735 +	.long SYMBOL_NAME(sys_setuid)
  26.736 +	.long SYMBOL_NAME(sys_setgid)
  26.737 +	.long SYMBOL_NAME(sys_setfsuid)		/* 215 */
  26.738 +	.long SYMBOL_NAME(sys_setfsgid)
  26.739 +	.long SYMBOL_NAME(sys_pivot_root)
  26.740 +	.long SYMBOL_NAME(sys_mincore)
  26.741 +	.long SYMBOL_NAME(sys_madvise)
  26.742 +	.long SYMBOL_NAME(sys_getdents64)	/* 220 */
  26.743 +	.long SYMBOL_NAME(sys_fcntl64)
  26.744 +	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for TUX */
  26.745 +	.long SYMBOL_NAME(sys_ni_syscall)	/* Reserved for Security */
  26.746 +	.long SYMBOL_NAME(sys_gettid)
  26.747 +	.long SYMBOL_NAME(sys_readahead)	/* 225 */
  26.748 +	.long SYMBOL_NAME(sys_setxattr)
  26.749 +	.long SYMBOL_NAME(sys_lsetxattr)
  26.750 +	.long SYMBOL_NAME(sys_fsetxattr)
  26.751 +	.long SYMBOL_NAME(sys_getxattr)
  26.752 +	.long SYMBOL_NAME(sys_lgetxattr)	/* 230 */
  26.753 +	.long SYMBOL_NAME(sys_fgetxattr)
  26.754 +	.long SYMBOL_NAME(sys_listxattr)
  26.755 +	.long SYMBOL_NAME(sys_llistxattr)
  26.756 +	.long SYMBOL_NAME(sys_flistxattr)
  26.757 +	.long SYMBOL_NAME(sys_removexattr)	/* 235 */
  26.758 +	.long SYMBOL_NAME(sys_lremovexattr)
  26.759 +	.long SYMBOL_NAME(sys_fremovexattr)
  26.760 + 	.long SYMBOL_NAME(sys_tkill)
  26.761 +	.long SYMBOL_NAME(sys_sendfile64)
  26.762 +	.long SYMBOL_NAME(sys_ni_syscall)	/* 240 reserved for futex */
  26.763 +	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_setaffinity */
  26.764 +	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_getaffinity */
  26.765 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_set_thread_area */
  26.766 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_get_thread_area */
  26.767 +	.long SYMBOL_NAME(sys_ni_syscall)	/* 245 sys_io_setup */
  26.768 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_io_destroy */
  26.769 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_io_getevents */
  26.770 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_io_submit */
  26.771 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_io_cancel */
  26.772 +	.long SYMBOL_NAME(sys_ni_syscall)	/* 250 sys_alloc_hugepages */
  26.773 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_free_hugepages */
  26.774 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_exit_group */
  26.775 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_lookup_dcookie */
  26.776 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_create */
  26.777 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_ctl 255 */
  26.778 +	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_wait */
  26.779 + 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_remap_file_pages */
  26.780 + 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_set_tid_address */
  26.781 +
  26.782 +	.rept NR_syscalls-(.-sys_call_table)/4
  26.783 +		.long SYMBOL_NAME(sys_ni_syscall)
  26.784 +	.endr
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/head.S	Tue Mar 23 10:40:28 2004 +0000
    27.3 @@ -0,0 +1,66 @@
    27.4 +
    27.5 +.text
    27.6 +#include <linux/config.h>
    27.7 +#include <linux/threads.h>
    27.8 +#include <linux/linkage.h>
    27.9 +#include <asm/segment.h>
   27.10 +#include <asm/page.h>
   27.11 +#include <asm/pgtable.h>
   27.12 +#include <asm/desc.h>
   27.13 +
   27.14 +/* Offsets in start_info structure */
   27.15 +#define MOD_START 16
   27.16 +#define MOD_LEN   20
   27.17 +                
   27.18 +startup_32:
   27.19 +        cld
   27.20 +        
   27.21 +        lss stack_start,%esp
   27.22 +
   27.23 +        /* Copy initrd somewhere safe before it's clobbered by BSS. */
   27.24 +        mov  MOD_LEN(%esi),%ecx
   27.25 +        shr  $2,%ecx
   27.26 +        jz   2f        /* bail from copy loop if no initrd */
   27.27 +        mov  $SYMBOL_NAME(_end),%edi
   27.28 +        add  MOD_LEN(%esi),%edi
   27.29 +        mov  MOD_START(%esi),%eax
   27.30 +        add  MOD_LEN(%esi),%eax
   27.31 +1:      sub  $4,%eax
   27.32 +        sub  $4,%edi
   27.33 +        mov  (%eax),%ebx
   27.34 +        mov  %ebx,(%edi)
   27.35 +        loop 1b
   27.36 +        mov  %edi,MOD_START(%esi)
   27.37 +                
   27.38 +        /* Clear BSS first so that there are no surprises... */
   27.39 +2:      xorl %eax,%eax
   27.40 +        movl $SYMBOL_NAME(__bss_start),%edi
   27.41 +        movl $SYMBOL_NAME(_end),%ecx
   27.42 +        subl %edi,%ecx
   27.43 +        rep stosb
   27.44 +
   27.45 +        /* Copy the necessary stuff from start_info structure. */
   27.46 +        mov  $SYMBOL_NAME(start_info_union),%edi
   27.47 +        mov  $128,%ecx
   27.48 +        rep movsl
   27.49 +                        
   27.50 +        jmp SYMBOL_NAME(start_kernel)
   27.51 +
   27.52 +ENTRY(stack_start)
   27.53 +	.long SYMBOL_NAME(init_task_union)+8192, __KERNEL_DS
   27.54 +
   27.55 +.org 0x1000
   27.56 +ENTRY(empty_zero_page)
   27.57 +
   27.58 +.org 0x2000
   27.59 +ENTRY(default_ldt)
   27.60 +
   27.61 +.org 0x3000
   27.62 +ENTRY(cpu0_pte_quicklist)
   27.63 +
   27.64 +.org 0x3400
   27.65 +ENTRY(cpu0_pgd_quicklist)
   27.66 +        
   27.67 +.org 0x3800
   27.68 +ENTRY(stext)
   27.69 +ENTRY(_stext)
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/hypervisor.c	Tue Mar 23 10:40:28 2004 +0000
    28.3 @@ -0,0 +1,170 @@
    28.4 +/******************************************************************************
    28.5 + * hypervisor.c
    28.6 + * 
    28.7 + * Communication to/from hypervisor.
    28.8 + * 
    28.9 + * Copyright (c) 2002, K A Fraser
   28.10 + */
   28.11 +
   28.12 +#include <linux/config.h>
   28.13 +#include <linux/irq.h>
   28.14 +#include <linux/kernel_stat.h>
   28.15 +#include <asm/atomic.h>
   28.16 +#include <asm/hypervisor.h>
   28.17 +#include <asm/system.h>
   28.18 +#include <asm/ptrace.h>
   28.19 +
   28.20 +multicall_entry_t multicall_list[8];
   28.21 +int nr_multicall_ents = 0;
   28.22 +
   28.23 +static unsigned long event_mask = 0;
   28.24 +
   28.25 +asmlinkage unsigned int do_physirq(int irq, struct pt_regs *regs)
   28.26 +{
   28.27 +    int cpu = smp_processor_id();
   28.28 +    unsigned long irqs;
   28.29 +    shared_info_t *shared = HYPERVISOR_shared_info;
   28.30 +
   28.31 +    /* do this manually */
   28.32 +    kstat.irqs[cpu][irq]++;
   28.33 +    ack_hypervisor_event(irq);
   28.34 +
   28.35 +    barrier();
   28.36 +    irqs  = xchg(&shared->physirq_pend, 0);
   28.37 +
   28.38 +    __asm__ __volatile__ (
   28.39 +        "   push %1                            ;"
   28.40 +        "   sub  $4,%%esp                      ;"
   28.41 +        "   jmp  3f                            ;"
   28.42 +        "1: btrl %%eax,%0                      ;" /* clear bit     */
   28.43 +        "   mov  %%eax,(%%esp)                 ;"
   28.44 +        "   call do_IRQ                        ;" /* do_IRQ(event) */
   28.45 +        "3: bsfl %0,%%eax                      ;" /* %eax == bit # */
   28.46 +        "   jnz  1b                            ;"
   28.47 +        "   add  $8,%%esp                      ;"
   28.48 +        /* we use %ebx because it is callee-saved */
   28.49 +        : : "b" (irqs), "r" (regs)
   28.50 +        /* clobbered by callback function calls */
   28.51 +        : "eax", "ecx", "edx", "memory" ); 
   28.52 +
   28.53 +    /* do this manually */
   28.54 +    end_hypervisor_event(irq);
   28.55 +
   28.56 +    return 0;
   28.57 +}
   28.58 +
   28.59 +void do_hypervisor_callback(struct pt_regs *regs)
   28.60 +{
   28.61 +    unsigned long events, flags;
   28.62 +    shared_info_t *shared = HYPERVISOR_shared_info;
   28.63 +
   28.64 +    do {
   28.65 +        /* Specialised local_irq_save(). */
   28.66 +        flags = test_and_clear_bit(EVENTS_MASTER_ENABLE_BIT, 
   28.67 +                                   &shared->events_mask);
   28.68 +        barrier();
   28.69 +
   28.70 +        events  = xchg(&shared->events, 0);
   28.71 +        events &= event_mask;
   28.72 +
   28.73 +        if ( (events & EVENT_PHYSIRQ) != 0 )
   28.74 +        {
   28.75 +            do_physirq(_EVENT_PHYSIRQ, regs);
   28.76 +            events &= ~EVENT_PHYSIRQ;
   28.77 +        }
   28.78 +
   28.79 +        __asm__ __volatile__ (
   28.80 +            "   push %1                            ;"
   28.81 +            "   sub  $4,%%esp                      ;"
   28.82 +            "   jmp  2f                            ;"
   28.83 +            "1: btrl %%eax,%0                      ;" /* clear bit     */
   28.84 +            "   add  %2,%%eax                      ;"
   28.85 +            "   mov  %%eax,(%%esp)                 ;"
   28.86 +            "   call do_IRQ                        ;" /* do_IRQ(event) */
   28.87 +            "2: bsfl %0,%%eax                      ;" /* %eax == bit # */
   28.88 +            "   jnz  1b                            ;"
   28.89 +            "   add  $8,%%esp                      ;"
   28.90 +            /* we use %ebx because it is callee-saved */
   28.91 +            : : "b" (events), "r" (regs), "i" (HYPEREVENT_IRQ_BASE)
   28.92 +            /* clobbered by callback function calls */
   28.93 +            : "eax", "ecx", "edx", "memory" ); 
   28.94 +
   28.95 +        /* Specialised local_irq_restore(). */
   28.96 +        if ( flags ) set_bit(EVENTS_MASTER_ENABLE_BIT, &shared->events_mask);
   28.97 +        barrier();
   28.98 +    }
   28.99 +    while ( shared->events );
  28.100 +}
  28.101 +
  28.102 +/*
  28.103 + * Define interface to generic handling in irq.c
  28.104 + */
  28.105 +
  28.106 +static void shutdown_hypervisor_event(unsigned int irq)
  28.107 +{
  28.108 +    clear_bit(HYPEREVENT_FROM_IRQ(irq), &event_mask);
  28.109 +    clear_bit(HYPEREVENT_FROM_IRQ(irq), &HYPERVISOR_shared_info->events_mask);
  28.110 +}
  28.111 +
  28.112 +static void enable_hypervisor_event(unsigned int irq)
  28.113 +{
  28.114 +    set_bit(HYPEREVENT_FROM_IRQ(irq), &event_mask);
  28.115 +    set_bit(HYPEREVENT_FROM_IRQ(irq), &HYPERVISOR_shared_info->events_mask);
  28.116 +    if ( test_bit(EVENTS_MASTER_ENABLE_BIT,
  28.117 +                  &HYPERVISOR_shared_info->events_mask) )
  28.118 +        do_hypervisor_callback(NULL);
  28.119 +}
  28.120 +
  28.121 +static void disable_hypervisor_event(unsigned int irq)
  28.122 +{
  28.123 +    clear_bit(HYPEREVENT_FROM_IRQ(irq), &event_mask);
  28.124 +    clear_bit(HYPEREVENT_FROM_IRQ(irq), &HYPERVISOR_shared_info->events_mask);
  28.125 +}
  28.126 +
  28.127 +static void ack_hypervisor_event(unsigned int irq)
  28.128 +{
  28.129 +    int ev = HYPEREVENT_FROM_IRQ(irq);
  28.130 +    if ( !(event_mask & (1<<ev)) )
  28.131 +    {
  28.132 +        printk("Unexpected hypervisor event %d\n", ev);
  28.133 +        atomic_inc(&irq_err_count);
  28.134 +    }
  28.135 +    set_bit(ev, &HYPERVISOR_shared_info->events_mask);
  28.136 +}
  28.137 +
  28.138 +static unsigned int startup_hypervisor_event(unsigned int irq)
  28.139 +{
  28.140 +    enable_hypervisor_event(irq);
  28.141 +    return 0;
  28.142 +}
  28.143 +
  28.144 +static void end_hypervisor_event(unsigned int irq)
  28.145 +{
  28.146 +}
  28.147 +
  28.148 +static struct hw_interrupt_type hypervisor_irq_type = {
  28.149 +    "Hypervisor-event",
  28.150 +    startup_hypervisor_event,
  28.151 +    shutdown_hypervisor_event,
  28.152 +    enable_hypervisor_event,
  28.153 +    disable_hypervisor_event,
  28.154 +    ack_hypervisor_event,
  28.155 +    end_hypervisor_event,
  28.156 +    NULL
  28.157 +};
  28.158 +
  28.159 +void __init init_IRQ(void)
  28.160 +{
  28.161 +    int i;
  28.162 +
  28.163 +    for ( i = 0; i < NR_HYPEREVENT_IRQS; i++ )
  28.164 +    {
  28.165 +        irq_desc[i + HYPEREVENT_IRQ_BASE].status  = IRQ_DISABLED;
  28.166 +        irq_desc[i + HYPEREVENT_IRQ_BASE].action  = 0;
  28.167 +        irq_desc[i + HYPEREVENT_IRQ_BASE].depth   = 1;
  28.168 +        irq_desc[i + HYPEREVENT_IRQ_BASE].handler = &hypervisor_irq_type;
  28.169 +    }
  28.170 +
  28.171 +    /* Also initialise the physical IRQ handlers. */
  28.172 +    physirq_init();
  28.173 +}
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/i386_ksyms.c	Tue Mar 23 10:40:28 2004 +0000
    29.3 @@ -0,0 +1,175 @@
    29.4 +#include <linux/config.h>
    29.5 +#include <linux/module.h>
    29.6 +#include <linux/smp.h>
    29.7 +#include <linux/user.h>
    29.8 +#include <linux/elfcore.h>
    29.9 +#include <linux/mca.h>
   29.10 +#include <linux/sched.h>
   29.11 +#include <linux/in6.h>
   29.12 +#include <linux/interrupt.h>
   29.13 +#include <linux/smp_lock.h>
   29.14 +#include <linux/pm.h>
   29.15 +#include <linux/pci.h>
   29.16 +#include <linux/apm_bios.h>
   29.17 +#include <linux/kernel.h>
   29.18 +#include <linux/string.h>
   29.19 +#include <linux/tty.h>
   29.20 +
   29.21 +#include <asm/semaphore.h>
   29.22 +#include <asm/processor.h>
   29.23 +#include <asm/i387.h>
   29.24 +#include <asm/uaccess.h>
   29.25 +#include <asm/checksum.h>
   29.26 +#include <asm/io.h>
   29.27 +#include <asm/hardirq.h>
   29.28 +#include <asm/delay.h>
   29.29 +#include <asm/irq.h>
   29.30 +#include <asm/mmx.h>
   29.31 +#include <asm/desc.h>
   29.32 +#include <asm/pgtable.h>
   29.33 +#include <asm/pgalloc.h>
   29.34 +
   29.35 +extern void dump_thread(struct pt_regs *, struct user *);
   29.36 +extern spinlock_t rtc_lock;
   29.37 +
   29.38 +#if defined(CONFIG_APMXXX) || defined(CONFIG_APM_MODULEXXX)
   29.39 +extern void machine_real_restart(unsigned char *, int);
   29.40 +EXPORT_SYMBOL(machine_real_restart);
   29.41 +extern void default_idle(void);
   29.42 +EXPORT_SYMBOL(default_idle);
   29.43 +#endif
   29.44 +
   29.45 +#ifdef CONFIG_SMP
   29.46 +extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
   29.47 +extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
   29.48 +#endif
   29.49 +
   29.50 +#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
   29.51 +extern struct drive_info_struct drive_info;
   29.52 +EXPORT_SYMBOL(drive_info);
   29.53 +#endif
   29.54 +
   29.55 +// XXX extern unsigned long get_cmos_time(void);
   29.56 +
   29.57 +/* platform dependent support */
   29.58 +EXPORT_SYMBOL(boot_cpu_data);
   29.59 +EXPORT_SYMBOL(dump_thread);
   29.60 +EXPORT_SYMBOL(dump_fpu);
   29.61 +EXPORT_SYMBOL(dump_extended_fpu);
   29.62 +EXPORT_SYMBOL(__ioremap);
   29.63 +EXPORT_SYMBOL(iounmap);
   29.64 +EXPORT_SYMBOL(enable_irq);
   29.65 +EXPORT_SYMBOL(disable_irq);
   29.66 +EXPORT_SYMBOL(disable_irq_nosync);
   29.67 +EXPORT_SYMBOL(probe_irq_mask);
   29.68 +EXPORT_SYMBOL(kernel_thread);
   29.69 +EXPORT_SYMBOL(pm_idle);
   29.70 +EXPORT_SYMBOL(pm_power_off);
   29.71 +EXPORT_SYMBOL(apm_info);
   29.72 +//EXPORT_SYMBOL(gdt);
   29.73 +EXPORT_SYMBOL(empty_zero_page);
   29.74 +EXPORT_SYMBOL(phys_to_machine_mapping);
   29.75 +
   29.76 +
   29.77 +#ifdef CONFIG_DEBUG_IOVIRT
   29.78 +EXPORT_SYMBOL(__io_virt_debug);
   29.79 +#endif
   29.80 +
   29.81 +EXPORT_SYMBOL_NOVERS(__down_failed);
   29.82 +EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
   29.83 +EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
   29.84 +EXPORT_SYMBOL_NOVERS(__up_wakeup);
   29.85 +/* Networking helper routines. */
   29.86 +EXPORT_SYMBOL(csum_partial_copy_generic);
   29.87 +/* Delay loops */
   29.88 +EXPORT_SYMBOL(__ndelay);
   29.89 +EXPORT_SYMBOL(__udelay);
   29.90 +EXPORT_SYMBOL(__delay);
   29.91 +EXPORT_SYMBOL(__const_udelay);
   29.92 +
   29.93 +EXPORT_SYMBOL_NOVERS(__get_user_1);
   29.94 +EXPORT_SYMBOL_NOVERS(__get_user_2);
   29.95 +EXPORT_SYMBOL_NOVERS(__get_user_4);
   29.96 +
   29.97 +EXPORT_SYMBOL(strtok);
   29.98 +EXPORT_SYMBOL(strpbrk);
   29.99 +EXPORT_SYMBOL(strstr);
  29.100 +
  29.101 +EXPORT_SYMBOL(strncpy_from_user);
  29.102 +EXPORT_SYMBOL(__strncpy_from_user);
  29.103 +EXPORT_SYMBOL(clear_user);
  29.104 +EXPORT_SYMBOL(__clear_user);
  29.105 +EXPORT_SYMBOL(__generic_copy_from_user);
  29.106 +EXPORT_SYMBOL(__generic_copy_to_user);
  29.107 +EXPORT_SYMBOL(strnlen_user);
  29.108 +
  29.109 +
  29.110 +EXPORT_SYMBOL(pci_alloc_consistent);
  29.111 +EXPORT_SYMBOL(pci_free_consistent);
  29.112 +
  29.113 +#ifdef CONFIG_PCI
  29.114 +EXPORT_SYMBOL(pcibios_penalize_isa_irq);
  29.115 +EXPORT_SYMBOL(pci_mem_start);
  29.116 +#endif
  29.117 +
  29.118 +
  29.119 +#ifdef CONFIG_X86_USE_3DNOW
  29.120 +EXPORT_SYMBOL(_mmx_memcpy);
  29.121 +EXPORT_SYMBOL(mmx_clear_page);
  29.122 +EXPORT_SYMBOL(mmx_copy_page);
  29.123 +#endif
  29.124 +
  29.125 +#ifdef CONFIG_SMP
  29.126 +EXPORT_SYMBOL(cpu_data);
  29.127 +EXPORT_SYMBOL(kernel_flag_cacheline);
  29.128 +EXPORT_SYMBOL(smp_num_cpus);
  29.129 +EXPORT_SYMBOL(cpu_online_map);
  29.130 +EXPORT_SYMBOL_NOVERS(__write_lock_failed);
  29.131 +EXPORT_SYMBOL_NOVERS(__read_lock_failed);
  29.132 +
  29.133 +/* Global SMP irq stuff */
  29.134 +EXPORT_SYMBOL(synchronize_irq);
  29.135 +EXPORT_SYMBOL(global_irq_holder);
  29.136 +EXPORT_SYMBOL(__global_cli);
  29.137 +EXPORT_SYMBOL(__global_sti);
  29.138 +EXPORT_SYMBOL(__global_save_flags);
  29.139 +EXPORT_SYMBOL(__global_restore_flags);
  29.140 +EXPORT_SYMBOL(smp_call_function);
  29.141 +
  29.142 +/* TLB flushing */
  29.143 +EXPORT_SYMBOL(flush_tlb_page);
  29.144 +#endif
  29.145 +
  29.146 +#ifdef CONFIG_X86_IO_APIC
  29.147 +EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
  29.148 +#endif
  29.149 +
  29.150 +#ifdef CONFIG_VT
  29.151 +EXPORT_SYMBOL(screen_info);
  29.152 +#endif
  29.153 +
  29.154 +EXPORT_SYMBOL(get_wchan);
  29.155 +
  29.156 +EXPORT_SYMBOL(rtc_lock);
  29.157 +
  29.158 +#undef memcpy
  29.159 +#undef memset
  29.160 +extern void * memset(void *,int,__kernel_size_t);
  29.161 +extern void * memcpy(void *,const void *,__kernel_size_t);
  29.162 +EXPORT_SYMBOL_NOVERS(memcpy);
  29.163 +EXPORT_SYMBOL_NOVERS(memset);
  29.164 +
  29.165 +#ifdef CONFIG_HAVE_DEC_LOCK
  29.166 +EXPORT_SYMBOL(atomic_dec_and_lock);
  29.167 +#endif
  29.168 +
  29.169 +#ifdef CONFIG_MULTIQUAD
  29.170 +EXPORT_SYMBOL(xquad_portio);
  29.171 +#endif
  29.172 +
  29.173 +#include <asm/xen_proc.h>
  29.174 +EXPORT_SYMBOL(create_xen_proc_entry);
  29.175 +EXPORT_SYMBOL(remove_xen_proc_entry);
  29.176 +
  29.177 +EXPORT_SYMBOL(do_hypervisor_callback);
  29.178 +EXPORT_SYMBOL(HYPERVISOR_shared_info);
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/ioport.c	Tue Mar 23 10:40:28 2004 +0000
    30.3 @@ -0,0 +1,48 @@
    30.4 +#include <linux/sched.h>
    30.5 +#include <linux/kernel.h>
    30.6 +#include <linux/errno.h>
    30.7 +#include <linux/types.h>
    30.8 +#include <linux/stddef.h>
    30.9 +#include <asm/hypervisor-ifs/dom0_ops.h>
   30.10 +
   30.11 +
   30.12 +asmlinkage int sys_iopl(unsigned int new_io_pl)
   30.13 +{
   30.14 +    unsigned int old_io_pl = current->thread.io_pl;
   30.15 +    dom0_op_t op;
   30.16 +
   30.17 +    if ( !(start_info.flags & SIF_PRIVILEGED) )
   30.18 +        return -EPERM;
   30.19 +
   30.20 +    if ( new_io_pl > 3 )
   30.21 +        return -EINVAL;
   30.22 +
   30.23 +    /* Need "raw I/O" privileges for direct port access. */
   30.24 +    if ( (new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO) )
   30.25 +        return -EPERM;
   30.26 +
   30.27 +    /* Maintain OS privileges even if user attempts to relinquish them. */
   30.28 +    if ( (new_io_pl == 0) && (start_info.flags & SIF_PRIVILEGED) )
   30.29 +        new_io_pl = 1;
   30.30 +
   30.31 +    /* Change our version of the privilege levels. */
   30.32 +    current->thread.io_pl = new_io_pl;
   30.33 +
   30.34 +    /* Force the change at ring 0. */
   30.35 +    op.cmd           = DOM0_IOPL;
   30.36 +    op.u.iopl.domain = DOMID_SELF;
   30.37 +    op.u.iopl.iopl   = new_io_pl;
   30.38 +    HYPERVISOR_dom0_op(&op);
   30.39 +
   30.40 +    return 0;
   30.41 +}
   30.42 +
   30.43 +
   30.44 +asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
   30.45 +{
   30.46 +    printk(KERN_INFO "ioperm not fully supported - %s\n",
   30.47 +           turn_on ? "set iopl to 3" : "ignore resource release");
   30.48 +    return turn_on ? sys_iopl(3) : 0;
   30.49 +}
   30.50 +
   30.51 +
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/irq.c	Tue Mar 23 10:40:28 2004 +0000
    31.3 @@ -0,0 +1,1137 @@
    31.4 +/*
    31.5 + *	linux/arch/i386/kernel/irq.c
    31.6 + *
    31.7 + *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
    31.8 + *
    31.9 + * This file contains the code used by various IRQ handling routines:
   31.10 + * asking for different IRQ's should be done through these routines
   31.11 + * instead of just grabbing them. Thus setups with different IRQ numbers
   31.12 + * shouldn't result in any weird surprises, and installing new handlers
   31.13 + * should be easier.
   31.14 + */
   31.15 +
   31.16 +/*
   31.17 + * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
   31.18 + *
   31.19 + * IRQs are in fact implemented a bit like signal handlers for the kernel.
   31.20 + * Naturally it's not a 1:1 relation, but there are similarities.
   31.21 + */
   31.22 +
   31.23 +#include <linux/config.h>
   31.24 +#include <linux/ptrace.h>
   31.25 +#include <linux/errno.h>
   31.26 +#include <linux/signal.h>
   31.27 +#include <linux/sched.h>
   31.28 +#include <linux/ioport.h>
   31.29 +#include <linux/interrupt.h>
   31.30 +#include <linux/timex.h>
   31.31 +#include <linux/slab.h>
   31.32 +#include <linux/random.h>
   31.33 +#include <linux/smp_lock.h>
   31.34 +#include <linux/init.h>
   31.35 +#include <linux/kernel_stat.h>
   31.36 +#include <linux/irq.h>
   31.37 +#include <linux/proc_fs.h>
   31.38 +#include <linux/seq_file.h>
   31.39 +
   31.40 +#include <asm/atomic.h>
   31.41 +#include <asm/io.h>
   31.42 +#include <asm/smp.h>
   31.43 +#include <asm/system.h>
   31.44 +#include <asm/bitops.h>
   31.45 +#include <asm/uaccess.h>
   31.46 +#include <asm/pgalloc.h>
   31.47 +#include <asm/delay.h>
   31.48 +#include <asm/desc.h>
   31.49 +#include <asm/irq.h>
   31.50 +
   31.51 +
   31.52 +
   31.53 +/*
   31.54 + * Linux has a controller-independent x86 interrupt architecture.
   31.55 + * every controller has a 'controller-template', that is used
   31.56 + * by the main code to do the right thing. Each driver-visible
   31.57 + * interrupt source is transparently wired to the apropriate
   31.58 + * controller. Thus drivers need not be aware of the
   31.59 + * interrupt-controller.
   31.60 + *
   31.61 + * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
   31.62 + * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
   31.63 + * (IO-APICs assumed to be messaging to Pentium local-APICs)
   31.64 + *
   31.65 + * the code is designed to be easily extended with new/different
   31.66 + * interrupt controllers, without having to do assembly magic.
   31.67 + */
   31.68 +
   31.69 +/*
   31.70 + * Controller mappings for all interrupt sources:
   31.71 + */
   31.72 +irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
   31.73 +	{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
   31.74 +
   31.75 +static void register_irq_proc (unsigned int irq);
   31.76 +
   31.77 +/*
   31.78 + * Special irq handlers.
   31.79 + */
   31.80 +
   31.81 +void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
   31.82 +
   31.83 +/*
   31.84 + * Generic no controller code
   31.85 + */
   31.86 +
   31.87 +static void enable_none(unsigned int irq) { }
   31.88 +static unsigned int startup_none(unsigned int irq) { return 0; }
   31.89 +static void disable_none(unsigned int irq) { }
   31.90 +static void ack_none(unsigned int irq)
   31.91 +{
   31.92 +	printk("unexpected IRQ trap at vector %02x\n", irq);
   31.93 +}
   31.94 +
   31.95 +/* startup is the same as "enable", shutdown is same as "disable" */
   31.96 +#define shutdown_none	disable_none
   31.97 +#define end_none	enable_none
   31.98 +
   31.99 +struct hw_interrupt_type no_irq_type = {
  31.100 +	"none",
  31.101 +	startup_none,
  31.102 +	shutdown_none,
  31.103 +	enable_none,
  31.104 +	disable_none,
  31.105 +	ack_none,
  31.106 +	end_none
  31.107 +};
  31.108 +
  31.109 +atomic_t irq_err_count;
  31.110 +#ifdef CONFIG_X86_IO_APIC
  31.111 +#ifdef APIC_MISMATCH_DEBUG
  31.112 +atomic_t irq_mis_count;
  31.113 +#endif
  31.114 +#endif
  31.115 +
  31.116 +/*
  31.117 + * Generic, controller-independent functions:
  31.118 + */
  31.119 +
  31.120 +int show_interrupts(struct seq_file *p, void *v)
  31.121 +{
  31.122 +	int i, j;
  31.123 +	struct irqaction * action;
  31.124 +
  31.125 +	seq_printf(p, "           ");
  31.126 +	for (j=0; j<smp_num_cpus; j++)
  31.127 +		seq_printf(p, "CPU%d       ",j);
  31.128 +	seq_putc(p,'\n');
  31.129 +
  31.130 +	for (i = 0 ; i < NR_IRQS ; i++) {
  31.131 +		action = irq_desc[i].action;
  31.132 +		if (!action) 
  31.133 +			continue;
  31.134 +		seq_printf(p, "%3d: ",i);
  31.135 +#ifndef CONFIG_SMP
  31.136 +		seq_printf(p, "%10u ", kstat_irqs(i));
  31.137 +#else
  31.138 +		for (j = 0; j < smp_num_cpus; j++)
  31.139 +			seq_printf(p, "%10u ",
  31.140 +				kstat.irqs[cpu_logical_map(j)][i]);
  31.141 +#endif
  31.142 +		seq_printf(p, " %14s", irq_desc[i].handler->typename);
  31.143 +		seq_printf(p, "  %s", action->name);
  31.144 +
  31.145 +		for (action=action->next; action; action = action->next)
  31.146 +			seq_printf(p, ", %s", action->name);
  31.147 +		seq_putc(p,'\n');
  31.148 +	}
  31.149 +	seq_printf(p, "NMI: ");
  31.150 +	for (j = 0; j < smp_num_cpus; j++)
  31.151 +		seq_printf(p, "%10u ",
  31.152 +			nmi_count(cpu_logical_map(j)));
  31.153 +	seq_printf(p, "\n");
  31.154 +#if CONFIG_X86_LOCAL_APIC
  31.155 +	seq_printf(p, "LOC: ");
  31.156 +	for (j = 0; j < smp_num_cpus; j++)
  31.157 +		seq_printf(p, "%10u ",
  31.158 +			apic_timer_irqs[cpu_logical_map(j)]);
  31.159 +	seq_printf(p, "\n");
  31.160 +#endif
  31.161 +	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
  31.162 +#ifdef CONFIG_X86_IO_APIC
  31.163 +#ifdef APIC_MISMATCH_DEBUG
  31.164 +	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
  31.165 +#endif
  31.166 +#endif
  31.167 +
  31.168 +	return 0;
  31.169 +}
  31.170 +
  31.171 +
  31.172 +/*
  31.173 + * Global interrupt locks for SMP. Allow interrupts to come in on any
  31.174 + * CPU, yet make cli/sti act globally to protect critical regions..
  31.175 + */
  31.176 +
  31.177 +#ifdef CONFIG_SMP
  31.178 +unsigned char global_irq_holder = NO_PROC_ID;
  31.179 +unsigned volatile long global_irq_lock; /* pendantic: long for set_bit --RR */
  31.180 +
  31.181 +extern void show_stack(unsigned long* esp);
  31.182 +
  31.183 +static void show(char * str)
  31.184 +{
  31.185 +	int i;
  31.186 +	int cpu = smp_processor_id();
  31.187 +
  31.188 +	printk("\n%s, CPU %d:\n", str, cpu);
  31.189 +	printk("irq:  %d [",irqs_running());
  31.190 +	for(i=0;i < smp_num_cpus;i++)
  31.191 +		printk(" %d",local_irq_count(i));
  31.192 +	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
  31.193 +	for(i=0;i < smp_num_cpus;i++)
  31.194 +		printk(" %d",local_bh_count(i));
  31.195 +
  31.196 +	printk(" ]\nStack dumps:");
  31.197 +	for(i = 0; i < smp_num_cpus; i++) {
  31.198 +		unsigned long esp;
  31.199 +		if (i == cpu)
  31.200 +			continue;
  31.201 +		printk("\nCPU %d:",i);
  31.202 +		esp = init_tss[i].esp0;
  31.203 +		if (!esp) {
  31.204 +			/* tss->esp0 is set to NULL in cpu_init(),
  31.205 +			 * it's initialized when the cpu returns to user
  31.206 +			 * space. -- manfreds
  31.207 +			 */
  31.208 +			printk(" <unknown> ");
  31.209 +			continue;
  31.210 +		}
  31.211 +		esp &= ~(THREAD_SIZE-1);
  31.212 +		esp += sizeof(struct task_struct);
  31.213 +		show_stack((void*)esp);
  31.214 + 	}
  31.215 +	printk("\nCPU %d:",cpu);
  31.216 +	show_stack(NULL);
  31.217 +	printk("\n");
  31.218 +}
  31.219 +	
  31.220 +#define MAXCOUNT 100000000
  31.221 +
  31.222 +/*
  31.223 + * I had a lockup scenario where a tight loop doing
  31.224 + * spin_unlock()/spin_lock() on CPU#1 was racing with
  31.225 + * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
  31.226 + * apparently the spin_unlock() information did not make it
  31.227 + * through to CPU#0 ... nasty, is this by design, do we have to limit
  31.228 + * 'memory update oscillation frequency' artificially like here?
  31.229 + *
  31.230 + * Such 'high frequency update' races can be avoided by careful design, but
  31.231 + * some of our major constructs like spinlocks use similar techniques,
  31.232 + * it would be nice to clarify this issue. Set this define to 0 if you
  31.233 + * want to check whether your system freezes.  I suspect the delay done
  31.234 + * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
  31.235 + * i thought that such things are guaranteed by design, since we use
  31.236 + * the 'LOCK' prefix.
  31.237 + */
  31.238 +#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
  31.239 +
  31.240 +#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
  31.241 +# define SYNC_OTHER_CORES(x) udelay(x+1)
  31.242 +#else
  31.243 +/*
  31.244 + * We have to allow irqs to arrive between __sti and __cli
  31.245 + */
  31.246 +# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
  31.247 +#endif
  31.248 +
  31.249 +static inline void wait_on_irq(int cpu)
  31.250 +{
  31.251 +	int count = MAXCOUNT;
  31.252 +
  31.253 +	for (;;) {
  31.254 +
  31.255 +		/*
  31.256 +		 * Wait until all interrupts are gone. Wait
  31.257 +		 * for bottom half handlers unless we're
  31.258 +		 * already executing in one..
  31.259 +		 */
  31.260 +		if (!irqs_running())
  31.261 +			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
  31.262 +				break;
  31.263 +
  31.264 +		/* Duh, we have to loop. Release the lock to avoid deadlocks */
  31.265 +		clear_bit(0,&global_irq_lock);
  31.266 +
  31.267 +		for (;;) {
  31.268 +			if (!--count) {
  31.269 +				show("wait_on_irq");
  31.270 +				count = ~0;
  31.271 +			}
  31.272 +			__sti();
  31.273 +			SYNC_OTHER_CORES(cpu);
  31.274 +			__cli();
  31.275 +			if (irqs_running())
  31.276 +				continue;
  31.277 +			if (global_irq_lock)
  31.278 +				continue;
  31.279 +			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
  31.280 +				continue;
  31.281 +			if (!test_and_set_bit(0,&global_irq_lock))
  31.282 +				break;
  31.283 +		}
  31.284 +	}
  31.285 +}
  31.286 +
  31.287 +/*
  31.288 + * This is called when we want to synchronize with
  31.289 + * interrupts. We may for example tell a device to
  31.290 + * stop sending interrupts: but to make sure there
  31.291 + * are no interrupts that are executing on another
  31.292 + * CPU we need to call this function.
  31.293 + */
  31.294 +void synchronize_irq(void)
  31.295 +{
  31.296 +	if (irqs_running()) {
  31.297 +		/* Stupid approach */
  31.298 +		cli();
  31.299 +		sti();
  31.300 +	}
  31.301 +}
  31.302 +
  31.303 +static inline void get_irqlock(int cpu)
  31.304 +{
  31.305 +	if (test_and_set_bit(0,&global_irq_lock)) {
  31.306 +		/* do we already hold the lock? */
  31.307 +		if ((unsigned char) cpu == global_irq_holder)
  31.308 +			return;
  31.309 +		/* Uhhuh.. Somebody else got it. Wait.. */
  31.310 +		do {
  31.311 +			do {
  31.312 +				rep_nop();
  31.313 +			} while (test_bit(0,&global_irq_lock));
  31.314 +		} while (test_and_set_bit(0,&global_irq_lock));		
  31.315 +	}
  31.316 +	/* 
  31.317 +	 * We also to make sure that nobody else is running
  31.318 +	 * in an interrupt context. 
  31.319 +	 */
  31.320 +	wait_on_irq(cpu);
  31.321 +
  31.322 +	/*
  31.323 +	 * Ok, finally..
  31.324 +	 */
  31.325 +	global_irq_holder = cpu;
  31.326 +}
  31.327 +
  31.328 +void __global_cli(void)
  31.329 +{
  31.330 +    panic("__global_cli");
  31.331 +}
  31.332 +
  31.333 +void __global_sti(void)
  31.334 +{
  31.335 +    panic("__global_sti");
  31.336 +}
  31.337 +
  31.338 +/*
  31.339 + * SMP flags value to restore to:
  31.340 + * 0 - global cli
  31.341 + * 1 - global sti
  31.342 + * 2 - local cli
  31.343 + * 3 - local sti
  31.344 + */
  31.345 +unsigned long __global_save_flags(void)
  31.346 +{
  31.347 +    panic("__global_save_flags");
  31.348 +}
  31.349 +
  31.350 +void __global_restore_flags(unsigned long flags)
  31.351 +{
  31.352 +    panic("__global_restore_flags");
  31.353 +}
  31.354 +
  31.355 +#endif
  31.356 +
  31.357 +/*
  31.358 + * This should really return information about whether
  31.359 + * we should do bottom half handling etc. Right now we
  31.360 + * end up _always_ checking the bottom half, which is a
  31.361 + * waste of time and is not what some drivers would
  31.362 + * prefer.
  31.363 + */
  31.364 +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
  31.365 +{
  31.366 +	int status;
  31.367 +	int cpu = smp_processor_id();
  31.368 +
  31.369 +	irq_enter(cpu, irq);
  31.370 +
  31.371 +	status = 1;	/* Force the "do bottom halves" bit */
  31.372 +
  31.373 +	if (!(action->flags & SA_INTERRUPT))
  31.374 +		__sti();
  31.375 +
  31.376 +	do {
  31.377 +		status |= action->flags;
  31.378 +		action->handler(irq, action->dev_id, regs);
  31.379 +		action = action->next;
  31.380 +	} while (action);
  31.381 +	if (status & SA_SAMPLE_RANDOM)
  31.382 +		add_interrupt_randomness(irq);
  31.383 +	__cli();
  31.384 +
  31.385 +	irq_exit(cpu, irq);
  31.386 +
  31.387 +	return status;
  31.388 +}
  31.389 +
  31.390 +/*
  31.391 + * Generic enable/disable code: this just calls
  31.392 + * down into the PIC-specific version for the actual
  31.393 + * hardware disable after having gotten the irq
  31.394 + * controller lock. 
  31.395 + */
  31.396 + 
  31.397 +/**
  31.398 + *	disable_irq_nosync - disable an irq without waiting
  31.399 + *	@irq: Interrupt to disable
  31.400 + *
  31.401 + *	Disable the selected interrupt line.  Disables and Enables are
  31.402 + *	nested.
  31.403 + *	Unlike disable_irq(), this function does not ensure existing
  31.404 + *	instances of the IRQ handler have completed before returning.
  31.405 + *
  31.406 + *	This function may be called from IRQ context.
  31.407 + */
  31.408 + 
  31.409 +inline void disable_irq_nosync(unsigned int irq)
  31.410 +{
  31.411 +	irq_desc_t *desc = irq_desc + irq;
  31.412 +	unsigned long flags;
  31.413 +
  31.414 +	spin_lock_irqsave(&desc->lock, flags);
  31.415 +	if (!desc->depth++) {
  31.416 +		desc->status |= IRQ_DISABLED;
  31.417 +		desc->handler->disable(irq);
  31.418 +	}
  31.419 +	spin_unlock_irqrestore(&desc->lock, flags);
  31.420 +}
  31.421 +
  31.422 +/**
  31.423 + *	disable_irq - disable an irq and wait for completion
  31.424 + *	@irq: Interrupt to disable
  31.425 + *
  31.426 + *	Disable the selected interrupt line.  Enables and Disables are
  31.427 + *	nested.
  31.428 + *	This function waits for any pending IRQ handlers for this interrupt
  31.429 + *	to complete before returning. If you use this function while
  31.430 + *	holding a resource the IRQ handler may need you will deadlock.
  31.431 + *
  31.432 + *	This function may be called - with care - from IRQ context.
  31.433 + */
  31.434 + 
  31.435 +void disable_irq(unsigned int irq)
  31.436 +{
  31.437 +	disable_irq_nosync(irq);
  31.438 +
  31.439 +	if (!local_irq_count(smp_processor_id())) {
  31.440 +		do {
  31.441 +			barrier();
  31.442 +			cpu_relax();
  31.443 +		} while (irq_desc[irq].status & IRQ_INPROGRESS);
  31.444 +	}
  31.445 +}
  31.446 +
  31.447 +/**
  31.448 + *	enable_irq - enable handling of an irq
  31.449 + *	@irq: Interrupt to enable
  31.450 + *
  31.451 + *	Undoes the effect of one call to disable_irq().  If this
  31.452 + *	matches the last disable, processing of interrupts on this
  31.453 + *	IRQ line is re-enabled.
  31.454 + *
  31.455 + *	This function may be called from IRQ context.
  31.456 + */
  31.457 + 
  31.458 +void enable_irq(unsigned int irq)
  31.459 +{
  31.460 +	irq_desc_t *desc = irq_desc + irq;
  31.461 +	unsigned long flags;
  31.462 +
  31.463 +	spin_lock_irqsave(&desc->lock, flags);
  31.464 +	switch (desc->depth) {
  31.465 +	case 1: {
  31.466 +		unsigned int status = desc->status & ~IRQ_DISABLED;
  31.467 +		desc->status = status;
  31.468 +		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
  31.469 +			desc->status = status | IRQ_REPLAY;
  31.470 +			hw_resend_irq(desc->handler,irq);
  31.471 +		}
  31.472 +		desc->handler->enable(irq);
  31.473 +		/* fall-through */
  31.474 +	}
  31.475 +	default:
  31.476 +		desc->depth--;
  31.477 +		break;
  31.478 +	case 0:
  31.479 +		printk("enable_irq(%u) unbalanced from %p\n", irq,
  31.480 +		       __builtin_return_address(0));
  31.481 +	}
  31.482 +	spin_unlock_irqrestore(&desc->lock, flags);
  31.483 +}
  31.484 +
  31.485 +/*
  31.486 + * do_IRQ handles all normal device IRQ's (the special
  31.487 + * SMP cross-CPU interrupts have their own specific
  31.488 + * handlers).
  31.489 + */
  31.490 +asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
  31.491 +{	
  31.492 +	/* 
  31.493 +	 * We ack quickly, we don't want the irq controller
  31.494 +	 * thinking we're snobs just because some other CPU has
  31.495 +	 * disabled global interrupts (we have already done the
  31.496 +	 * INT_ACK cycles, it's too late to try to pretend to the
  31.497 +	 * controller that we aren't taking the interrupt).
  31.498 +	 *
  31.499 +	 * 0 return value means that this irq is already being
  31.500 +	 * handled by some other CPU. (or is disabled)
  31.501 +	 */
  31.502 +	int cpu = smp_processor_id();
  31.503 +	irq_desc_t *desc = irq_desc + irq;
  31.504 +	struct irqaction * action;
  31.505 +	unsigned int status;
  31.506 +#ifdef CONFIG_DEBUG_STACKOVERFLOW
  31.507 +	long esp;
  31.508 +
  31.509 +	/* Debugging check for stack overflow: is there less than 1KB free? */
  31.510 +	__asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
  31.511 +	if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
  31.512 +		extern void show_stack(unsigned long *);
  31.513 +
  31.514 +		printk("do_IRQ: stack overflow: %ld\n",
  31.515 +			esp - sizeof(struct task_struct));
  31.516 +		__asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
  31.517 +		show_stack((void *)esp);
  31.518 +	}
  31.519 +#endif
  31.520 +
  31.521 +	kstat.irqs[cpu][irq]++;
  31.522 +	spin_lock(&desc->lock);
  31.523 +	desc->handler->ack(irq);
  31.524 +	/*
  31.525 +	   REPLAY is when Linux resends an IRQ that was dropped earlier
  31.526 +	   WAITING is used by probe to mark irqs that are being tested
  31.527 +	   */
  31.528 +	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
  31.529 +	status |= IRQ_PENDING; /* we _want_ to handle it */
  31.530 +
  31.531 +	/*
  31.532 +	 * If the IRQ is disabled for whatever reason, we cannot
  31.533 +	 * use the action we have.
  31.534 +	 */
  31.535 +	action = NULL;
  31.536 +	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
  31.537 +		action = desc->action;
  31.538 +		status &= ~IRQ_PENDING; /* we commit to handling */
  31.539 +		status |= IRQ_INPROGRESS; /* we are handling it */
  31.540 +	}
  31.541 +	desc->status = status;
  31.542 +
  31.543 +	/*
  31.544 +	 * If there is no IRQ handler or it was disabled, exit early.
  31.545 +	   Since we set PENDING, if another processor is handling
  31.546 +	   a different instance of this same irq, the other processor
  31.547 +	   will take care of it.
  31.548 +	 */
  31.549 +	if (!action)
  31.550 +		goto out;
  31.551 +
  31.552 +	/*
  31.553 +	 * Edge triggered interrupts need to remember
  31.554 +	 * pending events.
  31.555 +	 * This applies to any hw interrupts that allow a second
  31.556 +	 * instance of the same irq to arrive while we are in do_IRQ
  31.557 +	 * or in the handler. But the code here only handles the _second_
  31.558 +	 * instance of the irq, not the third or fourth. So it is mostly
  31.559 +	 * useful for irq hardware that does not mask cleanly in an
  31.560 +	 * SMP environment.
  31.561 +	 */
  31.562 +	for (;;) {
  31.563 +		spin_unlock(&desc->lock);
  31.564 +		handle_IRQ_event(irq, regs, action);
  31.565 +		spin_lock(&desc->lock);
  31.566 +		
  31.567 +		if (!(desc->status & IRQ_PENDING))
  31.568 +			break;
  31.569 +		desc->status &= ~IRQ_PENDING;
  31.570 +	}
  31.571 +	desc->status &= ~IRQ_INPROGRESS;
  31.572 +out:
  31.573 +	/*
  31.574 +	 * The ->end() handler has to deal with interrupts which got
  31.575 +	 * disabled while the handler was running.
  31.576 +	 */
  31.577 +	desc->handler->end(irq);
  31.578 +	spin_unlock(&desc->lock);
  31.579 +
  31.580 +	if (softirq_pending(cpu))
  31.581 +		do_softirq();
  31.582 +	return 1;
  31.583 +}
  31.584 +
  31.585 +/**
  31.586 + *	request_irq - allocate an interrupt line
  31.587 + *	@irq: Interrupt line to allocate
  31.588 + *	@handler: Function to be called when the IRQ occurs
  31.589 + *	@irqflags: Interrupt type flags
  31.590 + *	@devname: An ascii name for the claiming device
  31.591 + *	@dev_id: A cookie passed back to the handler function
  31.592 + *
  31.593 + *	This call allocates interrupt resources and enables the
  31.594 + *	interrupt line and IRQ handling. From the point this
  31.595 + *	call is made your handler function may be invoked. Since
  31.596 + *	your handler function must clear any interrupt the board 
  31.597 + *	raises, you must take care both to initialise your hardware
  31.598 + *	and to set up the interrupt handler in the right order.
  31.599 + *
  31.600 + *	Dev_id must be globally unique. Normally the address of the
  31.601 + *	device data structure is used as the cookie. Since the handler
  31.602 + *	receives this value it makes sense to use it.
  31.603 + *
  31.604 + *	If your interrupt is shared you must pass a non NULL dev_id
  31.605 + *	as this is required when freeing the interrupt.
  31.606 + *
  31.607 + *	Flags:
  31.608 + *
  31.609 + *	SA_SHIRQ		Interrupt is shared
  31.610 + *
  31.611 + *	SA_INTERRUPT		Disable local interrupts while processing
  31.612 + *
  31.613 + *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
  31.614 + *
  31.615 + */
  31.616 + 
  31.617 +int request_irq(unsigned int irq, 
  31.618 +		void (*handler)(int, void *, struct pt_regs *),
  31.619 +		unsigned long irqflags, 
  31.620 +		const char * devname,
  31.621 +		void *dev_id)
  31.622 +{
  31.623 +	int retval;
  31.624 +	struct irqaction * action;
  31.625 +
  31.626 +#if 1
  31.627 +	/*
  31.628 +	 * Sanity-check: shared interrupts should REALLY pass in
  31.629 +	 * a real dev-ID, otherwise we'll have trouble later trying
  31.630 +	 * to figure out which interrupt is which (messes up the
  31.631 +	 * interrupt freeing logic etc).
  31.632 +	 */
  31.633 +	if (irqflags & SA_SHIRQ) {
  31.634 +		if (!dev_id)
  31.635 +			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
  31.636 +	}
  31.637 +#endif
  31.638 +
  31.639 +	if (irq >= NR_IRQS)
  31.640 +		return -EINVAL;
  31.641 +	if (!handler)
  31.642 +		return -EINVAL;
  31.643 +
  31.644 +	action = (struct irqaction *)
  31.645 +			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  31.646 +	if (!action)
  31.647 +		return -ENOMEM;
  31.648 +
  31.649 +	action->handler = handler;
  31.650 +	action->flags = irqflags;
  31.651 +	action->mask = 0;
  31.652 +	action->name = devname;
  31.653 +	action->next = NULL;
  31.654 +	action->dev_id = dev_id;
  31.655 +
  31.656 +	retval = setup_irq(irq, action);
  31.657 +	if (retval)
  31.658 +		kfree(action);
  31.659 +	return retval;
  31.660 +}
  31.661 +
  31.662 +/**
  31.663 + *	free_irq - free an interrupt
  31.664 + *	@irq: Interrupt line to free
  31.665 + *	@dev_id: Device identity to free
  31.666 + *
  31.667 + *	Remove an interrupt handler. The handler is removed and if the
  31.668 + *	interrupt line is no longer in use by any driver it is disabled.
  31.669 + *	On a shared IRQ the caller must ensure the interrupt is disabled
  31.670 + *	on the card it drives before calling this function. The function
  31.671 + *	does not return until any executing interrupts for this IRQ
  31.672 + *	have completed.
  31.673 + *
  31.674 + *	This function may be called from interrupt context. 
  31.675 + *
  31.676 + *	Bugs: Attempting to free an irq in a handler for the same irq hangs
  31.677 + *	      the machine.
  31.678 + */
  31.679 + 
  31.680 +void free_irq(unsigned int irq, void *dev_id)
  31.681 +{
  31.682 +	irq_desc_t *desc;
  31.683 +	struct irqaction **p;
  31.684 +	unsigned long flags;
  31.685 +
  31.686 +	if (irq >= NR_IRQS)
  31.687 +		return;
  31.688 +
  31.689 +	desc = irq_desc + irq;
  31.690 +	spin_lock_irqsave(&desc->lock,flags);
  31.691 +	p = &desc->action;
  31.692 +	for (;;) {
  31.693 +		struct irqaction * action = *p;
  31.694 +		if (action) {
  31.695 +			struct irqaction **pp = p;
  31.696 +			p = &action->next;
  31.697 +			if (action->dev_id != dev_id)
  31.698 +				continue;
  31.699 +
  31.700 +			/* Found it - now remove it from the list of entries */
  31.701 +			*pp = action->next;
  31.702 +			if (!desc->action) {
  31.703 +				desc->status |= IRQ_DISABLED;
  31.704 +				desc->handler->shutdown(irq);
  31.705 +			}
  31.706 +			spin_unlock_irqrestore(&desc->lock,flags);
  31.707 +
  31.708 +#ifdef CONFIG_SMP
  31.709 +			/* Wait to make sure it's not being used on another CPU */
  31.710 +			while (desc->status & IRQ_INPROGRESS) {
  31.711 +				barrier();
  31.712 +				cpu_relax();
  31.713 +			}
  31.714 +#endif
  31.715 +			kfree(action);
  31.716 +			return;
  31.717 +		}
  31.718 +		printk("Trying to free free IRQ%d\n",irq);
  31.719 +		spin_unlock_irqrestore(&desc->lock,flags);
  31.720 +		return;
  31.721 +	}
  31.722 +}
  31.723 +
  31.724 +/*
  31.725 + * IRQ autodetection code..
  31.726 + *
  31.727 + * This depends on the fact that any interrupt that
  31.728 + * comes in on to an unassigned handler will get stuck
  31.729 + * with "IRQ_WAITING" cleared and the interrupt
  31.730 + * disabled.
  31.731 + */
  31.732 +
  31.733 +static DECLARE_MUTEX(probe_sem);
  31.734 +
  31.735 +/**
  31.736 + *	probe_irq_on	- begin an interrupt autodetect
  31.737 + *
  31.738 + *	Commence probing for an interrupt. The interrupts are scanned
  31.739 + *	and a mask of potential interrupt lines is returned.
  31.740 + *
  31.741 + */
  31.742 + 
  31.743 +unsigned long probe_irq_on(void)
  31.744 +{
  31.745 +	unsigned int i;
  31.746 +	irq_desc_t *desc;
  31.747 +	unsigned long val;
  31.748 +	unsigned long delay;
  31.749 +
  31.750 +	down(&probe_sem);
  31.751 +	/* 
  31.752 +	 * something may have generated an irq long ago and we want to
  31.753 +	 * flush such a longstanding irq before considering it as spurious. 
  31.754 +	 */
  31.755 +	for (i = NR_IRQS-1; i > 0; i--)  {
  31.756 +		desc = irq_desc + i;
  31.757 +
  31.758 +		spin_lock_irq(&desc->lock);
  31.759 +		if (!irq_desc[i].action) 
  31.760 +			irq_desc[i].handler->startup(i);
  31.761 +		spin_unlock_irq(&desc->lock);
  31.762 +	}
  31.763 +
  31.764 +	/* Wait for longstanding interrupts to trigger. */
  31.765 +	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
  31.766 +		/* about 20ms delay */ synchronize_irq();
  31.767 +
  31.768 +	/*
  31.769 +	 * enable any unassigned irqs
  31.770 +	 * (we must startup again here because if a longstanding irq
  31.771 +	 * happened in the previous stage, it may have masked itself)
  31.772 +	 */
  31.773 +	for (i = NR_IRQS-1; i > 0; i--) {
  31.774 +		desc = irq_desc + i;
  31.775 +
  31.776 +		spin_lock_irq(&desc->lock);
  31.777 +		if (!desc->action) {
  31.778 +			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
  31.779 +			if (desc->handler->startup(i))
  31.780 +				desc->status |= IRQ_PENDING;
  31.781 +		}
  31.782 +		spin_unlock_irq(&desc->lock);
  31.783 +	}
  31.784 +
  31.785 +	/*
  31.786 +	 * Wait for spurious interrupts to trigger
  31.787 +	 */
  31.788 +	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
  31.789 +		/* about 100ms delay */ synchronize_irq();
  31.790 +
  31.791 +	/*
  31.792 +	 * Now filter out any obviously spurious interrupts
  31.793 +	 */
  31.794 +	val = 0;
  31.795 +	for (i = 0; i < NR_IRQS; i++) {
  31.796 +		irq_desc_t *desc = irq_desc + i;
  31.797 +		unsigned int status;
  31.798 +
  31.799 +		spin_lock_irq(&desc->lock);
  31.800 +		status = desc->status;
  31.801 +
  31.802 +		if (status & IRQ_AUTODETECT) {
  31.803 +			/* It triggered already - consider it spurious. */
  31.804 +			if (!(status & IRQ_WAITING)) {
  31.805 +				desc->status = status & ~IRQ_AUTODETECT;
  31.806 +				desc->handler->shutdown(i);
  31.807 +			} else
  31.808 +				if (i < 32)
  31.809 +					val |= 1 << i;
  31.810 +		}
  31.811 +		spin_unlock_irq(&desc->lock);
  31.812 +	}
  31.813 +
  31.814 +	return val;
  31.815 +}
  31.816 +
  31.817 +/*
  31.818 + * Return a mask of triggered interrupts (this
  31.819 + * can handle only legacy ISA interrupts).
  31.820 + */
  31.821 + 
  31.822 +/**
  31.823 + *	probe_irq_mask - scan a bitmap of interrupt lines
  31.824 + *	@val:	mask of interrupts to consider
  31.825 + *
  31.826 + *	Scan the ISA bus interrupt lines and return a bitmap of
  31.827 + *	active interrupts. The interrupt probe logic state is then
  31.828 + *	returned to its previous value.
  31.829 + *
  31.830 + *	Note: we need to scan all the irq's even though we will
  31.831 + *	only return ISA irq numbers - just so that we reset them
  31.832 + *	all to a known state.
  31.833 + */
  31.834 +unsigned int probe_irq_mask(unsigned long val)
  31.835 +{
  31.836 +	int i;
  31.837 +	unsigned int mask;
  31.838 +
  31.839 +	mask = 0;
  31.840 +	for (i = 0; i < NR_IRQS; i++) {
  31.841 +		irq_desc_t *desc = irq_desc + i;
  31.842 +		unsigned int status;
  31.843 +
  31.844 +		spin_lock_irq(&desc->lock);
  31.845 +		status = desc->status;
  31.846 +
  31.847 +		if (status & IRQ_AUTODETECT) {
  31.848 +			if (i < 16 && !(status & IRQ_WAITING))
  31.849 +				mask |= 1 << i;
  31.850 +
  31.851 +			desc->status = status & ~IRQ_AUTODETECT;
  31.852 +			desc->handler->shutdown(i);
  31.853 +		}
  31.854 +		spin_unlock_irq(&desc->lock);
  31.855 +	}
  31.856 +	up(&probe_sem);
  31.857 +
  31.858 +	return mask & val;
  31.859 +}
  31.860 +
  31.861 +/*
  31.862 + * Return the one interrupt that triggered (this can
  31.863 + * handle any interrupt source).
  31.864 + */
  31.865 +
  31.866 +/**
  31.867 + *	probe_irq_off	- end an interrupt autodetect
  31.868 + *	@val: mask of potential interrupts (unused)
  31.869 + *
  31.870 + *	Scans the unused interrupt lines and returns the line which
  31.871 + *	appears to have triggered the interrupt. If no interrupt was
  31.872 + *	found then zero is returned. If more than one interrupt is
  31.873 + *	found then minus the first candidate is returned to indicate
  31.874 + *	there is doubt.
  31.875 + *
  31.876 + *	The interrupt probe logic state is returned to its previous
  31.877 + *	value.
  31.878 + *
  31.879 + *	BUGS: When used in a module (which arguably shouldn't happen)
  31.880 + *	nothing prevents two IRQ probe callers from overlapping. The
  31.881 + *	results of this are non-optimal.
  31.882 + */
  31.883 + 
  31.884 +int probe_irq_off(unsigned long val)
  31.885 +{
  31.886 +	int i, irq_found, nr_irqs;
  31.887 +
  31.888 +	nr_irqs = 0;
  31.889 +	irq_found = 0;
  31.890 +	for (i = 0; i < NR_IRQS; i++) {
  31.891 +		irq_desc_t *desc = irq_desc + i;
  31.892 +		unsigned int status;
  31.893 +
  31.894 +		spin_lock_irq(&desc->lock);
  31.895 +		status = desc->status;
  31.896 +
  31.897 +		if (status & IRQ_AUTODETECT) {
  31.898 +			if (!(status & IRQ_WAITING)) {
  31.899 +				if (!nr_irqs)
  31.900 +					irq_found = i;
  31.901 +				nr_irqs++;
  31.902 +			}
  31.903 +			desc->status = status & ~IRQ_AUTODETECT;
  31.904 +			desc->handler->shutdown(i);
  31.905 +		}
  31.906 +		spin_unlock_irq(&desc->lock);
  31.907 +	}
  31.908 +	up(&probe_sem);
  31.909 +
  31.910 +	if (nr_irqs > 1)
  31.911 +		irq_found = -irq_found;
  31.912 +	return irq_found;
  31.913 +}
  31.914 +
  31.915 +/* this was setup_x86_irq but it seems pretty generic */
  31.916 +int setup_irq(unsigned int irq, struct irqaction * new)
  31.917 +{
  31.918 +	int shared = 0;
  31.919 +	unsigned long flags;
  31.920 +	struct irqaction *old, **p;
  31.921 +	irq_desc_t *desc = irq_desc + irq;
  31.922 +
  31.923 +	/*
  31.924 +	 * Some drivers like serial.c use request_irq() heavily,
  31.925 +	 * so we have to be careful not to interfere with a
  31.926 +	 * running system.
  31.927 +	 */
  31.928 +	if (new->flags & SA_SAMPLE_RANDOM) {
  31.929 +		/*
  31.930 +		 * This function might sleep, we want to call it first,
  31.931 +		 * outside of the atomic block.
  31.932 +		 * Yes, this might clear the entropy pool if the wrong
  31.933 +		 * driver is attempted to be loaded, without actually
  31.934 +		 * installing a new handler, but is this really a problem,
  31.935 +		 * only the sysadmin is able to do this.
  31.936 +		 */
  31.937 +		rand_initialize_irq(irq);
  31.938 +	}
  31.939 +
  31.940 +	/*
  31.941 +	 * The following block of code has to be executed atomically
  31.942 +	 */
  31.943 +	spin_lock_irqsave(&desc->lock,flags);
  31.944 +	p = &desc->action;
  31.945 +	if ((old = *p) != NULL) {
  31.946 +		/* Can't share interrupts unless both agree to */
  31.947 +		if (!(old->flags & new->flags & SA_SHIRQ)) {
  31.948 +			spin_unlock_irqrestore(&desc->lock,flags);
  31.949 +			return -EBUSY;
  31.950 +		}
  31.951 +
  31.952 +		/* add new interrupt at end of irq queue */
  31.953 +		do {
  31.954 +			p = &old->next;
  31.955 +			old = *p;
  31.956 +		} while (old);
  31.957 +		shared = 1;
  31.958 +	}
  31.959 +
  31.960 +	*p = new;
  31.961 +
  31.962 +	if (!shared) {
  31.963 +		desc->depth = 0;
  31.964 +		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
  31.965 +		desc->handler->startup(irq);
  31.966 +	}
  31.967 +	spin_unlock_irqrestore(&desc->lock,flags);
  31.968 +
  31.969 +	register_irq_proc(irq);
  31.970 +	return 0;
  31.971 +}
  31.972 +
  31.973 +static struct proc_dir_entry * root_irq_dir;
  31.974 +static struct proc_dir_entry * irq_dir [NR_IRQS];
  31.975 +
  31.976 +#define HEX_DIGITS 8
  31.977 +
  31.978 +static unsigned int parse_hex_value (const char *buffer,
  31.979 +		unsigned long count, unsigned long *ret)
  31.980 +{
  31.981 +	unsigned char hexnum [HEX_DIGITS];
  31.982 +	unsigned long value;
  31.983 +	int i;
  31.984 +
  31.985 +	if (!count)
  31.986 +		return -EINVAL;
  31.987 +	if (count > HEX_DIGITS)
  31.988 +		count = HEX_DIGITS;
  31.989 +	if (copy_from_user(hexnum, buffer, count))
  31.990 +		return -EFAULT;
  31.991 +
  31.992 +	/*
  31.993 +	 * Parse the first 8 characters as a hex string, any non-hex char
  31.994 +	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
  31.995 +	 */
  31.996 +	value = 0;
  31.997 +
  31.998 +	for (i = 0; i < count; i++) {
  31.999 +		unsigned int c = hexnum[i];
 31.1000 +
 31.1001 +		switch (c) {
 31.1002 +			case '0' ... '9': c -= '0'; break;
 31.1003 +			case 'a' ... 'f': c -= 'a'-10; break;
 31.1004 +			case 'A' ... 'F': c -= 'A'-10; break;
 31.1005 +		default:
 31.1006 +			goto out;
 31.1007 +		}
 31.1008 +		value = (value << 4) | c;
 31.1009 +	}
 31.1010 +out:
 31.1011 +	*ret = value;
 31.1012 +	return 0;
 31.1013 +}
 31.1014 +
 31.1015 +#if CONFIG_SMP
 31.1016 +
 31.1017 +static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 31.1018 +
 31.1019 +static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
 31.1020 +static int irq_affinity_read_proc (char *page, char **start, off_t off,
 31.1021 +			int count, int *eof, void *data)
 31.1022 +{
 31.1023 +	if (count < HEX_DIGITS+1)
 31.1024 +		return -EINVAL;
 31.1025 +	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
 31.1026 +}
 31.1027 +
 31.1028 +static int irq_affinity_write_proc (struct file *file, const char *buffer,
 31.1029 +					unsigned long count, void *data)
 31.1030 +{
 31.1031 +	int irq = (long) data, full_count = count, err;
 31.1032 +	unsigned long new_value;
 31.1033 +
 31.1034 +	if (!irq_desc[irq].handler->set_affinity)
 31.1035 +		return -EIO;
 31.1036 +
 31.1037 +	err = parse_hex_value(buffer, count, &new_value);
 31.1038 +
 31.1039 +	/*
 31.1040 +	 * Do not allow disabling IRQs completely - it's a too easy
 31.1041 +	 * way to make the system unusable accidentally :-) At least
 31.1042 +	 * one online CPU still has to be targeted.
 31.1043 +	 */
 31.1044 +	if (!(new_value & cpu_online_map))
 31.1045 +		return -EINVAL;
 31.1046 +
 31.1047 +	irq_affinity[irq] = new_value;
 31.1048 +	irq_desc[irq].handler->set_affinity(irq, new_value);
 31.1049 +
 31.1050 +	return full_count;
 31.1051 +}
 31.1052 +
 31.1053 +#endif
 31.1054 +
 31.1055 +static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 31.1056 +			int count, int *eof, void *data)
 31.1057 +{
 31.1058 +	unsigned long *mask = (unsigned long *) data;
 31.1059 +	if (count < HEX_DIGITS+1)
 31.1060 +		return -EINVAL;
 31.1061 +	return sprintf (page, "%08lx\n", *mask);
 31.1062 +}
 31.1063 +
 31.1064 +static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 31.1065 +					unsigned long count, void *data)
 31.1066 +{
 31.1067 +	unsigned long *mask = (unsigned long *) data, full_count = count, err;
 31.1068 +	unsigned long new_value;
 31.1069 +
 31.1070 +	err = parse_hex_value(buffer, count, &new_value);
 31.1071 +	if (err)
 31.1072 +		return err;
 31.1073 +
 31.1074 +	*mask = new_value;
 31.1075 +	return full_count;
 31.1076 +}
 31.1077 +
 31.1078 +#define MAX_NAMELEN 10
 31.1079 +
 31.1080 +static void register_irq_proc (unsigned int irq)
 31.1081 +{
 31.1082 +	char name [MAX_NAMELEN];
 31.1083 +
 31.1084 +	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
 31.1085 +			irq_dir[irq])
 31.1086 +		return;
 31.1087 +
 31.1088 +	memset(name, 0, MAX_NAMELEN);
 31.1089 +	sprintf(name, "%d", irq);
 31.1090 +
 31.1091 +	/* create /proc/irq/1234 */
 31.1092 +	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
 31.1093 +
 31.1094 +#if CONFIG_SMP
 31.1095 +	{
 31.1096 +		struct proc_dir_entry *entry;
 31.1097 +
 31.1098 +		/* create /proc/irq/1234/smp_affinity */
 31.1099 +		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
 31.1100 +
 31.1101 +		if (entry) {
 31.1102 +			entry->nlink = 1;
 31.1103 +			entry->data = (void *)(long)irq;
 31.1104 +			entry->read_proc = irq_affinity_read_proc;
 31.1105 +			entry->write_proc = irq_affinity_write_proc;
 31.1106 +		}
 31.1107 +
 31.1108 +		smp_affinity_entry[irq] = entry;
 31.1109 +	}
 31.1110 +#endif
 31.1111 +}
 31.1112 +
 31.1113 +unsigned long prof_cpu_mask = -1;
 31.1114 +
 31.1115 +void init_irq_proc (void)
 31.1116 +{
 31.1117 +	struct proc_dir_entry *entry;
 31.1118 +	int i;
 31.1119 +
 31.1120 +	/* create /proc/irq */
 31.1121 +	root_irq_dir = proc_mkdir("irq", 0);
 31.1122 +
 31.1123 +	/* create /proc/irq/prof_cpu_mask */
 31.1124 +	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
 31.1125 +
 31.1126 +	if (!entry)
 31.1127 +	    return;
 31.1128 +
 31.1129 +	entry->nlink = 1;
 31.1130 +	entry->data = (void *)&prof_cpu_mask;
 31.1131 +	entry->read_proc = prof_cpu_mask_read_proc;
 31.1132 +	entry->write_proc = prof_cpu_mask_write_proc;
 31.1133 +
 31.1134 +	/*
 31.1135 +	 * Create entries for all existing IRQs.
 31.1136 +	 */
 31.1137 +	for (i = 0; i < NR_IRQS; i++)
 31.1138 +		register_irq_proc(i);
 31.1139 +}
 31.1140 +
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/ldt.c	Tue Mar 23 10:40:28 2004 +0000
    32.3 @@ -0,0 +1,287 @@
    32.4 +/*
    32.5 + * linux/kernel/ldt.c
    32.6 + *
    32.7 + * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
    32.8 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
    32.9 + */
   32.10 +
   32.11 +#include <linux/errno.h>
   32.12 +#include <linux/sched.h>
   32.13 +#include <linux/string.h>
   32.14 +#include <linux/mm.h>
   32.15 +#include <linux/smp.h>
   32.16 +#include <linux/smp_lock.h>
   32.17 +#include <linux/vmalloc.h>
   32.18 +#include <linux/slab.h>
   32.19 +
   32.20 +#include <asm/uaccess.h>
   32.21 +#include <asm/system.h>
   32.22 +#include <asm/ldt.h>
   32.23 +#include <asm/desc.h>
   32.24 +
   32.25 +#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
   32.26 +static void flush_ldt(void *mm)
   32.27 +{
   32.28 +	if (current->active_mm)
   32.29 +		load_LDT(&current->active_mm->context);
   32.30 +}
   32.31 +#endif
   32.32 +
   32.33 +static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
   32.34 +{
   32.35 +	void *oldldt;
   32.36 +	void *newldt;
   32.37 +	int oldsize;
   32.38 +
   32.39 +	if (mincount <= pc->size)
   32.40 +		return 0;
   32.41 +	oldsize = pc->size;
   32.42 +	mincount = (mincount+511)&(~511);
   32.43 +	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
   32.44 +		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
   32.45 +	else
   32.46 +		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
   32.47 +
   32.48 +	if (!newldt)
   32.49 +		return -ENOMEM;
   32.50 +
   32.51 +	if (oldsize)
   32.52 +		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
   32.53 +
   32.54 +	oldldt = pc->ldt;
   32.55 +	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
   32.56 +	wmb();
   32.57 +	pc->ldt = newldt;
   32.58 +	pc->size = mincount;
   32.59 +	if (reload) {
   32.60 +		make_pages_readonly(
   32.61 +			pc->ldt,
   32.62 +			(pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
   32.63 +		load_LDT(pc);
   32.64 +		flush_page_update_queue();
   32.65 +#ifdef CONFIG_SMP
   32.66 +		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
   32.67 +			smp_call_function(flush_ldt, 0, 1, 1);
   32.68 +#endif
   32.69 +	}
   32.70 +	wmb();
   32.71 +	if (oldsize) {
   32.72 +		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
   32.73 +			vfree(oldldt);
   32.74 +		else
   32.75 +			kfree(oldldt);
   32.76 +	}
   32.77 +	return 0;
   32.78 +}
   32.79 +
   32.80 +static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
   32.81 +{
   32.82 +	int err = alloc_ldt(new, old->size, 0);
   32.83 +	if (err < 0) {
   32.84 +		printk(KERN_WARNING "ldt allocation failed\n");
   32.85 +		new->size = 0;
   32.86 +		return err;
   32.87 +	}
   32.88 +	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
   32.89 +	make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
   32.90 +	return 0;
   32.91 +}
   32.92 +
   32.93 +/*
   32.94 + * we do not have to muck with descriptors here, that is
   32.95 + * done in switch_mm() as needed.
   32.96 + */
   32.97 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
   32.98 +{
   32.99 +	struct mm_struct * old_mm;
  32.100 +	int retval = 0;
  32.101 +
  32.102 +	init_MUTEX(&mm->context.sem);
  32.103 +	mm->context.size = 0;
  32.104 +	old_mm = current->mm;
  32.105 +	if (old_mm && old_mm->context.size > 0) {
  32.106 +		down(&old_mm->context.sem);
  32.107 +		retval = copy_ldt(&mm->context, &old_mm->context);
  32.108 +		up(&old_mm->context.sem);
  32.109 +	}
  32.110 +	return retval;
  32.111 +}
  32.112 +
  32.113 +/*
  32.114 + * No need to lock the MM as we are the last user
  32.115 + * Do not touch the ldt register, we are already
  32.116 + * in the next thread.
  32.117 + */
  32.118 +void destroy_context(struct mm_struct *mm)
  32.119 +{
  32.120 +	if (mm->context.size) {
  32.121 +		make_pages_writeable(
  32.122 +			mm->context.ldt, 
  32.123 +			(mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
  32.124 +		flush_page_update_queue();
  32.125 +		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
  32.126 +			vfree(mm->context.ldt);
  32.127 +		else
  32.128 +			kfree(mm->context.ldt);
  32.129 +		mm->context.size = 0;
  32.130 +	}
  32.131 +}
  32.132 +
  32.133 +static int read_ldt(void * ptr, unsigned long bytecount)
  32.134 +{
  32.135 +	int err;
  32.136 +	unsigned long size;
  32.137 +	struct mm_struct * mm = current->mm;
  32.138 +
  32.139 +	if (!mm->context.size)
  32.140 +		return 0;
  32.141 +	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
  32.142 +		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
  32.143 +
  32.144 +	down(&mm->context.sem);
  32.145 +	size = mm->context.size*LDT_ENTRY_SIZE;
  32.146 +	if (size > bytecount)
  32.147 +		size = bytecount;
  32.148 +
  32.149 +	err = 0;
  32.150 +	if (copy_to_user(ptr, mm->context.ldt, size))
  32.151 +		err = -EFAULT;
  32.152 +	up(&mm->context.sem);
  32.153 +	if (err < 0)
  32.154 +		return err;
  32.155 +	if (size != bytecount) {
  32.156 +		/* zero-fill the rest */
  32.157 +		clear_user(ptr+size, bytecount-size);
  32.158 +	}
  32.159 +	return bytecount;
  32.160 +}
  32.161 +
  32.162 +
  32.163 +static int read_default_ldt(void * ptr, unsigned long bytecount)
  32.164 +{
  32.165 +    int err;
  32.166 +    unsigned long size;
  32.167 +    void *address;
  32.168 +
  32.169 +    err = 0;
  32.170 +    address = &default_ldt[0];
  32.171 +    size = 5*sizeof(struct desc_struct);
  32.172 +    if (size > bytecount)
  32.173 +        size = bytecount;
  32.174 +
  32.175 +    err = size;
  32.176 +    if (copy_to_user(ptr, address, size))
  32.177 +        err = -EFAULT;
  32.178 +
  32.179 +    return err;
  32.180 +}
  32.181 +
  32.182 +static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
  32.183 +{
  32.184 +    struct mm_struct * mm = current->mm;
  32.185 +    __u32 entry_1, entry_2, *lp;
  32.186 +    unsigned long phys_lp, max_limit;
  32.187 +    int error;
  32.188 +    struct modify_ldt_ldt_s ldt_info;
  32.189 +
  32.190 +    error = -EINVAL;
  32.191 +    if (bytecount != sizeof(ldt_info))
  32.192 +        goto out;
  32.193 +    error = -EFAULT; 	
  32.194 +    if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
  32.195 +        goto out;
  32.196 +
  32.197 +    error = -EINVAL;
  32.198 +    if (ldt_info.entry_number >= LDT_ENTRIES)
  32.199 +        goto out;
  32.200 +    if (ldt_info.contents == 3) {
  32.201 +        if (oldmode)
  32.202 +            goto out;
  32.203 +        if (ldt_info.seg_not_present == 0)
  32.204 +            goto out;
  32.205 +    }
  32.206 +
  32.207 +    /*
  32.208 +     * This makes our tests for overlap with Xen space easier. There's no good
  32.209 +     * reason to have a user segment starting this high anyway.
  32.210 +     */
  32.211 +    if (ldt_info.base_addr >= PAGE_OFFSET)
  32.212 +        goto out;
  32.213 +
  32.214 +    down(&mm->context.sem);
  32.215 +    if (ldt_info.entry_number >= mm->context.size) {
  32.216 +      error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
  32.217 +      if (error < 0)
  32.218 +	goto out_unlock;
  32.219 +    }
  32.220 +
  32.221 +
  32.222 +    lp = (__u32 *)((ldt_info.entry_number<<3) + (char *)mm->context.ldt);
  32.223 +    phys_lp = arbitrary_virt_to_phys(lp);
  32.224 +
  32.225 +    /* Allow LDTs to be cleared by the user. */
  32.226 +    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
  32.227 +        if (oldmode ||
  32.228 +            (ldt_info.contents == 0		&&
  32.229 +             ldt_info.read_exec_only == 1	&&
  32.230 +             ldt_info.seg_32bit == 0		&&
  32.231 +             ldt_info.limit_in_pages == 0	&&
  32.232 +             ldt_info.seg_not_present == 1	&&
  32.233 +             ldt_info.useable == 0 )) {
  32.234 +            entry_1 = 0;
  32.235 +            entry_2 = 0;
  32.236 +            goto install;
  32.237 +        }
  32.238 +    }
  32.239 +
  32.240 +    max_limit = HYPERVISOR_VIRT_START - ldt_info.base_addr;
  32.241 +    if ( ldt_info.limit_in_pages )
  32.242 +        max_limit >>= PAGE_SHIFT;
  32.243 +    max_limit--;
  32.244 +    if ( (ldt_info.limit & 0xfffff) > (max_limit & 0xfffff) )
  32.245 +        ldt_info.limit = max_limit;
  32.246 +
  32.247 +    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
  32.248 +        (ldt_info.limit & 0x0ffff);
  32.249 +    entry_2 = (ldt_info.base_addr & 0xff000000) |
  32.250 +        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
  32.251 +        (ldt_info.limit & 0xf0000) |
  32.252 +        ((ldt_info.read_exec_only ^ 1) << 9) |
  32.253 +        (ldt_info.contents << 10) |
  32.254 +        ((ldt_info.seg_not_present ^ 1) << 15) |
  32.255 +        (ldt_info.seg_32bit << 22) |
  32.256 +        (ldt_info.limit_in_pages << 23) |
  32.257 +        0x7000;
  32.258 +    if (!oldmode)
  32.259 +        entry_2 |= (ldt_info.useable << 20);
  32.260 +
  32.261 +    /* Install the new entry ...  */
  32.262 + install:
  32.263 +    error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
  32.264 +
  32.265 + out_unlock:
  32.266 +    up(&mm->context.sem);
  32.267 + out:
  32.268 +    return error;
  32.269 +}
  32.270 +
  32.271 +asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
  32.272 +{
  32.273 +    int ret = -ENOSYS;
  32.274 +
  32.275 +    switch (func) {
  32.276 +    case 0:
  32.277 +        ret = read_ldt(ptr, bytecount);
  32.278 +        break;
  32.279 +    case 1:
  32.280 +        ret = write_ldt(ptr, bytecount, 1);
  32.281 +        break;
  32.282 +    case 2:
  32.283 +        ret = read_default_ldt(ptr, bytecount);
  32.284 +        break;
  32.285 +    case 0x11:
  32.286 +        ret = write_ldt(ptr, bytecount, 0);
  32.287 +        break;
  32.288 +    }
  32.289 +    return ret;
  32.290 +}
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/pci-dma.c	Tue Mar 23 10:40:28 2004 +0000
    33.3 @@ -0,0 +1,37 @@
    33.4 +/*
    33.5 + * Dynamic DMA mapping support.
    33.6 + *
    33.7 + * On i386 there is no hardware dynamic DMA address translation,
    33.8 + * so consistent alloc/free are merely page allocation/freeing.
    33.9 + * The rest of the dynamic DMA mapping interface is implemented
   33.10 + * in asm/pci.h.
   33.11 + */
   33.12 +
   33.13 +#include <linux/types.h>
   33.14 +#include <linux/mm.h>
   33.15 +#include <linux/string.h>
   33.16 +#include <linux/pci.h>
   33.17 +#include <asm/io.h>
   33.18 +
   33.19 +void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   33.20 +			   dma_addr_t *dma_handle)
   33.21 +{
   33.22 +	void *ret;
   33.23 +	int gfp = GFP_ATOMIC;
   33.24 +
   33.25 +	if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
   33.26 +		gfp |= GFP_DMA;
   33.27 +	ret = (void *)__get_free_pages(gfp, get_order(size));
   33.28 +
   33.29 +	if (ret != NULL) {
   33.30 +		memset(ret, 0, size);
   33.31 +		*dma_handle = virt_to_bus(ret);
   33.32 +	}
   33.33 +	return ret;
   33.34 +}
   33.35 +
   33.36 +void pci_free_consistent(struct pci_dev *hwdev, size_t size,
   33.37 +			 void *vaddr, dma_addr_t dma_handle)
   33.38 +{
   33.39 +	free_pages((unsigned long)vaddr, get_order(size));
   33.40 +}
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.c	Tue Mar 23 10:40:28 2004 +0000
    34.3 @@ -0,0 +1,410 @@
    34.4 +/*
    34.5 + *	Low-Level PCI Access for i386 machines
    34.6 + *
    34.7 + * Copyright 1993, 1994 Drew Eckhardt
    34.8 + *      Visionary Computing
    34.9 + *      (Unix and Linux consulting and custom programming)
   34.10 + *      Drew@Colorado.EDU
   34.11 + *      +1 (303) 786-7975
   34.12 + *
   34.13 + * Drew's work was sponsored by:
   34.14 + *	iX Multiuser Multitasking Magazine
   34.15 + *	Hannover, Germany
   34.16 + *	hm@ix.de
   34.17 + *
   34.18 + * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
   34.19 + *
   34.20 + * For more information, please consult the following manuals (look at
   34.21 + * http://www.pcisig.com/ for how to get them):
   34.22 + *
   34.23 + * PCI BIOS Specification
   34.24 + * PCI Local Bus Specification
   34.25 + * PCI to PCI Bridge Specification
   34.26 + * PCI System Design Guide
   34.27 + *
   34.28 + *
   34.29 + * CHANGELOG :
   34.30 + * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
   34.31 + *	Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
   34.32 + *
   34.33 + * Jan 5,  1995 : Modified to probe PCI hardware at boot time by Frederic
   34.34 + *     Potter, potter@cao-vlsi.ibp.fr
   34.35 + *
   34.36 + * Jan 10, 1995 : Modified to store the information about configured pci
   34.37 + *      devices into a list, which can be accessed via /proc/pci by
   34.38 + *      Curtis Varner, cvarner@cs.ucr.edu
   34.39 + *
   34.40 + * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
   34.41 + *	Alpha version. Intel & UMC chipset support only.
   34.42 + *
   34.43 + * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
   34.44 + *	moved to drivers/pci/pci.c.
   34.45 + *
   34.46 + * Dec 7, 1996  : Added support for direct configuration access of boards
   34.47 + *      with Intel compatible access schemes (tsbogend@alpha.franken.de)
   34.48 + *
   34.49 + * Feb 3, 1997  : Set internal functions to static, save/restore flags
   34.50 + *	avoid dead locks reading broken PCI BIOS, werner@suse.de 
   34.51 + *
   34.52 + * Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
   34.53 + *	(mj@atrey.karlin.mff.cuni.cz)
   34.54 + *
   34.55 + * May 7,  1997 : Added some missing cli()'s. [mj]
   34.56 + * 
   34.57 + * Jun 20, 1997 : Corrected problems in "conf1" type accesses.
   34.58 + *      (paubert@iram.es)
   34.59 + *
   34.60 + * Aug 2,  1997 : Split to PCI BIOS handling and direct PCI access parts
   34.61 + *	and cleaned it up...     Martin Mares <mj@atrey.karlin.mff.cuni.cz>
   34.62 + *
   34.63 + * Feb 6,  1998 : No longer using BIOS to find devices and device classes. [mj]
   34.64 + *
   34.65 + * May 1,  1998 : Support for peer host bridges. [mj]
   34.66 + *
   34.67 + * Jun 19, 1998 : Changed to use spinlocks, so that PCI configuration space
   34.68 + *	can be accessed from interrupts even on SMP systems. [mj]
   34.69 + *
   34.70 + * August  1998 : Better support for peer host bridges and more paranoid
   34.71 + *	checks for direct hardware access. Ugh, this file starts to look as
   34.72 + *	a large gallery of common hardware bug workarounds (watch the comments)
   34.73 + *	-- the PCI specs themselves are sane, but most implementors should be
   34.74 + *	hit hard with \hammer scaled \magstep5. [mj]
   34.75 + *
   34.76 + * Jan 23, 1999 : More improvements to peer host bridge logic. i450NX fixup. [mj]
   34.77 + *
   34.78 + * Feb 8,  1999 : Added UM8886BF I/O address fixup. [mj]
   34.79 + *
   34.80 + * August  1999 : New resource management and configuration access stuff. [mj]
   34.81 + *
   34.82 + * Sep 19, 1999 : Use PCI IRQ routing tables for detection of peer host bridges.
   34.83 + *		  Based on ideas by Chris Frantz and David Hinds. [mj]
   34.84 + *
   34.85 + * Sep 28, 1999 : Handle unreported/unassigned IRQs. Thanks to Shuu Yamaguchi
   34.86 + *		  for a lot of patience during testing. [mj]
   34.87 + *
   34.88 + * Oct  8, 1999 : Split to pci-i386.c, pci-pc.c and pci-visws.c. [mj]
   34.89 + */
   34.90 +
   34.91 +#include <linux/types.h>
   34.92 +#include <linux/kernel.h>
   34.93 +#include <linux/pci.h>
   34.94 +#include <linux/init.h>
   34.95 +#include <linux/ioport.h>
   34.96 +#include <linux/errno.h>
   34.97 +
   34.98 +#include "pci-i386.h"
   34.99 +
/*
 * Write the (already-decided) address of one resource back into the
 * device's configuration space, and read it back to verify the device
 * latched it.
 *
 * @dev:      device whose BAR/ROM register is updated
 * @root:     unused here (kept for the arch-independent prototype)
 * @res:      resource holding the address to program
 * @resource: index: 0-5 = standard BARs, PCI_ROM_RESOURCE = expansion ROM
 */
void
pcibios_update_resource(struct pci_dev *dev, struct resource *root,
			struct resource *res, int resource)
{
    u32 new, check;
    int reg;

    /* Low flag bits (I/O vs memory, prefetch, type) ride along with the
     * address in the same dword. */
    new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
    if (resource < 6) {
        /* Standard BARs are consecutive dwords from PCI_BASE_ADDRESS_0. */
        reg = PCI_BASE_ADDRESS_0 + 4*resource;
    } else if (resource == PCI_ROM_RESOURCE) {
        /* ROM BAR: also set the decode-enable bit in both the resource
         * flags and the value written to the device. */
        res->flags |= PCI_ROM_ADDRESS_ENABLE;
        new |= PCI_ROM_ADDRESS_ENABLE;
        reg = dev->rom_base_reg;
    } else {
        /* Somebody might have asked allocation of a non-standard resource */
        return;
    }
	
    /* Write, then read back; compare only the address bits relevant for
     * the region type (I/O vs memory mask differs). */
    pci_write_config_dword(dev, reg, new);
    pci_read_config_dword(dev, reg, &check);
    if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
        printk(KERN_ERR "PCI: Error while updating region "
               "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
               new, check);
    }
}
  34.127 +
  34.128 +/*
  34.129 + * We need to avoid collisions with `mirrored' VGA ports
  34.130 + * and other strange ISA hardware, so we always want the
  34.131 + * addresses to be allocated in the 0x000-0x0ff region
  34.132 + * modulo 0x400.
  34.133 + *
  34.134 + * Why? Because some silly external IO cards only decode
  34.135 + * the low 10 bits of the IO address. The 0x00-0xff region
  34.136 + * is reserved for motherboard devices that decode all 16
  34.137 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
  34.138 + * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
  34.140 + */
  34.141 +void
  34.142 +pcibios_align_resource(void *data, struct resource *res,
  34.143 +		       unsigned long size, unsigned long align)
  34.144 +{
  34.145 +    if (res->flags & IORESOURCE_IO) {
  34.146 +        unsigned long start = res->start;
  34.147 +
  34.148 +        if (start & 0x300) {
  34.149 +            start = (start + 0x3ff) & ~0x3ff;
  34.150 +            res->start = start;
  34.151 +        }
  34.152 +    }
  34.153 +}
  34.154 +
  34.155 +
  34.156 +/*
  34.157 + *  Handle resources of PCI devices.  If the world were perfect, we could
  34.158 + *  just allocate all the resource regions and do nothing more.  It isn't.
  34.159 + *  On the other hand, we cannot just re-allocate all devices, as it would
  34.160 + *  require us to know lots of host bridge internals.  So we attempt to
  34.161 + *  keep as much of the original configuration as possible, but tweak it
  34.162 + *  when it's found to be wrong.
  34.163 + *
  34.164 + *  Known BIOS problems we have to work around:
  34.165 + *	- I/O or memory regions not configured
  34.166 + *	- regions configured, but not enabled in the command register
  34.167 + *	- bogus I/O addresses above 64K used
  34.168 + *	- expansion ROMs left enabled (this may sound harmless, but given
  34.169 + *	  the fact the PCI specs explicitly allow address decoders to be
  34.170 + *	  shared between expansion ROMs and other resource regions, it's
  34.171 + *	  at least dangerous)
  34.172 + *
  34.173 + *  Our solution:
  34.174 + *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
  34.175 + *	    This gives us fixed barriers on where we can allocate.
  34.176 + *	(2) Allocate resources for all enabled devices.  If there is
  34.177 + *	    a collision, just mark the resource as unallocated. Also
  34.178 + *	    disable expansion ROMs during this step.
  34.179 + *	(3) Try to allocate resources for disabled devices.  If the
  34.180 + *	    resources were assigned correctly, everything goes well,
  34.181 + *	    if they weren't, they won't disturb allocation of other
  34.182 + *	    resources.
  34.183 + *	(4) Assign new addresses to resources which were either
  34.184 + *	    not configured at all or misconfigured.  If explicitly
  34.185 + *	    requested by the user, configure expansion ROM address
  34.186 + *	    as well.
  34.187 + */
  34.188 +
/*
 * Claim the resource windows of every PCI-to-PCI bridge in the bus tree,
 * recursing into child buses.  Registering the bridge windows first gives
 * later device allocation fixed barriers to allocate within (step (1) of
 * the strategy described above).
 *
 * NOTE(review): the unconditional printk()s here look like debug output
 * left enabled; consider demoting them to DBG() — confirm intent.
 */
static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
{
    struct list_head *ln;
    struct pci_bus *bus;
    struct pci_dev *dev;
    int idx;
    struct resource *r, *pr;

    /* Depth-First Search on bus tree */
    for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
        bus = pci_bus_b(ln);
        if ((dev = bus->self)) {
            printk("alloc bus res: %s\n", dev->slot_name);
            for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
                r = &dev->resource[idx];
                /* Window never configured — nothing to register. */
                if (!r->start)
                {
                    printk("  res1: 0x%08lx-0x%08lx f=%lx\n",
                           r->start, r->end, r->flags);

                    continue;
                }
                pr = pci_find_parent_resource(dev, r);
                if (!pr || request_resource(pr, r) < 0)
                    printk(KERN_ERR "PCI: Cannot allocate resource region %d "
                           "of bridge %s (%p)\n", idx, dev->slot_name, pr);
                printk("  res2: %08lx-%08lx f=%lx\n",
                       r->start, r->end, r->flags);
            }
        }
        /* Recurse into the buses behind this bridge. */
        pcibios_allocate_bus_resources(&bus->children);
    }
}
  34.222 +
/*
 * Claim the BARs of all devices (steps (2) and (3) of the strategy above).
 *
 * @pass: 0 = enabled devices (their decoders are live, so they must be
 *            registered first), and additionally disable expansion ROMs;
 *        1 = disabled devices (their addresses may be stale/bogus and
 *            must not displace already-claimed regions).
 *
 * A BAR whose region cannot be claimed is zeroed (keeping its length in
 * r->end) so pcibios_assign_resources() gives it a fresh address later.
 */
static void __init pcibios_allocate_resources(int pass)
{
    struct pci_dev *dev;
    int idx, disabled;
    u16 command;
    struct resource *r, *pr;

    pci_for_each_dev(dev) {
        pci_read_config_word(dev, PCI_COMMAND, &command);
        for(idx = 0; idx < 6; idx++) {
            r = &dev->resource[idx];
            if (r->parent)		/* Already allocated */
                continue;
            if (!r->start)		/* Address not assigned at all */
                continue;
            /* A BAR counts as "disabled" when the matching decode bit
             * (I/O or memory) is clear in the command register. */
            if (r->flags & IORESOURCE_IO)
                disabled = !(command & PCI_COMMAND_IO);
            else
                disabled = !(command & PCI_COMMAND_MEMORY);
            /* pass 0 handles enabled BARs, pass 1 the disabled ones. */
            if (pass == disabled) {
                printk("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d) (%s)\n",
                       r->start, r->end, r->flags, disabled, pass, dev->slot_name);
                pr = pci_find_parent_resource(dev, r);
                if (!pr || request_resource(pr, r) < 0) {
                    printk(KERN_ERR "PCI: Cannot allocate resource region %d"
                           " of device %s (%p)\n", idx, dev->slot_name, pr);
                    /* We'll assign a new address later */
                    r->end -= r->start;
                    r->start = 0;
                }
            }
        }
        if (!pass) {
            r = &dev->resource[PCI_ROM_RESOURCE];
            if (r->flags & PCI_ROM_ADDRESS_ENABLE) {
                /* Turn the ROM off, leave the resource region, but keep
                 * it unregistered.  ROM decoders may be shared with BAR
                 * decoders, so a live ROM is dangerous (see header). */
                u32 reg;
                printk("PCI: Switching off ROM of %s\n", dev->slot_name);
                r->flags &= ~PCI_ROM_ADDRESS_ENABLE;
                pci_read_config_dword(dev, dev->rom_base_reg, &reg);
                pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
            }
        }
    }
}
  34.268 +
/*
 * Step (4): hand out fresh addresses to every BAR that is still
 * unassigned (r->start == 0 but a length recorded in r->end), and — only
 * if the user asked via PCI_ASSIGN_ROMS — to expansion ROMs as well.
 */
static void __init pcibios_assign_resources(void)
{
    struct pci_dev *dev;
    int idx;
    struct resource *r;

    pci_for_each_dev(dev) {
        int class = dev->class >> 8;

        /* Don't touch classless devices and host bridges */
        if (!class || class == PCI_CLASS_BRIDGE_HOST)
            continue;

        for(idx=0; idx<6; idx++) {
            r = &dev->resource[idx];

            /*
             *  Don't touch IDE controllers and I/O ports of video cards!
             */
            if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
                (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
                continue;

            /*
             *  We shall assign a new address to this resource, either because
             *  the BIOS forgot to do so or because we have decided the old
             *  address was unusable for some reason.
             */
            if (!r->start && r->end)
                pci_assign_resource(dev, idx);
        }

        if (pci_probe & PCI_ASSIGN_ROMS) {
            /* Force reassignment: zero the base, keep only the length. */
            r = &dev->resource[PCI_ROM_RESOURCE];
            r->end -= r->start;
            r->start = 0;
            if (r->end)
                pci_assign_resource(dev, PCI_ROM_RESOURCE);
        }
    }
}
  34.310 +
  34.311 +void __init pcibios_set_cacheline_size(void)
  34.312 +{
  34.313 +    struct cpuinfo_x86 *c = &boot_cpu_data;
  34.314 +
  34.315 +    pci_cache_line_size = 32 >> 2;
  34.316 +    if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
  34.317 +        pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
  34.318 +    else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
  34.319 +        pci_cache_line_size = 128 >> 2;	/* P4 */
  34.320 +}
  34.321 +
/*
 * Run the full resource-allocation strategy described above, in order:
 * bridge windows first, then enabled devices, then disabled devices,
 * and finally assign addresses to whatever is still unconfigured.
 * The ordering is load-bearing — do not reorder these calls.
 */
void __init pcibios_resource_survey(void)
{
    DBG("PCI: Allocating resources\n");
    pcibios_allocate_bus_resources(&pci_root_buses);
    pcibios_allocate_resources(0);
    pcibios_allocate_resources(1);
    pcibios_assign_resources();
}
  34.330 +
  34.331 +int pcibios_enable_resources(struct pci_dev *dev, int mask)
  34.332 +{
  34.333 +    u16 cmd, old_cmd;
  34.334 +    int idx;
  34.335 +    struct resource *r;
  34.336 +
  34.337 +    pci_read_config_word(dev, PCI_COMMAND, &cmd);
  34.338 +    old_cmd = cmd;
  34.339 +    for(idx=0; idx<6; idx++) {
  34.340 +        /* Only set up the requested stuff */
  34.341 +        if (!(mask & (1<<idx)))
  34.342 +            continue;
  34.343 +			
  34.344 +        r = &dev->resource[idx];
  34.345 +        if (!r->start && r->end) {
  34.346 +            printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
  34.347 +            return -EINVAL;
  34.348 +        }
  34.349 +        if (r->flags & IORESOURCE_IO)
  34.350 +            cmd |= PCI_COMMAND_IO;
  34.351 +        if (r->flags & IORESOURCE_MEM)
  34.352 +            cmd |= PCI_COMMAND_MEMORY;
  34.353 +    }
  34.354 +    if (dev->resource[PCI_ROM_RESOURCE].start)
  34.355 +        cmd |= PCI_COMMAND_MEMORY;
  34.356 +    if (cmd != old_cmd) {
  34.357 +        printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
  34.358 +        pci_write_config_word(dev, PCI_COMMAND, cmd);
  34.359 +    }
  34.360 +    return 0;
  34.361 +}
  34.362 +
  34.363 +/*
  34.364 + *  If we set up a device for bus mastering, we need to check the latency
  34.365 + *  timer as certain crappy BIOSes forget to set it properly.
  34.366 + */
  34.367 +unsigned int pcibios_max_latency = 255;
  34.368 +
  34.369 +void pcibios_set_master(struct pci_dev *dev)
  34.370 +{
  34.371 +    u8 lat;
  34.372 +    pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
  34.373 +    if (lat < 16)
  34.374 +        lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
  34.375 +    else if (lat > pcibios_max_latency)
  34.376 +        lat = pcibios_max_latency;
  34.377 +    else
  34.378 +        return;
  34.379 +    printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", dev->slot_name, lat);
  34.380 +    pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
  34.381 +}
  34.382 +
/*
 * mmap a PCI resource into a user process's address space.
 *
 * @mmap_state:    pci_mmap_io is rejected (x86 I/O ports are not
 *                 memory-mapped); only pci_mmap_mem is supported.
 * @write_combine: ignored — see comment below.
 *
 * Returns 0 on success, -EINVAL for I/O space, -EAGAIN if the remap fails.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
    unsigned long prot;

    /* I/O space cannot be accessed via normal processor loads and
     * stores on this platform.
     */
    if (mmap_state == pci_mmap_io)
        return -EINVAL;

    /* Leave vm_pgoff as-is, the PCI space address is the physical
     * address on this platform.
     */
    vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);

    /* Mark the mapping cache-disabled/write-through on CPUs that have
     * those page bits (i486+); device memory must not be cached. */
    prot = pgprot_val(vma->vm_page_prot);
    if (boot_cpu_data.x86 > 3)
        prot |= _PAGE_PCD | _PAGE_PWT;
    vma->vm_page_prot = __pgprot(prot);

    /* Write-combine setting is ignored, it is changed via the mtrr
     * interfaces on this platform.
     */
    if (remap_page_range(vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
                         vma->vm_end - vma->vm_start,
                         vma->vm_page_prot))
        return -EAGAIN;

    return 0;
}
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/pci-i386.h	Tue Mar 23 10:40:28 2004 +0000
    35.3 @@ -0,0 +1,71 @@
    35.4 +/*
    35.5 + *	Low-Level PCI Access for i386 machines.
    35.6 + *
    35.7 + *	(c) 1999 Martin Mares <mj@ucw.cz>
    35.8 + */
    35.9 +
   35.10 +#undef DEBUG
   35.11 +
   35.12 +#ifdef DEBUG
   35.13 +#define DBG(x...) printk(x)
   35.14 +#else
   35.15 +#define DBG(x...)
   35.16 +#endif
   35.17 +
   35.18 +#define PCI_PROBE_BIOS		0x0001
   35.19 +#define PCI_PROBE_CONF1		0x0002
   35.20 +#define PCI_PROBE_CONF2		0x0004
   35.21 +#define PCI_NO_SORT		0x0100
   35.22 +#define PCI_BIOS_SORT		0x0200
   35.23 +#define PCI_NO_CHECKS		0x0400
   35.24 +#define PCI_ASSIGN_ROMS		0x1000
   35.25 +#define PCI_BIOS_IRQ_SCAN	0x2000
   35.26 +#define PCI_ASSIGN_ALL_BUSSES	0x4000
   35.27 +
   35.28 +extern unsigned int pci_probe;
   35.29 +
   35.30 +/* pci-i386.c */
   35.31 +
   35.32 +extern unsigned int pcibios_max_latency;
   35.33 +extern u8 pci_cache_line_size;
   35.34 +
   35.35 +void pcibios_resource_survey(void);
   35.36 +void pcibios_set_cacheline_size(void);
   35.37 +int pcibios_enable_resources(struct pci_dev *, int);
   35.38 +
   35.39 +/* pci-pc.c */
   35.40 +
   35.41 +extern int pcibios_last_bus;
   35.42 +extern struct pci_bus *pci_root_bus;
   35.43 +extern struct pci_ops *pci_root_ops;
   35.44 +
   35.45 +/* pci-irq.c */
   35.46 +
/* One slot entry of the BIOS PIRQ routing table: how each of the four
 * interrupt pins (INTA#..INTD#) of a device can be routed. Layout is
 * fixed by the PCI IRQ Routing Table spec — keep packed. */
struct irq_info {
	u8 bus, devfn;			/* Bus, device and function */
	struct {
		u8 link;		/* IRQ line ID, chipset dependent, 0=not routed */
		u16 bitmap;		/* Available IRQs */
	} __attribute__((packed)) irq[4];	/* one entry per INTx pin */
	u8 slot;			/* Slot number, 0=onboard */
	u8 rfu;				/* reserved */
} __attribute__((packed));

/* Header of the BIOS "$PIR" IRQ routing table, followed by a variable
 * number of irq_info slot entries. Layout fixed by spec — keep packed. */
struct irq_routing_table {
	u32 signature;			/* PIRQ_SIGNATURE should be here */
	u16 version;			/* PIRQ_VERSION */
	u16 size;			/* Table size in bytes */
	u8 rtr_bus, rtr_devfn;		/* Where the interrupt router lies */
	u16 exclusive_irqs;		/* IRQs devoted exclusively to PCI usage */
	u16 rtr_vendor, rtr_device;	/* Vendor and device ID of interrupt router */
	u32 miniport_data;		/* Miniport driver private data (unused here) */
	u8 rfu[11];			/* reserved */
	u8 checksum;			/* Modulo 256 checksum must give zero */
	struct irq_info slots[0];	/* variable-length tail of slot entries */
} __attribute__((packed));
   35.69 +
   35.70 +extern unsigned int pcibios_irq_mask;
   35.71 +
   35.72 +void pcibios_irq_init(void);
   35.73 +void pcibios_fixup_irqs(void);
   35.74 +void pcibios_enable_irq(struct pci_dev *dev);
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/pci-irq.c	Tue Mar 23 10:40:28 2004 +0000
    36.3 @@ -0,0 +1,90 @@
    36.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    36.5 + ****************************************************************************
    36.6 + * (C) 2004 - Rolf Neugebauer - Intel Research Cambridge
    36.7 + ****************************************************************************
    36.8 + *
 *        File: pci-irq.c
   36.10 + *      Author: Rolf Neugebauer (rolf.neugebauer@intel.com)
   36.11 + *        Date: Mar 2004
   36.12 + *
   36.13 + * Description: XenoLinux wrappers for PCI interrupt handling.
   36.14 + *              very simple since someone else is doing all the hard bits
   36.15 + */
   36.16 +
   36.17 +
   36.18 +/*
   36.19 + *	Low-Level PCI Support for PC -- Routing of Interrupts
   36.20 + *
   36.21 + *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
   36.22 + */
   36.23 +
   36.24 +#include <linux/config.h>
   36.25 +#include <linux/types.h>
   36.26 +#include <linux/kernel.h>
   36.27 +#include <linux/pci.h>
   36.28 +#include <linux/init.h>
   36.29 +#include <linux/interrupt.h>
   36.30 +#include <linux/irq.h>
   36.31 +
   36.32 +#include "pci-i386.h"
   36.33 +
   36.34 +#include <asm/hypervisor-ifs/physdev.h>
   36.35 +
   36.36 +unsigned int pcibios_irq_mask = 0xfff8;
   36.37 +
/*
 * Stub kept to satisfy callers in the generic PCI code.  Under Xen the
 * guest presumably must not touch the EISA edge/level control registers
 * directly — TODO confirm the hypervisor handles ELCR programming.
 */
void eisa_set_level_irq(unsigned int irq)
{
    /* dummy */
}
   36.42 +
/*
 * IRQ-routing init hook.  No router probing is needed here — the
 * hypervisor resolves IRQs (see pcibios_fixup_irqs) — so this only
 * logs that it ran.
 */
void __init pcibios_irq_init(void)
{
	printk("PCI: IRQ init\n");
}
   36.47 +
   36.48 +void __init pcibios_fixup_irqs(void)
   36.49 +{
   36.50 +	struct pci_dev *dev;
   36.51 +    physdev_op_t op;
   36.52 +	int ret;
   36.53 +
   36.54 +
   36.55 +	printk("PCI: IRQ fixup\n");
   36.56 +	pci_for_each_dev(dev) {
   36.57 +
   36.58 +        op.cmd  = PHYSDEVOP_FIND_IRQ;
   36.59 +        op.u.find_irq.seg  = 0;
   36.60 +        op.u.find_irq.bus  = dev->bus->number;
   36.61 +        op.u.find_irq.dev  = PCI_SLOT(dev->devfn);
   36.62 +        op.u.find_irq.func = PCI_FUNC(dev->devfn);
   36.63 +
   36.64 +        if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
   36.65 +        {
   36.66 +            printk(KERN_ALERT "pci find irq error\n");
   36.67 +            return;
   36.68 +        }
   36.69 +
   36.70 +        dev->irq = op.u.find_irq.irq;
   36.71 +        printk(KERN_INFO "PCI IRQ: [%02x:%02x:%02x] -> %d\n",
   36.72 +               dev->bus->number, PCI_SLOT(dev->devfn),
   36.73 +               PCI_FUNC(dev->devfn), dev->irq);
   36.74 +    }
   36.75 +    return;
   36.76 +}
   36.77 +
/*
 * Stub: there is no local IRQ-balancing bookkeeping to penalize when an
 * IRQ is claimed by ISA — IRQ assignment is done by the hypervisor.
 */
void pcibios_penalize_isa_irq(int irq)
{
    /* dummy */
}
   36.82 +
   36.83 +void pcibios_enable_irq(struct pci_dev *dev)
   36.84 +{
   36.85 +	u8 pin;
   36.86 +	
   36.87 +	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
   36.88 +
   36.89 +	if (pin  && !dev->irq) {
   36.90 +		printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of "
   36.91 +               "device %s.\n", 'A' + pin - 1, dev->slot_name);
   36.92 +	}
   36.93 +}
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/pci-pc.c	Tue Mar 23 10:40:28 2004 +0000
    37.3 @@ -0,0 +1,364 @@
    37.4 +/*
    37.5 + *	Low-Level PCI Support for PC
    37.6 + *
    37.7 + *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
    37.8 + *
    37.9 + * adjusted to use Xen's interface by Rolf Neugebauer
   37.10 + */
   37.11 +
   37.12 +#include <linux/config.h>
   37.13 +#include <linux/types.h>
   37.14 +#include <linux/kernel.h>
   37.15 +#include <linux/sched.h>
   37.16 +#include <linux/pci.h>
   37.17 +#include <linux/init.h>
   37.18 +#include <linux/ioport.h>
   37.19 +
   37.20 +#include <asm/segment.h>
   37.21 +#include <asm/io.h>
   37.22 +
   37.23 +#include <asm/hypervisor-ifs/hypervisor-if.h>
   37.24 +#include <asm/hypervisor-ifs/physdev.h>
   37.25 +
   37.26 +#include "pci-i386.h"
   37.27 +
/* Highest bus number known; -1 = unknown (disables peer-bridge fixup). */
int pcibios_last_bus = -1;
/* Root bus and access methods, filled in by pcibios_init(). */
struct pci_bus *pci_root_bus = NULL;
struct pci_ops *pci_root_ops = NULL;

/* Raw (seg/bus/dev/fn) config-space accessors, set to the Xen-backed
 * pci_confx_* functions by pcibios_config_init(). */
int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL;
int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL;

/* Always 0 here: ACPI PRT is not used under Xen. */
static int pci_using_acpi_prt = 0;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 * NOTE(review): currently taken nowhere in this file; accesses go
 * through the hypervisor — confirm whether it is still needed.
 */
static spinlock_t pci_config_lock = SPIN_LOCK_UNLOCKED;

/* Probe-method flags (see pci-i386.h), adjustable via pcibios_setup(). */
unsigned int pci_probe = PCI_PROBE_BIOS;
   37.44 +
   37.45 +/*
   37.46 + * Functions for accessing PCI configuration space with type 1 accesses
   37.47 + */
   37.48 +
   37.49 +static int pci_confx_read (int seg, int bus, int dev, int fn, int reg, 
   37.50 +                           int len, u32 *value)
   37.51 +{
   37.52 +    int ret;
   37.53 +    physdev_op_t op;
   37.54 +
   37.55 +    if (bus > 255 || dev > 31 || fn > 7 || reg > 255)
   37.56 +        return -EINVAL;
   37.57 +
   37.58 +    op.cmd = PHYSDEVOP_CFGREG_READ;
   37.59 +    op.u.cfg_read.seg  = seg;
   37.60 +    op.u.cfg_read.bus  = bus;
   37.61 +    op.u.cfg_read.dev  = dev;
   37.62 +    op.u.cfg_read.func = fn;
   37.63 +    op.u.cfg_read.reg  = reg;
   37.64 +    op.u.cfg_read.len  = len;
   37.65 +
   37.66 +    if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
   37.67 +    {
   37.68 +        //printk(KERN_ALERT "pci config read error\n");
   37.69 +        return ret;
   37.70 +    }
   37.71 +
   37.72 +    *value = op.u.cfg_read.value;
   37.73 +
   37.74 +    return 0;
   37.75 +}
   37.76 +
   37.77 +static int pci_confx_write (int seg, int bus, int dev, int fn, int reg, 
   37.78 +                            int len, u32 value)
   37.79 +{
   37.80 +    int ret;
   37.81 +    physdev_op_t op;
   37.82 +
   37.83 +    if ((bus > 255 || dev > 31 || fn > 7 || reg > 255)) 
   37.84 +        return -EINVAL;
   37.85 +
   37.86 +    op.cmd = PHYSDEVOP_CFGREG_WRITE;
   37.87 +    op.u.cfg_write.seg   = seg;
   37.88 +    op.u.cfg_write.bus   = bus;
   37.89 +    op.u.cfg_write.dev   = dev;
   37.90 +    op.u.cfg_write.func  = fn;
   37.91 +    op.u.cfg_write.reg   = reg;
   37.92 +    op.u.cfg_write.len   = len;
   37.93 +    op.u.cfg_write.value = value;
   37.94 +
   37.95 +    if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
   37.96 +    {
   37.97 +        //printk(KERN_ALERT "pci config write error\n");
   37.98 +        return ret;
   37.99 +    }
  37.100 +    return 0;
  37.101 +}
  37.102 +
  37.103 +
  37.104 +static int pci_confx_read_config_byte(struct pci_dev *dev, int where, u8 *value)
  37.105 +{
  37.106 +    int result; 
  37.107 +    u32 data;
  37.108 +
  37.109 +    result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.110 +                            PCI_FUNC(dev->devfn), where, 1, &data);
  37.111 +
  37.112 +    *value = (u8)data;
  37.113 +
  37.114 +    return result;
  37.115 +}
  37.116 +
  37.117 +static int pci_confx_read_config_word(struct pci_dev *dev, int where, u16 *value)
  37.118 +{
  37.119 +    int result; 
  37.120 +    u32 data;
  37.121 +
  37.122 +    result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.123 +                            PCI_FUNC(dev->devfn), where, 2, &data);
  37.124 +
  37.125 +    *value = (u16)data;
  37.126 +
  37.127 +    return result;
  37.128 +}
  37.129 +
  37.130 +static int pci_confx_read_config_dword(struct pci_dev *dev, int where, u32 *value)
  37.131 +{
  37.132 +    return pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.133 +                          PCI_FUNC(dev->devfn), where, 4, value);
  37.134 +}
  37.135 +
  37.136 +static int pci_confx_write_config_byte(struct pci_dev *dev, int where, u8 value)
  37.137 +{
  37.138 +    return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.139 +                           PCI_FUNC(dev->devfn), where, 1, value);
  37.140 +}
  37.141 +
  37.142 +static int pci_confx_write_config_word(struct pci_dev *dev, int where, u16 value)
  37.143 +{
  37.144 +    return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.145 +                           PCI_FUNC(dev->devfn), where, 2, value);
  37.146 +}
  37.147 +
  37.148 +static int pci_confx_write_config_dword(struct pci_dev *dev, int where, u32 value)
  37.149 +{
  37.150 +    return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  37.151 +                           PCI_FUNC(dev->devfn), where, 4, value);
  37.152 +}
  37.153 +
/* Xen-backed config-space access methods.  Positional initializers:
 * the order must match the member order of struct pci_ops in
 * <linux/pci.h> (read byte/word/dword, then write byte/word/dword). */
static struct pci_ops pci_direct_confx = {
    pci_confx_read_config_byte,
    pci_confx_read_config_word,
    pci_confx_read_config_dword,
    pci_confx_write_config_byte,
    pci_confx_write_config_word,
    pci_confx_write_config_dword
};
  37.162 +
  37.163 +
  37.164 +
/*
 * "Probe" for the Xen config-space access method.  Unlike the native
 * pci_check_direct() this cannot fail: it always returns the Xen-backed
 * ops table.
 *
 * NOTE(review): the save_flags/cli around a plain printk looks vestigial
 * (inherited from pci_check_direct(), which pokes hardware here) —
 * confirm it can be dropped.
 */
static struct pci_ops * __devinit pci_check_xen(void)
{
    unsigned long flags;

    __save_flags(flags); __cli();

    printk(KERN_INFO "PCI: Using Xen interface\n");

    __restore_flags(flags);

    return &pci_direct_confx;
}
  37.177 +
/* No Xen-specific quirk fixups; the generic code still expects a
 * (zero-terminated) table to exist. */
struct pci_fixup pcibios_fixups[] = { {0}};
  37.179 +
  37.180 +
/* No BIOS $PIR routing table is exposed to the guest; IRQs come from
 * the hypervisor instead (see pci-irq.c).  Always returns NULL. */
struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
{
    return NULL;
}
  37.185 +
/* IRQ routing is owned by the hypervisor; report success without
 * touching anything. */
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
{
    return 0;
}
  37.190 +
  37.191 +/*
  37.192 + * Several buggy motherboards address only 16 devices and mirror
  37.193 + * them to next 16 IDs. We try to detect this `feature' on all
  37.194 + * primary buses (those containing host bridges as they are
  37.195 + * expected to be unique) and remove the ghost devices.
  37.196 + */
  37.197 +
  37.198 +static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
  37.199 +{
  37.200 +    struct list_head *ln, *mn;
  37.201 +    struct pci_dev *d, *e;
  37.202 +    int mirror = PCI_DEVFN(16,0);
  37.203 +    int seen_host_bridge = 0;
  37.204 +    int i;
  37.205 +
  37.206 +    DBG("PCI: Scanning for ghost devices on bus %d\n", b->number);
  37.207 +    for (ln=b->devices.next; ln != &b->devices; ln=ln->next) {
  37.208 +        d = pci_dev_b(ln);
  37.209 +        if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST)
  37.210 +            seen_host_bridge++;
  37.211 +        for (mn=ln->next; mn != &b->devices; mn=mn->next) {
  37.212 +            e = pci_dev_b(mn);
  37.213 +            if (e->devfn != d->devfn + mirror ||
  37.214 +                e->vendor != d->vendor ||
  37.215 +                e->device != d->device ||
  37.216 +                e->class != d->class)
  37.217 +                continue;
  37.218 +            for(i=0; i<PCI_NUM_RESOURCES; i++)
  37.219 +                if (e->resource[i].start != d->resource[i].start ||
  37.220 +                    e->resource[i].end != d->resource[i].end ||
  37.221 +                    e->resource[i].flags != d->resource[i].flags)
  37.222 +                    continue;
  37.223 +            break;
  37.224 +        }
  37.225 +        if (mn == &b->devices)
  37.226 +            return;
  37.227 +    }
  37.228 +    if (!seen_host_bridge)
  37.229 +        return;
  37.230 +    printk(KERN_WARNING "PCI: Ignoring ghost devices on bus %02x\n", b->number);
  37.231 +
  37.232 +    ln = &b->devices;
  37.233 +    while (ln->next != &b->devices) {
  37.234 +        d = pci_dev_b(ln->next);
  37.235 +        if (d->devfn >= mirror) {
  37.236 +            list_del(&d->global_list);
  37.237 +            list_del(&d->bus_list);
  37.238 +            kfree(d);
  37.239 +        } else
  37.240 +            ln = ln->next;
  37.241 +    }
  37.242 +}
  37.243 +
  37.244 +/*
  37.245 + * Discover remaining PCI buses in case there are peer host bridges.
  37.246 + * We use the number of last PCI bus provided by the PCI BIOS.
  37.247 + */
/*
 * Probe bus numbers up to pcibios_last_bus that were not reached through
 * bridges, and scan any that respond — these belong to peer host bridges.
 * Does nothing when pcibios_last_bus is unknown (-1, the default here).
 *
 * NOTE(review): `bus`/`dev` are throw-away on-stack shells with only the
 * fields needed by pci_read_config_word() filled in — intentional hack
 * inherited from the native i386 code; do not pass them anywhere else.
 */
static void __devinit pcibios_fixup_peer_bridges(void)
{
    int n;
    struct pci_bus bus;
    struct pci_dev dev;
    u16 l;
    
    if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
        return;
    DBG("PCI: Peer bridge fixup\n");
    for (n=0; n <= pcibios_last_bus; n++) {
        if (pci_bus_exists(&pci_root_buses, n))
            continue;
        bus.number = n;
        bus.ops = pci_root_ops;
        dev.bus = &bus;
        /* Probe function 0 of each slot; any valid vendor ID means a
         * live bus worth scanning. */
        for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
            if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
                l != 0x0000 && l != 0xffff) {
                DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
                printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
                pci_scan_bus(n, pci_root_ops, NULL);
                break;
            }
    }
}
  37.274 +
  37.275 +
  37.276 +/*
  37.277 + *  Called after each bus is probed, but before its children
  37.278 + *  are examined.
  37.279 + */
  37.280 +
  37.281 +void __devinit  pcibios_fixup_bus(struct pci_bus *b)
  37.282 +{
  37.283 +    pcibios_fixup_ghosts(b);
  37.284 +    pci_read_bridge_bases(b);
  37.285 +    return;
  37.286 +}
  37.287 +
  37.288 +struct pci_bus * __devinit pcibios_scan_root(int busnum)
  37.289 +{
  37.290 +    struct list_head *list;
  37.291 +    struct pci_bus *bus;
  37.292 +
  37.293 +    list_for_each(list, &pci_root_buses) {
  37.294 +        bus = pci_bus_b(list);
  37.295 +        if (bus->number == busnum) {
  37.296 +            /* Already scanned */
  37.297 +            return bus;
  37.298 +        }
  37.299 +    }
  37.300 +
  37.301 +    printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
  37.302 +
  37.303 +    return pci_scan_bus(busnum, pci_root_ops, NULL);
  37.304 +}
  37.305 +
void __devinit pcibios_config_init(void)
{
    /*
     * Unlike native Linux, we do not probe for PCI BIOS vs. direct
     * access here: all config-space traffic goes through Xen, so we
     * simply install the Xen access method and its read/write helpers.
     */

    pci_root_ops = pci_check_xen();
    pci_config_read = pci_confx_read;
    pci_config_write = pci_confx_write;

    return;
}
  37.319 +
/*
 * Main PCI init entry point: pick an access method, scan the root bus
 * (plus any peer bridges), fix up IRQs, then assign resources.
 */
void __init pcibios_init(void)
{
    if (!pci_root_ops)
        pcibios_config_init();
    if (!pci_root_ops) {
        /* pcibios_config_init() found no usable access method. */
        printk(KERN_WARNING "PCI: System does not support PCI\n");
        return;
    }

    pcibios_set_cacheline_size();

    printk(KERN_INFO "PCI: Probing PCI hardware\n");

    if (!pci_using_acpi_prt) {
        /* Non-ACPI path: scan bus 0 and do legacy IRQ fixups. */
        pci_root_bus = pcibios_scan_root(0);
        pcibios_irq_init();
        pcibios_fixup_peer_bridges();
        pcibios_fixup_irqs();
    }

    pcibios_resource_survey();
}
  37.342 +
  37.343 +char * __devinit  pcibios_setup(char *str)
  37.344 +{
  37.345 +    if (!strcmp(str, "off")) {
  37.346 +        pci_probe = 0;
  37.347 +        return NULL;
  37.348 +    }
  37.349 +    return NULL;
  37.350 +}
  37.351 +
  37.352 +unsigned int pcibios_assign_all_busses(void)
  37.353 +{
  37.354 +    return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
  37.355 +}
  37.356 +
  37.357 +int pcibios_enable_device(struct pci_dev *dev, int mask)
  37.358 +{
  37.359 +    int err;
  37.360 +
  37.361 +    if ((err = pcibios_enable_resources(dev, mask)) < 0)
  37.362 +        return err;
  37.363 +
  37.364 +    pcibios_enable_irq(dev);
  37.365 +
  37.366 +    return 0;
  37.367 +}
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/physirq.c	Tue Mar 23 10:40:28 2004 +0000
    38.3 @@ -0,0 +1,172 @@
    38.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    38.5 + ****************************************************************************
    38.6 + * (C) 2004 - Rolf Neugebauer - Intel Research Cambridge
    38.7 + ****************************************************************************
    38.8 + *
    38.9 + *        File: physirq.c
   38.10 + *      Author: Rolf Neugebauer (rolf.neugebauer@intel.com)
   38.11 + *        Date: Mar 2004
   38.12 + * 
   38.13 + * Description: guests may receive virtual interrupts directly 
   38.14 + *              corresponding to physical interrupts. these virtual
   38.15 + *              interrupts require special handling provided 
   38.16 + *              by the virq irq type.
   38.17 + */
   38.18 +
   38.19 +
   38.20 +#include <linux/config.h>
   38.21 +#include <asm/atomic.h>
   38.22 +#include <asm/irq.h>
   38.23 +#include <asm/hypervisor.h>
   38.24 +#include <asm/system.h>
   38.25 +
   38.26 +#include <linux/irq.h>
   38.27 +#include <linux/sched.h>
   38.28 +
   38.29 +#include <asm/hypervisor-ifs/hypervisor-if.h>
   38.30 +#include <asm/hypervisor-ifs/physdev.h>
   38.31 +
static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs);

/* Nonzero once the shared _EVENT_PHYSIRQ demux handler is installed. */
static int setup_event_handler = 0;
   38.35 +
/*
 * ->startup hook of the physirq irq type: lazily install the shared
 * _EVENT_PHYSIRQ handler (first call only), then request ownership of
 * 'irq' from Xen.  Returns 0 on success, or the error from
 * request_irq()/HYPERVISOR_physdev_op().
 */
static unsigned int startup_physirq_event(unsigned int irq)
{
    physdev_op_t op;
    int err;

    printk("startup_physirq_event %d\n", irq);

    /*
     * install a interrupt handler for physirq event when called first time
     * we actually are never executing the handler as _EVENT_PHYSIRQ is 
     * handled specially in hypervisor.c But we need to enable the event etc.
     */
    if ( !setup_event_handler )
    {
        printk("startup_physirq_event %d: setup event handler\n", irq);
        /* set up a event handler to demux virtualised physical interrupts */
        err = request_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ), physirq_interrupt, 
                          SA_SAMPLE_RANDOM, "physirq", NULL);
        if ( err )
        {
            printk(KERN_WARNING "Could not allocate physirq interrupt\n");
            return err;
        }
        setup_event_handler = 1;
    }

    /*
     * request the irq from hypervisor
     */
    op.cmd = PHYSDEVOP_REQUEST_IRQ;
    op.u.request_irq.irq   = irq;
    if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
    {
        printk(KERN_ALERT "could not get IRQ %d from Xen\n", irq);
        return err;
    }
    return 0;
}
   38.74 +/*
   38.75 + * This is a dummy interrupt handler.
   38.76 + * It should never be called. events for physical interrupts are handled
   38.77 + * differently in hypervisor.c
   38.78 + */
   38.79 +static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs)
   38.80 +{
   38.81 +    printk("XXX This should never be called!");
   38.82 +}
   38.83 +
   38.84 +
   38.85 +/*
   38.86 + * IRQ is not needed anymore.
   38.87 + */
   38.88 +static void shutdown_physirq_event(unsigned int irq)
   38.89 +{
   38.90 +    physdev_op_t op;
   38.91 +    int err;
   38.92 +
   38.93 +    printk("shutdown_phys_irq called.");
   38.94 +
   38.95 +    /*
   38.96 +     * tell hypervisor
   38.97 +     */
   38.98 +    op.cmd = PHYSDEVOP_FREE_IRQ;
   38.99 +    op.u.free_irq.irq   = irq;
  38.100 +    if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
  38.101 +    {
  38.102 +        printk(KERN_ALERT "could not free IRQ %d\n", irq);
  38.103 +        return;
  38.104 +    }
  38.105 +    return;
  38.106 +}
  38.107 +
  38.108 +
/* ->enable hook: unmasks the shared physirq demux event. */
static void enable_physirq_event(unsigned int irq)
{
    /* XXX just enable all phys interrupts for now */
    enable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
}
  38.114 +
/* ->disable hook: masks the shared physirq demux event. */
static void disable_physirq_event(unsigned int irq)
{
    /* XXX just disable all phys interrupts for now */
    disable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
}
  38.120 +
  38.121 +static void ack_physirq_event(unsigned int irq)
  38.122 +{
  38.123 +    /* clear bit */
  38.124 +    if ( irq <= 0 || irq >= 32 )
  38.125 +    {
  38.126 +        printk("wrong irq %d\n", irq);
  38.127 +    }
  38.128 +
  38.129 +    clear_bit(irq, &HYPERVISOR_shared_info->physirq_pend);
  38.130 +}
  38.131 +
  38.132 +static void end_physirq_event(unsigned int irq)
  38.133 +{
  38.134 +    int err;
  38.135 +    physdev_op_t op;
  38.136 +
  38.137 +    /* call hypervisor */
  38.138 +    op.cmd = PHYSDEVOP_FINISHED_IRQ;
  38.139 +    op.u.finished_irq.irq   = irq;
  38.140 +    if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
  38.141 +    {
  38.142 +        printk(KERN_ALERT "could not finish IRQ %d\n", irq);
  38.143 +        return;
  38.144 +    }
  38.145 +    return;
  38.146 +}
  38.147 +
/* hw_interrupt_type describing virtualised physical IRQs. */
static struct hw_interrupt_type physirq_irq_type = {
    "physical-irq",             /* typename */
    startup_physirq_event,      /* startup */
    shutdown_physirq_event,     /* shutdown */
    enable_physirq_event,       /* enable */
    disable_physirq_event,      /* disable */
    ack_physirq_event,          /* ack */
    end_physirq_event,          /* end */
    NULL                        /* set_affinity: not supported */
};
  38.158 +
  38.159 +
  38.160 +
  38.161 +void __init physirq_init(void)
  38.162 +{
  38.163 +    int i;
  38.164 +
  38.165 +    printk("Initialise irq handlers [%d-%d] for physical interrupts.\n",
  38.166 +           PHYS_IRQ_BASE, PHYS_IRQ_BASE+NR_PHYS_IRQS-1);
  38.167 +
  38.168 +    for ( i = 0; i < NR_PHYS_IRQS; i++ )
  38.169 +    {
  38.170 +        irq_desc[i + PHYS_IRQ_BASE].status  = IRQ_DISABLED;
  38.171 +        irq_desc[i + PHYS_IRQ_BASE].action  = 0;
  38.172 +        irq_desc[i + PHYS_IRQ_BASE].depth   = 1;
  38.173 +        irq_desc[i + PHYS_IRQ_BASE].handler = &physirq_irq_type;
  38.174 +    }
  38.175 +}
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/process.c	Tue Mar 23 10:40:28 2004 +0000
    39.3 @@ -0,0 +1,474 @@
    39.4 +/*
    39.5 + *  linux/arch/i386/kernel/process.c
    39.6 + *
    39.7 + *  Copyright (C) 1995  Linus Torvalds
    39.8 + *
    39.9 + *  Pentium III FXSR, SSE support
   39.10 + *	Gareth Hughes <gareth@valinux.com>, May 2000
   39.11 + */
   39.12 +
   39.13 +/*
   39.14 + * This file handles the architecture-dependent parts of process handling..
   39.15 + */
   39.16 +
   39.17 +#define __KERNEL_SYSCALLS__
   39.18 +#include <stdarg.h>
   39.19 +
   39.20 +#include <linux/errno.h>
   39.21 +#include <linux/sched.h>
   39.22 +#include <linux/kernel.h>
   39.23 +#include <linux/mm.h>
   39.24 +#include <linux/smp.h>
   39.25 +#include <linux/smp_lock.h>
   39.26 +#include <linux/stddef.h>
   39.27 +#include <linux/unistd.h>
   39.28 +#include <linux/ptrace.h>
   39.29 +#include <linux/slab.h>
   39.30 +#include <linux/vmalloc.h>
   39.31 +#include <linux/user.h>
   39.32 +#include <linux/a.out.h>
   39.33 +#include <linux/interrupt.h>
   39.34 +#include <linux/config.h>
   39.35 +#include <linux/delay.h>
   39.36 +#include <linux/reboot.h>
   39.37 +#include <linux/init.h>
   39.38 +#include <linux/mc146818rtc.h>
   39.39 +
   39.40 +#include <asm/uaccess.h>
   39.41 +#include <asm/pgtable.h>
   39.42 +#include <asm/system.h>
   39.43 +#include <asm/io.h>
   39.44 +#include <asm/ldt.h>
   39.45 +#include <asm/processor.h>
   39.46 +#include <asm/i387.h>
   39.47 +#include <asm/desc.h>
   39.48 +#include <asm/mmu_context.h>
   39.49 +#include <asm/multicall.h>
   39.50 +#include <asm/hypervisor-ifs/dom0_ops.h>
   39.51 +
   39.52 +#include <linux/irq.h>
   39.53 +
/* Fork return path; copy_thread() points new tasks' saved eip here. */
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/* When >0, 'hlt' use is disabled (see disable_hlt/enable_hlt below). */
int hlt_counter;

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);

/*
 * Power off function, if any
 */
void (*pm_power_off)(void);
   39.67 +
   39.68 +void disable_hlt(void)
   39.69 +{
   39.70 +    hlt_counter++;
   39.71 +}
   39.72 +
   39.73 +void enable_hlt(void)
   39.74 +{
   39.75 +    hlt_counter--;
   39.76 +}
   39.77 +
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
    extern int set_timeout_timer(void);

    /* Endless idle loop with no priority at all. */
    init_idle();
    current->nice = 20;
    current->counter = -100;

    for ( ; ; )
    {
        while ( !current->need_resched )
        {
            /* Disable events, then re-test need_resched race-free. */
            __cli();
            if ( current->need_resched )
            {
                /* The race-free check for events failed. */
                __sti();
                break;
            }
            else if ( set_timeout_timer() == 0 )
            {
                /* NB. Blocking reenable events in a race-free manner. */
                HYPERVISOR_block();
            }
            else
            {
                /* No race here: yielding will get us the CPU again anyway. */
                __sti();
                HYPERVISOR_yield();
            }
        }
        schedule();
        check_pgt_cache();
    }
}
  39.120 +
/* Restart: all three machine_* paths simply terminate this domain. */
void machine_restart(char * __unused)
{
    HYPERVISOR_exit();
}

/* Halt: terminate this domain via the hypervisor. */
void machine_halt(void)
{
    HYPERVISOR_exit();
}

/* Power off: terminate this domain via the hypervisor. */
void machine_power_off(void)
{
    HYPERVISOR_exit();
}
  39.135 +
extern void show_trace(unsigned long* esp);

/* Dump the register frame 'regs' plus a stack backtrace to the log. */
void show_regs(struct pt_regs * regs)
{
    printk("\n");
    printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
    printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
    /* xcs & 2 tests an RPL bit — presumably a user-mode frame, which
     * also carries SS:ESP; TODO confirm ring layout under Xen. */
    if (regs->xcs & 2)
        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
    printk(" EFLAGS: %08lx    %s\n",regs->eflags, print_tainted());
    printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
           regs->eax,regs->ebx,regs->ecx,regs->edx);
    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
           regs->esi, regs->edi, regs->ebp);
    printk(" DS: %04x ES: %04x\n",
           0xffff & regs->xds,0xffff & regs->xes);

    show_trace(&regs->esp);
}
  39.155 +
  39.156 +
/*
 * Create a kernel thread: issue clone(2) directly via int 0x80.  After
 * the syscall both parent and child resume at the same point; the
 * stack-pointer comparison tells them apart (the child runs on a new
 * stack).  The child then calls fn(arg) and exits via exit(2).
 * Returns the clone() result (child pid or negative errno).
 */
int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
    long retval, d0;

    __asm__ __volatile__(
        "movl %%esp,%%esi\n\t"
        "int $0x80\n\t"		/* Linux/i386 system call */
        "cmpl %%esp,%%esi\n\t"	/* child or parent? */
        "je 1f\n\t"		/* parent - jump */
        /* Load the argument into eax, and push it.  That way, it does
         * not matter whether the called function is compiled with
         * -mregparm or not.  */
        "movl %4,%%eax\n\t"
        "pushl %%eax\n\t"		
        "call *%5\n\t"		/* call fn */
        "movl %3,%0\n\t"	/* exit */
        "int $0x80\n"
        "1:\t"
        :"=&a" (retval), "=&S" (d0)
        :"0" (__NR_clone), "i" (__NR_exit),
        "r" (arg), "r" (fn),
        "b" (flags | CLONE_VM)
        : "memory");

    return retval;
}
  39.186 +
/*
 * Free current thread data structures etc..
 * On this architecture there is no per-thread state to release.
 */
void exit_thread(void)
{
    /* nothing to do ... */
}
  39.194 +
  39.195 +void flush_thread(void)
  39.196 +{
  39.197 +    struct task_struct *tsk = current;
  39.198 +
  39.199 +    memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
  39.200 +
  39.201 +    /*
  39.202 +     * Forget coprocessor state..
  39.203 +     */
  39.204 +    clear_fpu(tsk);
  39.205 +    tsk->used_math = 0;
  39.206 +}
  39.207 +
  39.208 +void release_thread(struct task_struct *dead_task)
  39.209 +{
  39.210 +    if (dead_task->mm) {
  39.211 +        // temporary debugging check
  39.212 +        if (dead_task->mm->context.size) {
  39.213 +            printk("WARNING: dead process %8s still has LDT? <%p/%p>\n",
  39.214 +                   dead_task->comm, 
  39.215 +		   dead_task->mm->context.ldt,
  39.216 +		   dead_task->mm->context.size);
  39.217 +            BUG();
  39.218 +        }
  39.219 +    }
  39.220 +    //release_x86_irqs(dead_task);
  39.221 +}
  39.222 +
  39.223 +
/*
 * Save a segment: store the live value of segment register 'seg'
 * into the lvalue 'value'.
 */
#define savesegment(seg,value) \
	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
  39.229 +
/*
 * Set up the saved thread state of newly-forked child 'p': a copy of
 * the parent's register frame with eax forced to 0 (the child's fork
 * return value) and esp set to the requested stack.  Returns 0.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
    struct pt_regs * childregs;
    unsigned long eflags;

    /* Place the child's register frame at the top of its kernel stack. */
    childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
    struct_cpy(childregs, regs);
    childregs->eax = 0;       /* child sees fork() return 0 */
    childregs->esp = esp;

    p->thread.esp = (unsigned long) childregs;
    p->thread.esp0 = (unsigned long) (childregs+1);

    /* First schedule of the child resumes at ret_from_fork. */
    p->thread.eip = (unsigned long) ret_from_fork;

    savesegment(fs,p->thread.fs);
    savesegment(gs,p->thread.gs);

    /* Flush the parent's live FPU state to memory, then copy it. */
    unlazy_fpu(current);
    struct_cpy(&p->thread.i387, &current->thread.i387);


    /* Record the caller's IOPL bits (EFLAGS[13:12]) for the child. */
    __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
    p->thread.io_pl = (eflags >> 12) & 3;

    return 0;
}
  39.259 +
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
    int i;

/* changed the size calculations - should hopefully work better. lbt */
    dump->magic = CMAGIC;
    dump->start_code = 0;
    dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
    dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
    dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
    dump->u_dsize -= dump->u_tsize;
    dump->u_ssize = 0;
    for (i = 0; i < 8; i++)
        dump->u_debugreg[i] = current->thread.debugreg[i];  

    /* Stack size in pages, measured from the dump stack base upward. */
    if (dump->start_stack < TASK_SIZE)
        dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

    /* Copy the register frame field-by-field into the a.out dump. */
    dump->regs.ebx = regs->ebx;
    dump->regs.ecx = regs->ecx;
    dump->regs.edx = regs->edx;
    dump->regs.esi = regs->esi;
    dump->regs.edi = regs->edi;
    dump->regs.ebp = regs->ebp;
    dump->regs.eax = regs->eax;
    dump->regs.ds = regs->xds;
    dump->regs.es = regs->xes;
    savesegment(fs,dump->regs.fs);
    savesegment(gs,dump->regs.gs);
    dump->regs.orig_eax = regs->orig_eax;
    dump->regs.eip = regs->eip;
    dump->regs.cs = regs->xcs;
    dump->regs.eflags = regs->eflags;
    dump->regs.esp = regs->esp;
    dump->regs.ss = regs->xss;

    dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
  39.301 +
  39.302 +/*
  39.303 + *	switch_to(x,yn) should switch tasks from x to y.
  39.304 + *
  39.305 + * We fsave/fwait so that an exception goes off at the right time
  39.306 + * (as a call from the fsave or fwait in effect) rather than to
  39.307 + * the wrong process. Lazy FP saving no longer makes any sense
  39.308 + * with modern CPU's, and this simplifies a lot of things (SMP
  39.309 + * and UP become the same).
  39.310 + *
  39.311 + * NOTE! We used to use the x86 hardware context switching. The
  39.312 + * reason for not using it any more becomes apparent when you
  39.313 + * try to recover gracefully from saved state that is no longer
  39.314 + * valid (stale segment register values in particular). With the
  39.315 + * hardware task-switch, there is no way to fix up bad state in
  39.316 + * a reasonable manner.
  39.317 + *
  39.318 + * The fact that Intel documents the hardware task-switching to
  39.319 + * be slow is a fairly red herring - this code is not noticeably
  39.320 + * faster. However, there _is_ some room for improvement here,
  39.321 + * so the performance issues may eventually be a valid point.
  39.322 + * More important, however, is the fact that this allows us much
  39.323 + * more flexibility.
  39.324 + */
/*
 * Switch thread state from prev_p to next_p.  Hypervisor work (FPU
 * task-switch, stack switch, IOPL) is batched into one multicall to
 * minimise traps into Xen.  Statement order here is load-bearing.
 */
void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *next = &next_p->thread;

    __cli();

    /*
     * We clobber FS and GS here so that we avoid a GPF when restoring previous
     * task's FS/GS values in Xen when the LDT is switched. If we don't do this
     * then we can end up erroneously re-flushing the page-update queue when
     * we 'execute_multicall_list'.
     */
    __asm__ __volatile__ ( 
        "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );

    MULTICALL_flush_page_update_queue();

    /*
     * This is basically 'unlazy_fpu', except that we queue a multicall to 
     * indicate FPU task switch, rather than synchronously trapping to Xen.
     */
    if ( prev_p->flags & PF_USEDFPU )
    {
	if ( cpu_has_fxsr )
            asm volatile( "fxsave %0 ; fnclex"
                          : "=m" (prev_p->thread.i387.fxsave) );
	else
            asm volatile( "fnsave %0 ; fwait"
                          : "=m" (prev_p->thread.i387.fsave) );
	prev_p->flags &= ~PF_USEDFPU;
        queue_multicall0(__HYPERVISOR_fpu_taskswitch);
    }

    queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
    /* Privileged (dom0) guests may also carry a per-task IOPL. */
    if ( start_info.flags & SIF_PRIVILEGED ) 
    {
        dom0_op_t op;
        op.cmd           = DOM0_IOPL;
        op.u.iopl.domain = DOMID_SELF;
        op.u.iopl.iopl   = next->io_pl;
        queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
    }

    /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
    execute_multicall_list();
    __sti();

    /*
     * Restore %fs and %gs.
     */
    loadsegment(fs, next->fs);
    loadsegment(gs, next->gs);

    /*
     * Now maybe reload the debug registers
     */
    if ( next->debugreg[7] != 0 )
    {
        HYPERVISOR_set_debugreg(0, next->debugreg[0]);
        HYPERVISOR_set_debugreg(1, next->debugreg[1]);
        HYPERVISOR_set_debugreg(2, next->debugreg[2]);
        HYPERVISOR_set_debugreg(3, next->debugreg[3]);
        /* no 4 and 5 */
        HYPERVISOR_set_debugreg(6, next->debugreg[6]);
        HYPERVISOR_set_debugreg(7, next->debugreg[7]);
    }
}
  39.392 +
/* fork() system-call entry point. */
asmlinkage int sys_fork(struct pt_regs regs)
{
    return do_fork(SIGCHLD, regs.esp, &regs, 0);
}
  39.397 +
  39.398 +asmlinkage int sys_clone(struct pt_regs regs)
  39.399 +{
  39.400 +    unsigned long clone_flags;
  39.401 +    unsigned long newsp;
  39.402 +
  39.403 +    clone_flags = regs.ebx;
  39.404 +    newsp = regs.ecx;
  39.405 +    if (!newsp)
  39.406 +        newsp = regs.esp;
  39.407 +    return do_fork(clone_flags, newsp, &regs, 0);
  39.408 +}
  39.409 +
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
    /* Parent blocks (CLONE_VFORK) and shares its VM with the child. */
    return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
}
  39.424 +
/*
 * sys_execve() executes a new program.
 * ebx = path, ecx = argv, edx = envp.  Returns 0 on success or a
 * negative errno.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
    int error;
    char * filename;

    /* Copy the user-space path into a kernel buffer. */
    filename = getname((char *) regs.ebx);
    error = PTR_ERR(filename);
    if (IS_ERR(filename))
        goto out;
    error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
    if (error == 0)
        /* Successful exec clears the pending ptrace single-step flag. */
        current->ptrace &= ~PT_DTRACE;
    putname(filename);
 out:
    return error;
}
  39.444 +
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

/*
 * Return the kernel eip where sleeping task 'p' is blocked, found by
 * walking its saved ebp frame chain until we leave scheduler text.
 * Returns 0 if it cannot be determined.
 */
unsigned long get_wchan(struct task_struct *p)
{
    unsigned long ebp, esp, eip;
    unsigned long stack_page;
    int count = 0;
    if (!p || p == current || p->state == TASK_RUNNING)
        return 0;
    stack_page = (unsigned long)p;
    esp = p->thread.esp;
    /* 8188/8184: bounds within the 8KB task area, leaving room for a
     * saved ebp (and ebp+eip pair below) — TODO confirm constants. */
    if (!stack_page || esp < stack_page || esp > 8188+stack_page)
        return 0;
    /* include/asm-i386/system.h:switch_to() pushes ebp last. */
    ebp = *(unsigned long *) esp;
    do {
        if (ebp < stack_page || ebp > 8184+stack_page)
            return 0;
        eip = *(unsigned long *) (ebp+4);
        if (eip < first_sched || eip >= last_sched)
            return eip;
        ebp = *(unsigned long *) ebp;
    } while (count++ < 16);  /* give up after 16 frames */
    return 0;
}
#undef last_sched
#undef first_sched
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/setup.c	Tue Mar 23 10:40:28 2004 +0000
    40.3 @@ -0,0 +1,1257 @@
    40.4 +/*
    40.5 + *  linux/arch/i386/kernel/setup.c
    40.6 + *
    40.7 + *  Copyright (C) 1995  Linus Torvalds
    40.8 + */
    40.9 +
   40.10 +/*
   40.11 + * This file handles the architecture-dependent parts of initialization
   40.12 + */
   40.13 +
   40.14 +#include <linux/errno.h>
   40.15 +#include <linux/sched.h>
   40.16 +#include <linux/kernel.h>
   40.17 +#include <linux/mm.h>
   40.18 +#include <linux/stddef.h>
   40.19 +#include <linux/unistd.h>
   40.20 +#include <linux/ptrace.h>
   40.21 +#include <linux/slab.h>
   40.22 +#include <linux/user.h>
   40.23 +#include <linux/a.out.h>
   40.24 +#include <linux/tty.h>
   40.25 +#include <linux/ioport.h>
   40.26 +#include <linux/delay.h>
   40.27 +#include <linux/config.h>
   40.28 +#include <linux/init.h>
   40.29 +#include <linux/apm_bios.h>
   40.30 +#ifdef CONFIG_BLK_DEV_RAM
   40.31 +#include <linux/blk.h>
   40.32 +#endif
   40.33 +#include <linux/highmem.h>
   40.34 +#include <linux/bootmem.h>
   40.35 +#include <linux/seq_file.h>
   40.36 +#include <asm/processor.h>
   40.37 +#include <linux/console.h>
   40.38 +#include <linux/module.h>
   40.39 +#include <asm/mtrr.h>
   40.40 +#include <asm/uaccess.h>
   40.41 +#include <asm/system.h>
   40.42 +#include <asm/io.h>
   40.43 +#include <asm/smp.h>
   40.44 +#include <asm/msr.h>
   40.45 +#include <asm/desc.h>
   40.46 +#include <asm/dma.h>
   40.47 +#include <asm/mpspec.h>
   40.48 +#include <asm/mmu_context.h>
   40.49 +#include <asm/hypervisor.h>
   40.50 +#include <asm/hypervisor-ifs/dom0_ops.h>
   40.51 +#include <linux/netdevice.h>
   40.52 +#include <linux/rtnetlink.h>
   40.53 +#include <linux/tqueue.h>
   40.54 +#include <net/pkt_sched.h> /* dev_(de)activate */
   40.55 +
   40.56 +/*
   40.57 + * Point at the empty zero page to start with. We map the real shared_info
   40.58 + * page as soon as fixmap is up and running.
   40.59 + */
   40.60 +shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
   40.61 +
   40.62 +unsigned long *phys_to_machine_mapping;
   40.63 +
   40.64 +/*
   40.65 + * Machine setup..
   40.66 + */
   40.67 +
   40.68 +char ignore_irq13;		/* set if exception 16 works */
   40.69 +struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
   40.70 +
   40.71 +unsigned long mmu_cr4_features;
   40.72 +//EXPORT_SYMBOL(mmu_cr4_features);
   40.73 +
   40.74 +unsigned char * vgacon_mmap;
   40.75 +
   40.76 +/*
   40.77 + * Bus types ..
   40.78 + */
   40.79 +#ifdef CONFIG_EISA
   40.80 +int EISA_bus;
   40.81 +#endif
   40.82 +int MCA_bus;
   40.83 +
   40.84 +/* for MCA, but anyone else can use it if they want */
   40.85 +unsigned int machine_id;
   40.86 +unsigned int machine_submodel_id;
   40.87 +unsigned int BIOS_revision;
   40.88 +unsigned int mca_pentium_flag;
   40.89 +
   40.90 +/* For PCI or other memory-mapped resources */
   40.91 +unsigned long pci_mem_start = 0x10000000;
   40.92 +
   40.93 +/*
   40.94 + * Setup options
   40.95 + */
   40.96 +struct drive_info_struct { char dummy[32]; } drive_info;
   40.97 +struct screen_info screen_info;
   40.98 +struct apm_info apm_info;
   40.99 +struct sys_desc_table_struct {
  40.100 +    unsigned short length;
  40.101 +    unsigned char table[0];
  40.102 +};
  40.103 +
  40.104 +unsigned char aux_device_present;
  40.105 +
  40.106 +extern int root_mountflags;
  40.107 +extern char _text, _etext, _edata, _end;
  40.108 +
  40.109 +int enable_acpi_smp_table;
  40.110 +
  40.111 +/* Raw start-of-day parameters from the hypervisor. */
  40.112 +union start_info_union start_info_union;
  40.113 +
  40.114 +#define COMMAND_LINE_SIZE 256
  40.115 +static char command_line[COMMAND_LINE_SIZE];
  40.116 +char saved_command_line[COMMAND_LINE_SIZE];
  40.117 +
/*
 * Copy the hypervisor-supplied command line into saved_command_line
 * (for /proc/cmdline), strip out "mem=..." options — their values are
 * parsed but deliberately discarded — and hand the filtered line back
 * via *cmdline_p.
 */
static void __init parse_mem_cmdline (char ** cmdline_p)
{
    char c = ' ', *to = command_line, *from = saved_command_line;
    int len = 0;

    /* Save unparsed command line copy for /proc/cmdline */
    memcpy(saved_command_line, start_info.cmd_line, COMMAND_LINE_SIZE);
    saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

    for (;;) {
        /*
         * "mem=nopentium" disables the 4MB page tables.
         * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
         * to <mem>, overriding the bios size.
         * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
         * <start> to <start>+<mem>, overriding the bios size.
         */
        if (c == ' ' && !memcmp(from, "mem=", 4)) {
            if (to != command_line)
                to--;
            if (!memcmp(from+4, "nopentium", 9)) {
                from += 9+4;
            } else if (!memcmp(from+4, "exactmap", 8)) {
                from += 8+4;
            } else {
                /* Sizes are parsed only to advance 'from'; values dropped. */
                (void)memparse(from+4, &from);
                if (*from == '@')
                    (void)memparse(from+1, &from);
            }
        }

        c = *(from++);
        if (!c)
            break;
        if (COMMAND_LINE_SIZE <= ++len)
            break;
        *(to++) = c;
    }
    *to = '\0';
    *cmdline_p = command_line;
}
  40.159 +
  40.160 +void __init setup_arch(char **cmdline_p)
  40.161 +{
  40.162 +    unsigned long bootmap_size, start_pfn, max_low_pfn;
  40.163 +    unsigned long i;
  40.164 +
  40.165 +    extern void hypervisor_callback(void);
  40.166 +    extern void failsafe_callback(void);
  40.167 +
  40.168 +    extern unsigned long cpu0_pte_quicklist[];
  40.169 +    extern unsigned long cpu0_pgd_quicklist[];
  40.170 +
  40.171 +    HYPERVISOR_set_callbacks(
  40.172 +        __KERNEL_CS, (unsigned long)hypervisor_callback,
  40.173 +        __KERNEL_CS, (unsigned long)failsafe_callback);
  40.174 +
  40.175 +    boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
  40.176 +    boot_cpu_data.pte_quick = cpu0_pte_quicklist;
  40.177 +
  40.178 +    ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
  40.179 +    memset(&drive_info, 0, sizeof(drive_info));
  40.180 +    memset(&screen_info, 0, sizeof(screen_info));
  40.181 +    
  40.182 +    /* This is drawn from a dump from vgacon:startup in standard Linux. */
  40.183 +    screen_info.orig_video_mode = 3; 
  40.184 +    screen_info.orig_video_isVGA = 1;
  40.185 +    screen_info.orig_video_lines = 25;
  40.186 +    screen_info.orig_video_cols = 80;
  40.187 +    screen_info.orig_video_ega_bx = 3;
  40.188 +    screen_info.orig_video_points = 16;
  40.189 +
  40.190 +    memset(&apm_info.bios, 0, sizeof(apm_info.bios));
  40.191 +    aux_device_present = 0; 
  40.192 +#ifdef CONFIG_BLK_DEV_RAM
  40.193 +    rd_image