direct-io.hg

changeset 3:1ef2026299c3

bitkeeper revision 1.2 (3ddb79c9KusG02eh7i-uXkgY0IksKA)

Import changeset
author smh22@boulderdash.cl.cam.ac.uk
date Wed Nov 20 12:02:17 2002 +0000 (2002-11-20)
parents 6c40ab0c10ae
children 2c9f83193041
files .rootkeys BitKeeper/etc/logging_ok xen-2.4.16/Makefile xen-2.4.16/README xen-2.4.16/Rules.mk xen-2.4.16/arch/i386/Makefile xen-2.4.16/arch/i386/Rules.mk xen-2.4.16/arch/i386/apic.c xen-2.4.16/arch/i386/boot/boot.S xen-2.4.16/arch/i386/delay.c xen-2.4.16/arch/i386/entry.S xen-2.4.16/arch/i386/extable.c xen-2.4.16/arch/i386/i387.c xen-2.4.16/arch/i386/i8259.c xen-2.4.16/arch/i386/idle0_task.c xen-2.4.16/arch/i386/io_apic.c xen-2.4.16/arch/i386/ioremap.c xen-2.4.16/arch/i386/irq.c xen-2.4.16/arch/i386/mm.c xen-2.4.16/arch/i386/mpparse.c xen-2.4.16/arch/i386/pci-dma.c xen-2.4.16/arch/i386/pci-i386.c xen-2.4.16/arch/i386/pci-i386.h xen-2.4.16/arch/i386/pci-irq.c xen-2.4.16/arch/i386/pci-pc.c xen-2.4.16/arch/i386/process.c xen-2.4.16/arch/i386/rwlock.c xen-2.4.16/arch/i386/setup.c xen-2.4.16/arch/i386/smp.c xen-2.4.16/arch/i386/smpboot.c xen-2.4.16/arch/i386/time.c xen-2.4.16/arch/i386/trampoline.S xen-2.4.16/arch/i386/traps.c xen-2.4.16/arch/i386/usercopy.c xen-2.4.16/arch/i386/xeno.lds xen-2.4.16/common/Makefile xen-2.4.16/common/block.c xen-2.4.16/common/brlock.c xen-2.4.16/common/dom0_ops.c xen-2.4.16/common/domain.c xen-2.4.16/common/event.c xen-2.4.16/common/kernel.c xen-2.4.16/common/lib.c xen-2.4.16/common/memory.c xen-2.4.16/common/network.c xen-2.4.16/common/page_alloc.c xen-2.4.16/common/resource.c xen-2.4.16/common/slab.c xen-2.4.16/common/softirq.c xen-2.4.16/common/timer.c xen-2.4.16/common/vsprintf.c xen-2.4.16/drivers/Makefile xen-2.4.16/drivers/block/Makefile xen-2.4.16/drivers/block/blkpg.c xen-2.4.16/drivers/block/elevator.c xen-2.4.16/drivers/block/genhd.c xen-2.4.16/drivers/block/ll_rw_blk.c xen-2.4.16/drivers/ide/Makefile xen-2.4.16/drivers/ide/ide-disk.c xen-2.4.16/drivers/ide/ide-dma.c xen-2.4.16/drivers/ide/ide-features.c xen-2.4.16/drivers/ide/ide-geometry.c xen-2.4.16/drivers/ide/ide-pci.c xen-2.4.16/drivers/ide/ide-probe.c xen-2.4.16/drivers/ide/ide-taskfile.c xen-2.4.16/drivers/ide/ide.c xen-2.4.16/drivers/ide/ide_modes.h xen-2.4.16/drivers/net/3c509.c xen-2.4.16/drivers/net/3c59x.c xen-2.4.16/drivers/net/8139cp.c xen-2.4.16/drivers/net/8139too.c xen-2.4.16/drivers/net/Makefile xen-2.4.16/drivers/net/Space.c xen-2.4.16/drivers/net/eepro100.c xen-2.4.16/drivers/net/net_init.c xen-2.4.16/drivers/net/pcnet32.c xen-2.4.16/drivers/net/setup.c xen-2.4.16/drivers/net/tulip/.depend xen-2.4.16/drivers/net/tulip/21142.c xen-2.4.16/drivers/net/tulip/ChangeLog xen-2.4.16/drivers/net/tulip/Makefile xen-2.4.16/drivers/net/tulip/eeprom.c xen-2.4.16/drivers/net/tulip/interrupt.c xen-2.4.16/drivers/net/tulip/media.c xen-2.4.16/drivers/net/tulip/pnic.c xen-2.4.16/drivers/net/tulip/pnic2.c xen-2.4.16/drivers/net/tulip/timer.c xen-2.4.16/drivers/net/tulip/tulip.h xen-2.4.16/drivers/net/tulip/tulip_core.c xen-2.4.16/drivers/pci/Makefile xen-2.4.16/drivers/pci/compat.c xen-2.4.16/drivers/pci/gen-devlist.c xen-2.4.16/drivers/pci/names.c xen-2.4.16/drivers/pci/pci.c xen-2.4.16/drivers/pci/pci.ids xen-2.4.16/drivers/pci/proc.c xen-2.4.16/drivers/pci/quirks.c xen-2.4.16/drivers/pci/setup-bus.c xen-2.4.16/drivers/pci/setup-irq.c xen-2.4.16/drivers/pci/setup-res.c xen-2.4.16/drivers/pci/syscall.c xen-2.4.16/drivers/scsi/Makefile xen-2.4.16/drivers/scsi/constants.h xen-2.4.16/drivers/scsi/hosts.h xen-2.4.16/drivers/scsi/scsi.c xen-2.4.16/drivers/scsi/scsi.h xen-2.4.16/drivers/scsi/scsi_dma.c xen-2.4.16/drivers/scsi/scsi_error.c xen-2.4.16/drivers/scsi/scsi_ioctl.c xen-2.4.16/drivers/scsi/scsi_lib.c xen-2.4.16/drivers/scsi/scsi_merge.c xen-2.4.16/drivers/scsi/scsi_module.c 
xen-2.4.16/drivers/scsi/scsi_obsolete.c xen-2.4.16/drivers/scsi/scsi_obsolete.h xen-2.4.16/drivers/scsi/scsi_proc.c xen-2.4.16/drivers/scsi/scsi_queue.c xen-2.4.16/drivers/scsi/scsi_scan.c xen-2.4.16/drivers/scsi/scsi_syms.c xen-2.4.16/drivers/scsi/scsicam.c xen-2.4.16/drivers/scsi/sd.c xen-2.4.16/drivers/scsi/sd.h xen-2.4.16/include/asm-i386/apic.h xen-2.4.16/include/asm-i386/apicdef.h xen-2.4.16/include/asm-i386/atomic.h xen-2.4.16/include/asm-i386/bitops.h xen-2.4.16/include/asm-i386/byteorder.h xen-2.4.16/include/asm-i386/cache.h xen-2.4.16/include/asm-i386/cpufeature.h xen-2.4.16/include/asm-i386/current.h xen-2.4.16/include/asm-i386/debugreg.h xen-2.4.16/include/asm-i386/delay.h xen-2.4.16/include/asm-i386/desc.h xen-2.4.16/include/asm-i386/elf.h xen-2.4.16/include/asm-i386/fixmap.h xen-2.4.16/include/asm-i386/hardirq.h xen-2.4.16/include/asm-i386/hdreg.h xen-2.4.16/include/asm-i386/i387.h xen-2.4.16/include/asm-i386/ide.h xen-2.4.16/include/asm-i386/io.h xen-2.4.16/include/asm-i386/io_apic.h xen-2.4.16/include/asm-i386/ioctl.h xen-2.4.16/include/asm-i386/irq.h xen-2.4.16/include/asm-i386/mc146818rtc.h xen-2.4.16/include/asm-i386/mpspec.h xen-2.4.16/include/asm-i386/msr.h xen-2.4.16/include/asm-i386/page.h xen-2.4.16/include/asm-i386/pci.h xen-2.4.16/include/asm-i386/pgalloc.h xen-2.4.16/include/asm-i386/processor.h xen-2.4.16/include/asm-i386/ptrace.h xen-2.4.16/include/asm-i386/rwlock.h xen-2.4.16/include/asm-i386/scatterlist.h xen-2.4.16/include/asm-i386/smp.h xen-2.4.16/include/asm-i386/smpboot.h xen-2.4.16/include/asm-i386/softirq.h xen-2.4.16/include/asm-i386/spinlock.h xen-2.4.16/include/asm-i386/system.h xen-2.4.16/include/asm-i386/types.h xen-2.4.16/include/asm-i386/uaccess.h xen-2.4.16/include/asm-i386/unaligned.h xen-2.4.16/include/hypervisor-ifs/block.h xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h xen-2.4.16/include/hypervisor-ifs/network.h xen-2.4.16/include/scsi/scsi.h xen-2.4.16/include/scsi/scsi_ioctl.h xen-2.4.16/include/scsi/scsicam.h xen-2.4.16/include/scsi/sg.h xen-2.4.16/include/xeno/blk.h xen-2.4.16/include/xeno/blkdev.h xen-2.4.16/include/xeno/blkpg.h xen-2.4.16/include/xeno/block.h xen-2.4.16/include/xeno/bootmem.h xen-2.4.16/include/xeno/brlock.h xen-2.4.16/include/xeno/byteorder/big_endian.h xen-2.4.16/include/xeno/byteorder/generic.h xen-2.4.16/include/xeno/byteorder/little_endian.h xen-2.4.16/include/xeno/byteorder/pdp_endian.h xen-2.4.16/include/xeno/byteorder/swab.h xen-2.4.16/include/xeno/byteorder/swabb.h xen-2.4.16/include/xeno/cache.h xen-2.4.16/include/xeno/config.h xen-2.4.16/include/xeno/ctype.h xen-2.4.16/include/xeno/delay.h xen-2.4.16/include/xeno/dom0_ops.h xen-2.4.16/include/xeno/elevator.h xen-2.4.16/include/xeno/errno.h xen-2.4.16/include/xeno/etherdevice.h xen-2.4.16/include/xeno/ethtool.h xen-2.4.16/include/xeno/event.h xen-2.4.16/include/xeno/genhd.h xen-2.4.16/include/xeno/hdreg.h xen-2.4.16/include/xeno/hdsmart.h xen-2.4.16/include/xeno/ide.h xen-2.4.16/include/xeno/if.h xen-2.4.16/include/xeno/if_ether.h xen-2.4.16/include/xeno/if_packet.h xen-2.4.16/include/xeno/init.h xen-2.4.16/include/xeno/interrupt.h xen-2.4.16/include/xeno/ioctl.h xen-2.4.16/include/xeno/ioport.h xen-2.4.16/include/xeno/irq.h xen-2.4.16/include/xeno/irq_cpustat.h xen-2.4.16/include/xeno/kdev_t.h xen-2.4.16/include/xeno/lib.h xen-2.4.16/include/xeno/list.h xen-2.4.16/include/xeno/major.h xen-2.4.16/include/xeno/mii.h xen-2.4.16/include/xeno/mm.h xen-2.4.16/include/xeno/module.h xen-2.4.16/include/xeno/multiboot.h xen-2.4.16/include/xeno/netdevice.h 
xen-2.4.16/include/xeno/pci.h xen-2.4.16/include/xeno/pci_ids.h xen-2.4.16/include/xeno/pkt_sched.h xen-2.4.16/include/xeno/prefetch.h xen-2.4.16/include/xeno/sched.h xen-2.4.16/include/xeno/skbuff.h xen-2.4.16/include/xeno/slab.h xen-2.4.16/include/xeno/smp.h xen-2.4.16/include/xeno/socket.h xen-2.4.16/include/xeno/sockios.h xen-2.4.16/include/xeno/spinlock.h xen-2.4.16/include/xeno/time.h xen-2.4.16/include/xeno/timer.h xen-2.4.16/include/xeno/timex.h xen-2.4.16/include/xeno/tqueue.h xen-2.4.16/include/xeno/types.h xen-2.4.16/include/xeno/vif.h xen-2.4.16/net/Makefile xen-2.4.16/net/dev.c xen-2.4.16/net/dev_mcast.c xen-2.4.16/net/eth.c xen-2.4.16/net/sch_generic.c xen-2.4.16/net/skbuff.c xen-2.4.16/net/utils.c xen-2.4.16/tools/Makefile xen-2.4.16/tools/elf-reloc.c xenolinux-2.4.16-sparse/Makefile xenolinux-2.4.16-sparse/arch/xeno/Makefile xenolinux-2.4.16-sparse/arch/xeno/boot/Makefile xenolinux-2.4.16-sparse/arch/xeno/config.in xenolinux-2.4.16-sparse/arch/xeno/defconfig xenolinux-2.4.16-sparse/arch/xeno/drivers/block/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/block/block.c xenolinux-2.4.16-sparse/arch/xeno/drivers/console/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/console/console.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h xenolinux-2.4.16-sparse/arch/xeno/drivers/network/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c xenolinux-2.4.16-sparse/arch/xeno/kernel/Makefile xenolinux-2.4.16-sparse/arch/xeno/kernel/entry.S xenolinux-2.4.16-sparse/arch/xeno/kernel/head.S xenolinux-2.4.16-sparse/arch/xeno/kernel/hypervisor.c xenolinux-2.4.16-sparse/arch/xeno/kernel/i386_ksyms.c xenolinux-2.4.16-sparse/arch/xeno/kernel/i387.c xenolinux-2.4.16-sparse/arch/xeno/kernel/init_task.c xenolinux-2.4.16-sparse/arch/xeno/kernel/ioport.c xenolinux-2.4.16-sparse/arch/xeno/kernel/irq.c xenolinux-2.4.16-sparse/arch/xeno/kernel/ldt.c xenolinux-2.4.16-sparse/arch/xeno/kernel/process.c xenolinux-2.4.16-sparse/arch/xeno/kernel/ptrace.c xenolinux-2.4.16-sparse/arch/xeno/kernel/semaphore.c xenolinux-2.4.16-sparse/arch/xeno/kernel/setup.c xenolinux-2.4.16-sparse/arch/xeno/kernel/signal.c xenolinux-2.4.16-sparse/arch/xeno/kernel/sys_i386.c xenolinux-2.4.16-sparse/arch/xeno/kernel/time.c xenolinux-2.4.16-sparse/arch/xeno/kernel/traps.c xenolinux-2.4.16-sparse/arch/xeno/lib/Makefile xenolinux-2.4.16-sparse/arch/xeno/lib/checksum.S xenolinux-2.4.16-sparse/arch/xeno/lib/dec_and_lock.c xenolinux-2.4.16-sparse/arch/xeno/lib/delay.c xenolinux-2.4.16-sparse/arch/xeno/lib/getuser.S xenolinux-2.4.16-sparse/arch/xeno/lib/iodebug.c xenolinux-2.4.16-sparse/arch/xeno/lib/memcpy.c xenolinux-2.4.16-sparse/arch/xeno/lib/mmx.c xenolinux-2.4.16-sparse/arch/xeno/lib/old-checksum.c xenolinux-2.4.16-sparse/arch/xeno/lib/strstr.c xenolinux-2.4.16-sparse/arch/xeno/lib/usercopy.c xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c xenolinux-2.4.16-sparse/arch/xeno/mm/init.c xenolinux-2.4.16-sparse/arch/xeno/vmlinux.lds xenolinux-2.4.16-sparse/drivers/block/ll_rw_blk.c xenolinux-2.4.16-sparse/drivers/block/rd.c xenolinux-2.4.16-sparse/drivers/char/tty_io.c xenolinux-2.4.16-sparse/fs/nfs/nfsroot.c xenolinux-2.4.16-sparse/include/asm-xeno/a.out.h xenolinux-2.4.16-sparse/include/asm-xeno/apic.h xenolinux-2.4.16-sparse/include/asm-xeno/apicdef.h 
xenolinux-2.4.16-sparse/include/asm-xeno/atomic.h xenolinux-2.4.16-sparse/include/asm-xeno/bitops.h xenolinux-2.4.16-sparse/include/asm-xeno/boot.h xenolinux-2.4.16-sparse/include/asm-xeno/bugs.h xenolinux-2.4.16-sparse/include/asm-xeno/byteorder.h xenolinux-2.4.16-sparse/include/asm-xeno/cache.h xenolinux-2.4.16-sparse/include/asm-xeno/checksum.h xenolinux-2.4.16-sparse/include/asm-xeno/cpufeature.h xenolinux-2.4.16-sparse/include/asm-xeno/current.h xenolinux-2.4.16-sparse/include/asm-xeno/debugreg.h xenolinux-2.4.16-sparse/include/asm-xeno/delay.h xenolinux-2.4.16-sparse/include/asm-xeno/desc.h xenolinux-2.4.16-sparse/include/asm-xeno/div64.h xenolinux-2.4.16-sparse/include/asm-xeno/dma.h xenolinux-2.4.16-sparse/include/asm-xeno/elf.h xenolinux-2.4.16-sparse/include/asm-xeno/errno.h xenolinux-2.4.16-sparse/include/asm-xeno/fcntl.h xenolinux-2.4.16-sparse/include/asm-xeno/fixmap.h xenolinux-2.4.16-sparse/include/asm-xeno/floppy.h xenolinux-2.4.16-sparse/include/asm-xeno/hardirq.h xenolinux-2.4.16-sparse/include/asm-xeno/hdreg.h xenolinux-2.4.16-sparse/include/asm-xeno/highmem.h xenolinux-2.4.16-sparse/include/asm-xeno/hw_irq.h xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/block.h xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/hypervisor-if.h xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/network.h xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor.h xenolinux-2.4.16-sparse/include/asm-xeno/i387.h xenolinux-2.4.16-sparse/include/asm-xeno/ide.h xenolinux-2.4.16-sparse/include/asm-xeno/init.h xenolinux-2.4.16-sparse/include/asm-xeno/io.h xenolinux-2.4.16-sparse/include/asm-xeno/io_apic.h xenolinux-2.4.16-sparse/include/asm-xeno/ioctl.h xenolinux-2.4.16-sparse/include/asm-xeno/ioctls.h xenolinux-2.4.16-sparse/include/asm-xeno/ipc.h xenolinux-2.4.16-sparse/include/asm-xeno/ipcbuf.h xenolinux-2.4.16-sparse/include/asm-xeno/irq.h xenolinux-2.4.16-sparse/include/asm-xeno/kdb.h xenolinux-2.4.16-sparse/include/asm-xeno/kdbprivate.h xenolinux-2.4.16-sparse/include/asm-xeno/keyboard.h xenolinux-2.4.16-sparse/include/asm-xeno/kmap_types.h xenolinux-2.4.16-sparse/include/asm-xeno/ldt.h xenolinux-2.4.16-sparse/include/asm-xeno/linux_logo.h xenolinux-2.4.16-sparse/include/asm-xeno/locks.h xenolinux-2.4.16-sparse/include/asm-xeno/math_emu.h xenolinux-2.4.16-sparse/include/asm-xeno/mc146818rtc.h xenolinux-2.4.16-sparse/include/asm-xeno/mca_dma.h xenolinux-2.4.16-sparse/include/asm-xeno/mman.h xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h xenolinux-2.4.16-sparse/include/asm-xeno/mmx.h xenolinux-2.4.16-sparse/include/asm-xeno/module.h xenolinux-2.4.16-sparse/include/asm-xeno/mpspec.h xenolinux-2.4.16-sparse/include/asm-xeno/msgbuf.h xenolinux-2.4.16-sparse/include/asm-xeno/msr.h xenolinux-2.4.16-sparse/include/asm-xeno/mtrr.h xenolinux-2.4.16-sparse/include/asm-xeno/namei.h xenolinux-2.4.16-sparse/include/asm-xeno/page.h xenolinux-2.4.16-sparse/include/asm-xeno/param.h xenolinux-2.4.16-sparse/include/asm-xeno/parport.h xenolinux-2.4.16-sparse/include/asm-xeno/pgalloc.h xenolinux-2.4.16-sparse/include/asm-xeno/pgtable-2level.h xenolinux-2.4.16-sparse/include/asm-xeno/pgtable-3level.h xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h xenolinux-2.4.16-sparse/include/asm-xeno/poll.h xenolinux-2.4.16-sparse/include/asm-xeno/posix_types.h xenolinux-2.4.16-sparse/include/asm-xeno/processor.h xenolinux-2.4.16-sparse/include/asm-xeno/ptrace.h xenolinux-2.4.16-sparse/include/asm-xeno/resource.h 
xenolinux-2.4.16-sparse/include/asm-xeno/rwlock.h xenolinux-2.4.16-sparse/include/asm-xeno/rwsem.h xenolinux-2.4.16-sparse/include/asm-xeno/scatterlist.h xenolinux-2.4.16-sparse/include/asm-xeno/segment.h xenolinux-2.4.16-sparse/include/asm-xeno/semaphore.h xenolinux-2.4.16-sparse/include/asm-xeno/sembuf.h xenolinux-2.4.16-sparse/include/asm-xeno/serial.h xenolinux-2.4.16-sparse/include/asm-xeno/setup.h xenolinux-2.4.16-sparse/include/asm-xeno/shmbuf.h xenolinux-2.4.16-sparse/include/asm-xeno/shmparam.h xenolinux-2.4.16-sparse/include/asm-xeno/sigcontext.h xenolinux-2.4.16-sparse/include/asm-xeno/siginfo.h xenolinux-2.4.16-sparse/include/asm-xeno/signal.h xenolinux-2.4.16-sparse/include/asm-xeno/smp.h xenolinux-2.4.16-sparse/include/asm-xeno/smplock.h xenolinux-2.4.16-sparse/include/asm-xeno/socket.h xenolinux-2.4.16-sparse/include/asm-xeno/sockios.h xenolinux-2.4.16-sparse/include/asm-xeno/softirq.h xenolinux-2.4.16-sparse/include/asm-xeno/spinlock.h xenolinux-2.4.16-sparse/include/asm-xeno/stat.h xenolinux-2.4.16-sparse/include/asm-xeno/statfs.h xenolinux-2.4.16-sparse/include/asm-xeno/string-486.h xenolinux-2.4.16-sparse/include/asm-xeno/string.h xenolinux-2.4.16-sparse/include/asm-xeno/system.h xenolinux-2.4.16-sparse/include/asm-xeno/termbits.h xenolinux-2.4.16-sparse/include/asm-xeno/termios.h xenolinux-2.4.16-sparse/include/asm-xeno/timex.h xenolinux-2.4.16-sparse/include/asm-xeno/tlb.h xenolinux-2.4.16-sparse/include/asm-xeno/types.h xenolinux-2.4.16-sparse/include/asm-xeno/uaccess.h xenolinux-2.4.16-sparse/include/asm-xeno/ucontext.h xenolinux-2.4.16-sparse/include/asm-xeno/unaligned.h xenolinux-2.4.16-sparse/include/asm-xeno/unistd.h xenolinux-2.4.16-sparse/include/asm-xeno/user.h xenolinux-2.4.16-sparse/include/asm-xeno/vga.h xenolinux-2.4.16-sparse/include/asm-xeno/xor.h xenolinux-2.4.16-sparse/include/linux/sunrpc/debug.h xenolinux-2.4.16-sparse/kernel/panic.c xenolinux-2.4.16-sparse/mk
line diff
     1.1 --- a/.rootkeys	Wed Nov 20 10:59:23 2002 +0000
     1.2 +++ b/.rootkeys	Wed Nov 20 12:02:17 2002 +0000
     1.3 @@ -1,2 +1,402 @@
     1.4  3ddb6b0bKlMz_dz-M59a1mkUa1lASw BitKeeper/etc/config
     1.5  3ddb6b0buTaC5zg1_a8FoAR9FWi_mw BitKeeper/etc/ignore
     1.6 +3ddb79c9_hgSp-gsQm8HqWM_9W3B_A BitKeeper/etc/logging_ok
     1.7 +3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen-2.4.16/Makefile
     1.8 +3ddb79bcCa2VbsMp7mWKlhgwLQUQGA xen-2.4.16/README
     1.9 +3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen-2.4.16/Rules.mk
    1.10 +3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen-2.4.16/arch/i386/Makefile
    1.11 +3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen-2.4.16/arch/i386/Rules.mk
    1.12 +3ddb79bcsjinG9k1KcvbVBuas1R2dA xen-2.4.16/arch/i386/apic.c
    1.13 +3ddb79bcSC_LvnmFlX-T5iTgaR0SKg xen-2.4.16/arch/i386/boot/boot.S
    1.14 +3ddb79bcUrk2EIaM5VsT6wUudH1kkg xen-2.4.16/arch/i386/delay.c
    1.15 +3ddb79bcecupHj56ZbTa3B0FxDowMg xen-2.4.16/arch/i386/entry.S
    1.16 +3ddb79bcY5zW7KhvI9gvfuPi3ZumEg xen-2.4.16/arch/i386/extable.c
    1.17 +3ddb79bcesE5E-lS4QhRhlqXxqj9cA xen-2.4.16/arch/i386/i387.c
    1.18 +3ddb79bcCAq6IpdkHueChoVTfXqEQQ xen-2.4.16/arch/i386/i8259.c
    1.19 +3ddb79bcBit4xJXbwtX0kb1hh2uO1Q xen-2.4.16/arch/i386/idle0_task.c
    1.20 +3ddb79bcKIkRR0kqWaJhe5VUDkMdxg xen-2.4.16/arch/i386/io_apic.c
    1.21 +3ddb79bc1uNlAtc-84Ioq4qfcnI_CQ xen-2.4.16/arch/i386/ioremap.c
    1.22 +3ddb79bdqfIcjkz_h9Hvtp8Tk_19Zw xen-2.4.16/arch/i386/irq.c
    1.23 +3ddb79bcHwuCQDjBICDTSis52hWguw xen-2.4.16/arch/i386/mm.c
    1.24 +3ddb79bdS4UeWWXDH-FaBKqcpMFcnw xen-2.4.16/arch/i386/mpparse.c
    1.25 +3ddb79bcnL-_Dtsbtjgxl7vJU3vBiQ xen-2.4.16/arch/i386/pci-dma.c
    1.26 +3ddb79bdeJ7_86z03yTAPIeeywOg3Q xen-2.4.16/arch/i386/pci-i386.c
    1.27 +3ddb79bdIKgipvGoqExEQ7jawfVowA xen-2.4.16/arch/i386/pci-i386.h
    1.28 +3ddb79bdHe6_Uij4-glW91vInNtBYQ xen-2.4.16/arch/i386/pci-irq.c
    1.29 +3ddb79bcZ_2FxINljqNSkqa17ISyJw xen-2.4.16/arch/i386/pci-pc.c
    1.30 +3ddb79bc1_2bAt67x9MFCP4AZrQnvQ xen-2.4.16/arch/i386/process.c
    1.31 +3ddb79bc7KxGCEJsgBnkDX7XjD_ZEQ xen-2.4.16/arch/i386/rwlock.c
    1.32 +3ddb79bcrD6Z_rUvSDgrvjyb4846Eg xen-2.4.16/arch/i386/setup.c
    1.33 +3ddb79bcSx2e8JSR3pdSGa8x1ScYzA xen-2.4.16/arch/i386/smp.c
    1.34 +3ddb79bcfUN3-UBCPzX26IU8bq-3aw xen-2.4.16/arch/i386/smpboot.c
    1.35 +3ddb79bc-Udq7ol-NX4q9XsYnN7A2Q xen-2.4.16/arch/i386/time.c
    1.36 +3ddb79bccYVzXZJyVaxuv5T42Z1Fsw xen-2.4.16/arch/i386/trampoline.S
    1.37 +3ddb79bcOftONV9h4QCxXOfiT0h91w xen-2.4.16/arch/i386/traps.c
    1.38 +3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen-2.4.16/arch/i386/usercopy.c
    1.39 +3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen-2.4.16/arch/i386/xeno.lds
    1.40 +3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen-2.4.16/common/Makefile
    1.41 +3ddb79bddEYJbcURvqqcx99Yl2iAhQ xen-2.4.16/common/block.c
    1.42 +3ddb79bdrqnW93GR9gZk1OJe1qK-iQ xen-2.4.16/common/brlock.c
    1.43 +3ddb79bdLX_P6iB7ILiblRLWvebapg xen-2.4.16/common/dom0_ops.c
    1.44 +3ddb79bdYO5D8Av12NHqPeSviav7cg xen-2.4.16/common/domain.c
    1.45 +3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen-2.4.16/common/event.c
    1.46 +3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen-2.4.16/common/kernel.c
    1.47 +3ddb79bduhSEZI8xa7IbGQCpap5y2A xen-2.4.16/common/lib.c
    1.48 +3ddb79bdS39UXxUtZnaScie83-7VTQ xen-2.4.16/common/memory.c
    1.49 +3ddb79bdN51qpRC-6bOH-v5hl_AK6A xen-2.4.16/common/network.c
    1.50 +3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen-2.4.16/common/page_alloc.c
    1.51 +3ddb79bdHqdQpATqC0rmUZNbsb6L6A xen-2.4.16/common/resource.c
    1.52 +3ddb79bdB9RNMnkQnUyZ5C9hhMSQQw xen-2.4.16/common/slab.c
    1.53 +3ddb79bd0gVQYmL2zvuJnldvD0AGxQ xen-2.4.16/common/softirq.c
    1.54 +3ddb79bdQqFHtHRGEO2dsxGgo6eAhw xen-2.4.16/common/timer.c
    1.55 +3ddb79bd3zgV33PHdt-cgh3sxcb1hw xen-2.4.16/common/vsprintf.c
    1.56 +3ddb79c0ppNeJtjC4va8j41ADCnchA xen-2.4.16/drivers/Makefile
    1.57 +3ddb79beWzgPS8ozf2BL2g3ZkiWhhQ xen-2.4.16/drivers/block/Makefile
    1.58 +3ddb79be04dyXzyXqDbMRS_1funwXQ xen-2.4.16/drivers/block/blkpg.c
    1.59 +3ddb79beME_0abStePF6fU8XLuQnWw xen-2.4.16/drivers/block/elevator.c
    1.60 +3ddb79beNQVrdGyoI4njXhgAjD6a4A xen-2.4.16/drivers/block/genhd.c
    1.61 +3ddb79beyWwLRP_BiM2t1JKgr_plEw xen-2.4.16/drivers/block/ll_rw_blk.c
    1.62 +3ddb79bdhcqD9ebrslr0O0oHqTiiXg xen-2.4.16/drivers/ide/Makefile
    1.63 +3ddb79bdErDn_WC3G-fWxKNR3viLnA xen-2.4.16/drivers/ide/ide-disk.c
    1.64 +3ddb79bdIPNW36FrlId94jTXaW8HoA xen-2.4.16/drivers/ide/ide-dma.c
    1.65 +3ddb79be5Ysvhn4se_Z-LQY_hI6UPw xen-2.4.16/drivers/ide/ide-features.c
    1.66 +3ddb79bdh1ohsWYRH_KdaXr7cqs12w xen-2.4.16/drivers/ide/ide-geometry.c
    1.67 +3ddb79bdYcxXT-2UEaDcG0Ic4MIK1g xen-2.4.16/drivers/ide/ide-pci.c
    1.68 +3ddb79bdOXTbcImJo8DwmlNX88k78Q xen-2.4.16/drivers/ide/ide-probe.c
    1.69 +3ddb79bdDWFwINnKn29RlFDwGJhjYg xen-2.4.16/drivers/ide/ide-taskfile.c
    1.70 +3ddb79bdkDY1bSOYkToP1Cc49VdBxg xen-2.4.16/drivers/ide/ide.c
    1.71 +3ddb79bdPyAvT_WZTAFhaX0jp-yXSw xen-2.4.16/drivers/ide/ide_modes.h
    1.72 +3ddb79bfogeJNHTIepPjd8fy1TyoTw xen-2.4.16/drivers/net/3c509.c
    1.73 +3ddb79bfMlOcWUwjtg6oMYhGySHDDw xen-2.4.16/drivers/net/3c59x.c
    1.74 +3ddb79bfl_DWxZQFKiJ2BXrSedV4lg xen-2.4.16/drivers/net/8139cp.c
    1.75 +3ddb79bfLVGtyXNJS4NQg-lP21rndA xen-2.4.16/drivers/net/8139too.c
    1.76 +3ddb79c0tWiE8xIFHszxipeVCGKTSA xen-2.4.16/drivers/net/Makefile
    1.77 +3ddb79bfU-H1Hms4BuJEPPydjXUEaQ xen-2.4.16/drivers/net/Space.c
    1.78 +3ddb79c0GejJrp1U6W4G6dYi-RiH4A xen-2.4.16/drivers/net/eepro100.c
    1.79 +3ddb79bfKvn9mt0kofpkw0QaWjxO6A xen-2.4.16/drivers/net/net_init.c
    1.80 +3ddb79c0fQgORkFlqWZdP-6cDHyFIQ xen-2.4.16/drivers/net/pcnet32.c
    1.81 +3ddb79bf_CBcu3QWYwq4bNAOnM2RqQ xen-2.4.16/drivers/net/setup.c
    1.82 +3ddb79bfh8ucmq_HqRSaURalpeAmPg xen-2.4.16/drivers/net/tulip/.depend
    1.83 +3ddb79bfsJ-hdQ17EXTFiUOHisjNgQ xen-2.4.16/drivers/net/tulip/21142.c
    1.84 +3ddb79bf0lzTL-ywAdOO7vctTYAmJA xen-2.4.16/drivers/net/tulip/ChangeLog
    1.85 +3ddb79bfRbGBTu5mznHtxpFPtnQYSQ xen-2.4.16/drivers/net/tulip/Makefile
    1.86 +3ddb79bfLLamkCaJZDJ7i6qrCUhwBw xen-2.4.16/drivers/net/tulip/eeprom.c
    1.87 +3ddb79bf-zt39-zIUgWC9Kb4As--Ew xen-2.4.16/drivers/net/tulip/interrupt.c
    1.88 +3ddb79bfdr1I4DtFnXCzpaHkEkHE2Q xen-2.4.16/drivers/net/tulip/media.c
    1.89 +3ddb79bftPnJDLCAo0Do4KPr5raERA xen-2.4.16/drivers/net/tulip/pnic.c
    1.90 +3ddb79bf5lr4NIjy1oZOM_jhpxGidw xen-2.4.16/drivers/net/tulip/pnic2.c
    1.91 +3ddb79bfBhawkz-2DT9lakMPbqzljQ xen-2.4.16/drivers/net/tulip/timer.c
    1.92 +3ddb79bf7yw7hGBMM60aMgMgxY_G2g xen-2.4.16/drivers/net/tulip/tulip.h
    1.93 +3ddb79bfhUr3baP8Lf4oZjhBX7i5kw xen-2.4.16/drivers/net/tulip/tulip_core.c
    1.94 +3ddb79beUWngyIhMHgyPtuTem4o4JA xen-2.4.16/drivers/pci/Makefile
    1.95 +3ddb79beU9td0Mnm0VUMklerBa37qQ xen-2.4.16/drivers/pci/compat.c
    1.96 +3ddb79beHkGQE58z5t5gyUCYiwOxvw xen-2.4.16/drivers/pci/gen-devlist.c
    1.97 +3ddb79bfoQcFKLf5P6wZlDl36alWdQ xen-2.4.16/drivers/pci/names.c
    1.98 +3ddb79bfyX7-pD6XdxY_mdNrJR20iw xen-2.4.16/drivers/pci/pci.c
    1.99 +3ddb79bf2AS7YBGwooE_Kbv7XgUqNQ xen-2.4.16/drivers/pci/pci.ids
   1.100 +3ddb79bfGf5-CZSdzn0DGBYWjQiDjw xen-2.4.16/drivers/pci/proc.c
   1.101 +3ddb79bf7sTn85WtP_8Nc2YEmmVExQ xen-2.4.16/drivers/pci/quirks.c
   1.102 +3ddb79bfkVLMq5CWjZLACPDivqxq_w xen-2.4.16/drivers/pci/setup-bus.c
   1.103 +3ddb79bfl1H1arbB0pzAEC2uPmY_3g xen-2.4.16/drivers/pci/setup-irq.c
   1.104 +3ddb79bfJaf0bkE1Y67bnll8-kjEPg xen-2.4.16/drivers/pci/setup-res.c
   1.105 +3ddb79bfIcCWJsBDNcQQE3ok2Azn-Q xen-2.4.16/drivers/pci/syscall.c
   1.106 +3ddb79be3kwzyKagpMHGoXZFdan7dg xen-2.4.16/drivers/scsi/Makefile
   1.107 +3ddb79beXZxwKh7cGyPfr40bhDyRrA xen-2.4.16/drivers/scsi/constants.h
   1.108 +3ddb79beGiGljlTNq_kRnCBZECgC9Q xen-2.4.16/drivers/scsi/hosts.h
   1.109 +3ddb79bexarQo1tQ541PPUyK9HXNDA xen-2.4.16/drivers/scsi/scsi.c
   1.110 +3ddb79beBOiYxQUiWTHosepRlJyuGA xen-2.4.16/drivers/scsi/scsi.h
   1.111 +3ddb79beVTYJj6_KMxYLJmCP7p9MuQ xen-2.4.16/drivers/scsi/scsi_dma.c
   1.112 +3ddb79beDrImFCFGgB_GLgUbeuHjog xen-2.4.16/drivers/scsi/scsi_error.c
   1.113 +3ddb79bepDvUltYDsInaUsH9lII9Sw xen-2.4.16/drivers/scsi/scsi_ioctl.c
   1.114 +3ddb79berPStE_-ILQHgcl1BLDLywA xen-2.4.16/drivers/scsi/scsi_lib.c
   1.115 +3ddb79beRXjB7_nNUbJMIRyjDmeByQ xen-2.4.16/drivers/scsi/scsi_merge.c
   1.116 +3ddb79beGNb7Es1bATZAGsPZEu5F2Q xen-2.4.16/drivers/scsi/scsi_module.c
   1.117 +3ddb79beZ--AZB0twliIm3qmQJO8Zg xen-2.4.16/drivers/scsi/scsi_obsolete.c
   1.118 +3ddb79beQgG_st0eBZUX8AQI7kBkHA xen-2.4.16/drivers/scsi/scsi_obsolete.h
   1.119 +3ddb79beK65cNRldY0CFGXjZ3-A74Q xen-2.4.16/drivers/scsi/scsi_proc.c
   1.120 +3ddb79beeIuwGDE0Ldl8wy6mt86Bag xen-2.4.16/drivers/scsi/scsi_queue.c
   1.121 +3ddb79beQVxjXLLSY896cqce3j6Ehg xen-2.4.16/drivers/scsi/scsi_scan.c
   1.122 +3ddb79beVrSvakLg_9MSo22vJ_TGrA xen-2.4.16/drivers/scsi/scsi_syms.c
   1.123 +3ddb79beC6PIqDEaxAfO3bLKcmMLeA xen-2.4.16/drivers/scsi/scsicam.c
   1.124 +3ddb79bedAG8DPsr3S1N4IASxUuBug xen-2.4.16/drivers/scsi/sd.c
   1.125 +3ddb79beA27dAK0xtNh4k6SJniKnlA xen-2.4.16/drivers/scsi/sd.h
   1.126 +3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen-2.4.16/include/asm-i386/apic.h
   1.127 +3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen-2.4.16/include/asm-i386/apicdef.h
   1.128 +3ddb79c3OiG9eTsi9Dy3F_OkuRAzKA xen-2.4.16/include/asm-i386/atomic.h
   1.129 +3ddb79c3rM-Ote0Xn6Ytg8Y6YqAG-A xen-2.4.16/include/asm-i386/bitops.h
   1.130 +3ddb79c3pXaTAGGWSIZF9EnRV5PlRw xen-2.4.16/include/asm-i386/byteorder.h
   1.131 +3ddb79c3KhTI0F_Iw_hRL9QEyOVK-g xen-2.4.16/include/asm-i386/cache.h
   1.132 +3ddb79c2LLt11EQHjrd6sB7FUqvFfA xen-2.4.16/include/asm-i386/cpufeature.h
   1.133 +3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen-2.4.16/include/asm-i386/current.h
   1.134 +3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen-2.4.16/include/asm-i386/debugreg.h
   1.135 +3ddb79c3r9-31dIsewPV3P3i8HALsQ xen-2.4.16/include/asm-i386/delay.h
   1.136 +3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen-2.4.16/include/asm-i386/desc.h
   1.137 +3ddb79c2O729EttZTYu1c8LcsUO_GQ xen-2.4.16/include/asm-i386/elf.h
   1.138 +3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen-2.4.16/include/asm-i386/fixmap.h
   1.139 +3ddb79c39o75zPP0T1aQQ4mNrCAN2w xen-2.4.16/include/asm-i386/hardirq.h
   1.140 +3ddb79c3BFEIwXR4IsWbwp4BoL4DkA xen-2.4.16/include/asm-i386/hdreg.h
   1.141 +3ddb79c3TMDjkxVndKFKnGiwY0HzDg xen-2.4.16/include/asm-i386/i387.h
   1.142 +3ddb79c3otbjpnqFDSzSeD0J-0xcwg xen-2.4.16/include/asm-i386/ide.h
   1.143 +3ddb79c3fQ_O3o5NHK2N8AJdk0Ea4Q xen-2.4.16/include/asm-i386/io.h
   1.144 +3ddb79c2TKeScYHQZreTdHqYNLbehQ xen-2.4.16/include/asm-i386/io_apic.h
   1.145 +3ddb79c3S9Tga4XZRPrD4-aN3XIV6w xen-2.4.16/include/asm-i386/ioctl.h
   1.146 +3ddb79c2L7rTlFzazOLW1XuSZefpFw xen-2.4.16/include/asm-i386/irq.h
   1.147 +3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen-2.4.16/include/asm-i386/mc146818rtc.h
   1.148 +3ddb79c3n_UbPuxlkNxvvLycClIkxA xen-2.4.16/include/asm-i386/mpspec.h
   1.149 +3ddb79c2wa0dA_LGigxOelSGbJ284Q xen-2.4.16/include/asm-i386/msr.h
   1.150 +3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen-2.4.16/include/asm-i386/page.h
   1.151 +3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen-2.4.16/include/asm-i386/pci.h
   1.152 +3ddb79c3nm2zdzeO6Mj8g7ex3txgGw xen-2.4.16/include/asm-i386/pgalloc.h
   1.153 +3ddb79c2QF5-pZGzuX4QukPCDAl59A xen-2.4.16/include/asm-i386/processor.h
   1.154 +3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen-2.4.16/include/asm-i386/ptrace.h
   1.155 +3ddb79c2plf7ciNgoNjU-RsbUzawsw xen-2.4.16/include/asm-i386/rwlock.h
   1.156 +3ddb79c2mJI9YuGMScjofPlD8EdtgA xen-2.4.16/include/asm-i386/scatterlist.h
   1.157 +3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen-2.4.16/include/asm-i386/smp.h
   1.158 +3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen-2.4.16/include/asm-i386/smpboot.h
   1.159 +3ddb79c3e9DCEoR-WzNxcOQDzLu7BQ xen-2.4.16/include/asm-i386/softirq.h
   1.160 +3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen-2.4.16/include/asm-i386/spinlock.h
   1.161 +3ddb79c3ezddh34MdelJpa5tNR00Dw xen-2.4.16/include/asm-i386/system.h
   1.162 +3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen-2.4.16/include/asm-i386/types.h
   1.163 +3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen-2.4.16/include/asm-i386/uaccess.h
   1.164 +3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen-2.4.16/include/asm-i386/unaligned.h
   1.165 +3ddb79c2YTaZwOqWin9-QNgHge5RVw xen-2.4.16/include/hypervisor-ifs/block.h
   1.166 +3ddb79c25UE59iu4JJcbRalx95mvcg xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h
   1.167 +3ddb79c2oRPrzClk3zbTkRHlpumzKA xen-2.4.16/include/hypervisor-ifs/network.h
   1.168 +3ddb79c4qbCoOFHrv9sCGshbWzBVlQ xen-2.4.16/include/scsi/scsi.h
   1.169 +3ddb79c4R4iVwqIIeychVQYmIH4FUg xen-2.4.16/include/scsi/scsi_ioctl.h
   1.170 +3ddb79c4yw_mfd4Uikn3v_IOPRpa1Q xen-2.4.16/include/scsi/scsicam.h
   1.171 +3ddb79c4HKPMLvDBP9LxzPi_szVxGA xen-2.4.16/include/scsi/sg.h
   1.172 +3ddb79c0nTsjSpVK4ZVTI9WwN24xtQ xen-2.4.16/include/xeno/blk.h
   1.173 +3ddb79c0dVhTHLsv6CPTf4baKix4mA xen-2.4.16/include/xeno/blkdev.h
   1.174 +3ddb79c18ePBgitnOs7GiOCFilODVw xen-2.4.16/include/xeno/blkpg.h
   1.175 +3ddb79c2SisDOHDyTeK5-MV3m7pNbA xen-2.4.16/include/xeno/block.h
   1.176 +3ddb79c2JOriBs0mWh-Tlolq78tg3w xen-2.4.16/include/xeno/bootmem.h
   1.177 +3ddb79c1oOjpQbp68MW7yiUpoi-S-w xen-2.4.16/include/xeno/brlock.h
   1.178 +3ddb79c1x7Ie3kifu7dQRx8y7HVyvA xen-2.4.16/include/xeno/byteorder/big_endian.h
   1.179 +3ddb79c1qFXOEX1eD0yXJ_gsGkUt8w xen-2.4.16/include/xeno/byteorder/generic.h
   1.180 +3ddb79c1VbwFALNpgx6uC_iZKFHD-A xen-2.4.16/include/xeno/byteorder/little_endian.h
   1.181 +3ddb79c1VvNRMM35bpdZMekirCXP-A xen-2.4.16/include/xeno/byteorder/pdp_endian.h
   1.182 +3ddb79c116WbJV8bwGZXFFJy_GNNvw xen-2.4.16/include/xeno/byteorder/swab.h
   1.183 +3ddb79c1pwmlw8VXW8aaSKAVGVmjDA xen-2.4.16/include/xeno/byteorder/swabb.h
   1.184 +3ddb79c0c0cX_DZE209-Bb-Rx1v-Aw xen-2.4.16/include/xeno/cache.h
   1.185 +3ddb79c259jh8hE7vre_8NuE7nwNSA xen-2.4.16/include/xeno/config.h
   1.186 +3ddb79c1V44RD26YqCUm-kqIupM37A xen-2.4.16/include/xeno/ctype.h
   1.187 +3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen-2.4.16/include/xeno/delay.h
   1.188 +3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen-2.4.16/include/xeno/dom0_ops.h
   1.189 +3ddb79c1uaWQZj551j1O0B5z8AnHOg xen-2.4.16/include/xeno/elevator.h
   1.190 +3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen-2.4.16/include/xeno/errno.h
   1.191 +3ddb79c0rMjudDKkJku_mkm0J-BZgw xen-2.4.16/include/xeno/etherdevice.h
   1.192 +3ddb79c0T3X07lFnM9OSE-W5bqIDSQ xen-2.4.16/include/xeno/ethtool.h
   1.193 +3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen-2.4.16/include/xeno/event.h
   1.194 +3ddb79c1J4I_AjNflZL-1c1jOIlSyg xen-2.4.16/include/xeno/genhd.h
   1.195 +3ddb79c1i-chIoeniqgYwMM3EgaR5w xen-2.4.16/include/xeno/hdreg.h
   1.196 +3ddb79c12GuUuaxBKiMuwf-Qvuwpng xen-2.4.16/include/xeno/hdsmart.h
   1.197 +3ddb79c0MM575N4YvMSiw9EqKH4JDA xen-2.4.16/include/xeno/ide.h
   1.198 +3ddb79c1yHLp08JhgPxIMcZ8DwN9hg xen-2.4.16/include/xeno/if.h
   1.199 +3ddb79c1RCWOkWPQRzbYVTX_e-E7CA xen-2.4.16/include/xeno/if_ether.h
   1.200 +3ddb79c2IYah7z7hkzPyOiG8szKkyw xen-2.4.16/include/xeno/if_packet.h
   1.201 +3ddb79c0GurNF9tDWqQbAwJFH8ugfA xen-2.4.16/include/xeno/init.h
   1.202 +3ddb79c1Vi5VleJAOKHAlY0G2zAsgw xen-2.4.16/include/xeno/interrupt.h
   1.203 +3ddb79c2J6EnruiygRhBCgftzMzTeQ xen-2.4.16/include/xeno/ioctl.h
   1.204 +3ddb79c1nzaWu8NoF4xCCMSFJR4MlA xen-2.4.16/include/xeno/ioport.h
   1.205 +3ddb79c2qAxCOABlkKtD8Txohe-qEw xen-2.4.16/include/xeno/irq.h
   1.206 +3ddb79c2b3qe-6Ann09FqZBF4IrJaQ xen-2.4.16/include/xeno/irq_cpustat.h
   1.207 +3ddb79c11w_O7z7YZJnzuDSxaK5LlA xen-2.4.16/include/xeno/kdev_t.h
   1.208 +3ddb79c1NfYlOrWNqgZkj9EwtFfJow xen-2.4.16/include/xeno/lib.h
   1.209 +3ddb79c18Ajy7micDGQQfJ0zWgEHtA xen-2.4.16/include/xeno/list.h
   1.210 +3ddb79c0_s2_wgV0cA6tztEaeyy1NA xen-2.4.16/include/xeno/major.h
   1.211 +3ddb79c1fsWuKI2sGlW5bqoG2lPVNA xen-2.4.16/include/xeno/mii.h
   1.212 +3ddb79c1gs2VbLbQlw0dcDUXYIepDA xen-2.4.16/include/xeno/mm.h
   1.213 +3ddb79c13p9iHn1XAp0IS1qvj4yDsg xen-2.4.16/include/xeno/module.h
   1.214 +3ddb79c1ieLZfGSFwfvvSQ2NK1BMSg xen-2.4.16/include/xeno/multiboot.h
   1.215 +3ddb79c0CLfAlJLg1ohdPD-Jjn-jxg xen-2.4.16/include/xeno/netdevice.h
   1.216 +3ddb79c2Fg44_PBPVxHSC0gTOMq4Ow xen-2.4.16/include/xeno/pci.h
   1.217 +3ddb79c0MOVXq8qZDQRGb6z64_xAwg xen-2.4.16/include/xeno/pci_ids.h
   1.218 +3ddb79c2byJwwNNkiES__A9H4Cvc4g xen-2.4.16/include/xeno/pkt_sched.h
   1.219 +3ddb79c04nQVR3EYM5L4zxDV_MCo1g xen-2.4.16/include/xeno/prefetch.h
   1.220 +3ddb79c0LzqqS0LhAQ50ekgj4oGl7Q xen-2.4.16/include/xeno/sched.h
   1.221 +3ddb79c0VDeD-Oft5eNfMneTU3D1dQ xen-2.4.16/include/xeno/skbuff.h
   1.222 +3ddb79c14dXIhP7C2ahnoD08K90G_w xen-2.4.16/include/xeno/slab.h
   1.223 +3ddb79c09xbS-xxfKxuV3JETIhBzmg xen-2.4.16/include/xeno/smp.h
   1.224 +3ddb79c1-yIt89RT02wIPp2xDR8YjQ xen-2.4.16/include/xeno/socket.h
   1.225 +3ddb79c2V2P9F2xMCzDJ9vbUofSg_Q xen-2.4.16/include/xeno/sockios.h
   1.226 +3ddb79c2iIcESrDAB8samy_yAh6olQ xen-2.4.16/include/xeno/spinlock.h
   1.227 +3ddb79c0BnA20PbgmuMPSGIBljNRQw xen-2.4.16/include/xeno/time.h
   1.228 +3ddb79c2HFkXuRxi1CriJtSFmY6Ybw xen-2.4.16/include/xeno/timer.h
   1.229 +3ddb79c2_m8lT9jDKse_tePj7zcnNQ xen-2.4.16/include/xeno/timex.h
   1.230 +3ddb79c2e2C14HkndNEJlYwXaPrF5A xen-2.4.16/include/xeno/tqueue.h
   1.231 +3ddb79c1-kVvF8cVa0k3ZHDdBMj01Q xen-2.4.16/include/xeno/types.h
   1.232 +3ddb79c2Ae5KpzhC9LCYG7mP_Vi4Aw xen-2.4.16/include/xeno/vif.h
   1.233 +3ddb79c4YQCQ6r0xNLLu0jfbM7pVmA xen-2.4.16/net/Makefile
   1.234 +3ddb79c4AkfDkTCw0comx4L8wsUOMg xen-2.4.16/net/dev.c
   1.235 +3ddb79c4x1L_soh8b-r_1jQW_37Icw xen-2.4.16/net/dev_mcast.c
   1.236 +3ddb79c4NSDwiQ-AmrYdxcRAwLPzwQ xen-2.4.16/net/eth.c
   1.237 +3ddb79c4KZhNxUuYJ7lul8cc-wRkyg xen-2.4.16/net/sch_generic.c
   1.238 +3ddb79c4TZj1wXPKQt36O72SddtBNQ xen-2.4.16/net/skbuff.c
   1.239 +3ddb79c4ARyIHqv3Y6YFckIUbyA8Tw xen-2.4.16/net/utils.c
   1.240 +3ddb79c4x8dvwPtzclghWAKFWpEBFA xen-2.4.16/tools/Makefile
   1.241 +3ddb79c4yGZ7_22QAFFwPzqP4NSHwA xen-2.4.16/tools/elf-reloc.c
   1.242 +3ddb79bbYMXGmQTsr5BeGS_RuZ5f_w xenolinux-2.4.16-sparse/Makefile
   1.243 +3ddb79b7e0ssyz3Q1GoqjDds-x1PLQ xenolinux-2.4.16-sparse/arch/xeno/Makefile
   1.244 +3ddb79b7_rLvYZU3tOY6Wwuw_Sg3_w xenolinux-2.4.16-sparse/arch/xeno/boot/Makefile
   1.245 +3ddb79b8L4xnwrcvWk6nAbgKVbNkSA xenolinux-2.4.16-sparse/arch/xeno/config.in
   1.246 +3ddb79b7v_Be34as7_mlzFlw65hOjQ xenolinux-2.4.16-sparse/arch/xeno/defconfig
   1.247 +3ddb79b7KUvtx0knQJoRaBDZQeNidg xenolinux-2.4.16-sparse/arch/xeno/drivers/block/Makefile
   1.248 +3ddb79b6Rc0uAOGFthIFxq1KGWZ_Iw xenolinux-2.4.16-sparse/arch/xeno/drivers/block/block.c
   1.249 +3ddb79b7LLVJBGynxHSOh9A9l97sug xenolinux-2.4.16-sparse/arch/xeno/drivers/console/Makefile
   1.250 +3ddb79b7UG2QiRAU-Wvc1Y_BLigu1Q xenolinux-2.4.16-sparse/arch/xeno/drivers/console/console.c
   1.251 +3ddb79b75eo4PRXkT6Th9popt_SJhg xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile
   1.252 +3ddb79b7Xyaoep6U0kLvx6Kx7OauDw xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c
   1.253 +3ddb79b7PulSkF9m3c7K5MkxHRf4hA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h
   1.254 +3ddb79b7s7yYBioHidSkIoHtQxYmOw xenolinux-2.4.16-sparse/arch/xeno/drivers/network/Makefile
   1.255 +3ddb79b7CpLL98ScdpbKkVBktlbCtQ xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
   1.256 +3ddb79b7hqi9krq6h98lnpONHGzvEA xenolinux-2.4.16-sparse/arch/xeno/kernel/Makefile
   1.257 +3ddb79b7eyEv5bsN8EQkjIG0y11Q1A xenolinux-2.4.16-sparse/arch/xeno/kernel/entry.S
   1.258 +3ddb79b70XAg9bJwp0-DWHe0LtzlBw xenolinux-2.4.16-sparse/arch/xeno/kernel/head.S
   1.259 +3ddb79b7dDsKjU22VxQ-C5BMFaMUmw xenolinux-2.4.16-sparse/arch/xeno/kernel/hypervisor.c
   1.260 +3ddb79b7xzwEc8-lo1vu3BxB-gBURQ xenolinux-2.4.16-sparse/arch/xeno/kernel/i386_ksyms.c
   1.261 +3ddb79b7i7pfPEcy_zjDfW9JHD305g xenolinux-2.4.16-sparse/arch/xeno/kernel/i387.c
   1.262 +3ddb79b7Ti2i5ztQzM-w67zN-cJD8A xenolinux-2.4.16-sparse/arch/xeno/kernel/init_task.c
   1.263 +3ddb79b7MEQGMZrsF94atNJZ4-OGzA xenolinux-2.4.16-sparse/arch/xeno/kernel/ioport.c
   1.264 +3ddb79b7DOz-Mz5jsjRd5W8jN0XbPw xenolinux-2.4.16-sparse/arch/xeno/kernel/irq.c
   1.265 +3ddb79b7bOAPk_YAUUsruhVGO2GOOg xenolinux-2.4.16-sparse/arch/xeno/kernel/ldt.c
   1.266 +3ddb79b7qf2WK6vMKcmOLIeKN5GSjg xenolinux-2.4.16-sparse/arch/xeno/kernel/process.c
   1.267 +3ddb79b7HdaBR7yk_u51auihbr31aQ xenolinux-2.4.16-sparse/arch/xeno/kernel/ptrace.c
   1.268 +3ddb79b7yJunvnrgWD2VTy6yot0PMg xenolinux-2.4.16-sparse/arch/xeno/kernel/semaphore.c
   1.269 +3ddb79b7BIitpVygiksiMBQYvh5Z2A xenolinux-2.4.16-sparse/arch/xeno/kernel/setup.c
   1.270 +3ddb79b7DTevmwhNla67jZxjBSIKFg xenolinux-2.4.16-sparse/arch/xeno/kernel/signal.c
   1.271 +3ddb79b76mCf-gZPR4KLjL8ktZ37GA xenolinux-2.4.16-sparse/arch/xeno/kernel/sys_i386.c
   1.272 +3ddb79b7xDsEKErRFeqcSm6eRrTEIg xenolinux-2.4.16-sparse/arch/xeno/kernel/time.c
   1.273 +3ddb79b7V4dv_KAQu4Msa2Ebhd0aKw xenolinux-2.4.16-sparse/arch/xeno/kernel/traps.c
   1.274 +3ddb79b8oUnwncDaZuRWF3-n3jPsIA xenolinux-2.4.16-sparse/arch/xeno/lib/Makefile
   1.275 +3ddb79b8BMxi8qW3_NT44SSd1uMD3Q xenolinux-2.4.16-sparse/arch/xeno/lib/checksum.S
   1.276 +3ddb79b8WcWel2g9zU9pBJb-yA8jBw xenolinux-2.4.16-sparse/arch/xeno/lib/dec_and_lock.c
   1.277 +3ddb79b8oxeiaIW6Au95OM0GlsMrMw xenolinux-2.4.16-sparse/arch/xeno/lib/delay.c
   1.278 +3ddb79b8XPasnRhvK-_6xYksf3S6qA xenolinux-2.4.16-sparse/arch/xeno/lib/getuser.S
   1.279 +3ddb79b8HFLUh8mwcl4X44ta-ny1KA xenolinux-2.4.16-sparse/arch/xeno/lib/iodebug.c
   1.280 +3ddb79b879qeoLlarHXvNIDEva6ssA xenolinux-2.4.16-sparse/arch/xeno/lib/memcpy.c
   1.281 +3ddb79b869CY_yr4HymV6k98pfpMgg xenolinux-2.4.16-sparse/arch/xeno/lib/mmx.c
   1.282 +3ddb79b8S77yf1--Qa4C0ZYmqKXCww xenolinux-2.4.16-sparse/arch/xeno/lib/old-checksum.c
   1.283 +3ddb79b8ffZ79cU2ZzfA2ekeo6pqeg xenolinux-2.4.16-sparse/arch/xeno/lib/strstr.c
   1.284 +3ddb79b82kQ5oIXpxq3TUmlgxsLzLg xenolinux-2.4.16-sparse/arch/xeno/lib/usercopy.c
   1.285 +3ddb79b8qdD_svLCCAja_oP2w4Tn8Q xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
   1.286 +3ddb79b8ukY8dsPYmR8eNk-aCzFPsQ xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
   1.287 +3ddb79b856Zta9b3s0bgUCGbG1blvQ xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
   1.288 +3ddb79b85fpsKT8A9WYnuJg03b715g xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
   1.289 +3ddb79b83Zj7Xn2QVhU4HeMuAC9FjA xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
   1.290 +3ddb79b7aKdTkbr3u6aze8tVwGh_TQ xenolinux-2.4.16-sparse/arch/xeno/vmlinux.lds
   1.291 +3ddb79bbx682YH6vR2zbVOXwg73ULg xenolinux-2.4.16-sparse/drivers/block/ll_rw_blk.c
   1.292 +3ddb79bcJfHdwrPsjqgI33_OsGdVCg xenolinux-2.4.16-sparse/drivers/block/rd.c
   1.293 +3ddb79bcpVu-IbnqwQqpRqsEbLpsuw xenolinux-2.4.16-sparse/drivers/char/tty_io.c
   1.294 +3ddb79bba_zKpuurHVeWfgDkyPoq8A xenolinux-2.4.16-sparse/fs/nfs/nfsroot.c
   1.295 +3ddb79b8VFtfWSCrXKPN2K21zd_vtw xenolinux-2.4.16-sparse/include/asm-xeno/a.out.h
   1.296 +3ddb79b8Zzi13p3OAPV25QgiC3THAQ xenolinux-2.4.16-sparse/include/asm-xeno/apic.h
   1.297 +3ddb79baZDlsdV_m6C5CXnWMl15p1g xenolinux-2.4.16-sparse/include/asm-xeno/apicdef.h
   1.298 +3ddb79baZM88u4CnriVA8ZXBdnMNvg xenolinux-2.4.16-sparse/include/asm-xeno/atomic.h
   1.299 +3ddb79baYHyZsDCiXiq8Y8_XxHE-jQ xenolinux-2.4.16-sparse/include/asm-xeno/bitops.h
   1.300 +3ddb79b8vFGtGb6pg3GZFXSiwOZfcg xenolinux-2.4.16-sparse/include/asm-xeno/boot.h
   1.301 +3ddb79baW8tf6PiBQUF50QQM5nY9sw xenolinux-2.4.16-sparse/include/asm-xeno/bugs.h
   1.302 +3ddb79b80msOlzTZRoVudYdemzgOlA xenolinux-2.4.16-sparse/include/asm-xeno/byteorder.h
   1.303 +3ddb79b8brNSUEujnq8f_zr8kA-cUg xenolinux-2.4.16-sparse/include/asm-xeno/cache.h
   1.304 +3ddb79bayhr6C6prVhAYlFRChhf3wg xenolinux-2.4.16-sparse/include/asm-xeno/checksum.h
   1.305 +3ddb79b8RNUaDbpPjdVVwKAsbiTBKQ xenolinux-2.4.16-sparse/include/asm-xeno/cpufeature.h
   1.306 +3ddb79b8pJe4aNsUKkfHEoBT9Y-UMA xenolinux-2.4.16-sparse/include/asm-xeno/current.h
   1.307 +3ddb79b8KL7icUfxKRoWDIkHkLQ1kQ xenolinux-2.4.16-sparse/include/asm-xeno/debugreg.h
   1.308 +3ddb79baDUP_cRdFgqaH0rXUvMxx4A xenolinux-2.4.16-sparse/include/asm-xeno/delay.h
   1.309 +3ddb79b89CgBTFsS3joEJ1ZniSHEgA xenolinux-2.4.16-sparse/include/asm-xeno/desc.h
   1.310 +3ddb79ba6xyT4mJOYSp1Fg2l0ta93A xenolinux-2.4.16-sparse/include/asm-xeno/div64.h
   1.311 +3ddb79b80Z4ZUIqbD1Xu_t4OCuEHeQ xenolinux-2.4.16-sparse/include/asm-xeno/dma.h
   1.312 +3ddb79bac26NkKcPIEsfxETc5Snyag xenolinux-2.4.16-sparse/include/asm-xeno/elf.h
   1.313 +3ddb79ba722pCJ_g_xI8ebsE31IK-Q xenolinux-2.4.16-sparse/include/asm-xeno/errno.h
   1.314 +3ddb79b8vIpUpgaSNEneFkg5hYSvNg xenolinux-2.4.16-sparse/include/asm-xeno/fcntl.h
   1.315 +3ddb79b8c_oKu2_BGNJctM4DBET31Q xenolinux-2.4.16-sparse/include/asm-xeno/fixmap.h
   1.316 +3ddb79b8780YvqvK1g5KPIWzQ6P15w xenolinux-2.4.16-sparse/include/asm-xeno/floppy.h
   1.317 +3ddb79bas-nFywnmilbUeT34PEAA0g xenolinux-2.4.16-sparse/include/asm-xeno/hardirq.h
   1.318 +3ddb79batzR40ZFY9dvgs5f1aM9I6g xenolinux-2.4.16-sparse/include/asm-xeno/hdreg.h
   1.319 +3ddb79b90xBgbeYgCcImS2ZxJakxBA xenolinux-2.4.16-sparse/include/asm-xeno/highmem.h
   1.320 +3ddb79baXLZV3dUKQI2gIYpAy67RuA xenolinux-2.4.16-sparse/include/asm-xeno/hw_irq.h
   1.321 +3ddb79b82xfEY3yBet-2FXY4p8b7yg xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/block.h
   1.322 +3ddb79b8KUSolAgH19qEzo1Ey0f1Ng xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/hypervisor-if.h
   1.323 +3ddb79b8J0Y2UA8NKoN5Ng71WFQRIg xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor-ifs/network.h
   1.324 +3ddb79bapQ9Z9ewa5O1pqAVaNBTazg xenolinux-2.4.16-sparse/include/asm-xeno/hypervisor.h
   1.325 +3ddb79baL-pjPI8hg5xjPgd4__SlOA xenolinux-2.4.16-sparse/include/asm-xeno/i387.h
   1.326 +3ddb79ba66TwvG7HpbBo04fRhmj3KQ xenolinux-2.4.16-sparse/include/asm-xeno/ide.h
   1.327 +3ddb79bahFfCuRcmd9kBYA-CuGtCSg xenolinux-2.4.16-sparse/include/asm-xeno/init.h
   1.328 +3ddb79ba3wnwflaNW6QheYvxoj5S8Q xenolinux-2.4.16-sparse/include/asm-xeno/io.h
   1.329 +3ddb79ba1MhHpElCXFlijej2zWVk4g xenolinux-2.4.16-sparse/include/asm-xeno/io_apic.h
   1.330 +3ddb79baiyfcZN9rJwhq0UeFjI6GkQ xenolinux-2.4.16-sparse/include/asm-xeno/ioctl.h
   1.331 +3ddb79badReKYSok3yHShb4jg0vA-A xenolinux-2.4.16-sparse/include/asm-xeno/ioctls.h
   1.332 +3ddb79b9iuIxteTXg8_myIsrWF5uxg xenolinux-2.4.16-sparse/include/asm-xeno/ipc.h
   1.333 +3ddb79baw5Dxo78880UMSanDV70WdA xenolinux-2.4.16-sparse/include/asm-xeno/ipcbuf.h
   1.334 +3ddb79ban9FUBuEaznRZvPfry3xnHQ xenolinux-2.4.16-sparse/include/asm-xeno/irq.h
   1.335 +3ddb79banJ5r-mqE7LZ8nPGTADaGBA xenolinux-2.4.16-sparse/include/asm-xeno/kdb.h
   1.336 +3ddb79baoaoHGTW0oiBXFPUg4KWydw xenolinux-2.4.16-sparse/include/asm-xeno/kdbprivate.h
   1.337 +3ddb79bam2K6c7tS2HX6jis2Bqx71w xenolinux-2.4.16-sparse/include/asm-xeno/keyboard.h
   1.338 +3ddb79baQ8gGrZm2Jlo3I1ntD0H5HA xenolinux-2.4.16-sparse/include/asm-xeno/kmap_types.h
   1.339 +3ddb79b9MeQEYrafy-Mx9OoeVFM_uw xenolinux-2.4.16-sparse/include/asm-xeno/ldt.h
   1.340 +3ddb79baAsy5W-cJ9ML_w9chqqUh4A xenolinux-2.4.16-sparse/include/asm-xeno/linux_logo.h
   1.341 +3ddb79b9uhsumPVE0wGgarfIkYDSFA xenolinux-2.4.16-sparse/include/asm-xeno/locks.h
   1.342 +3ddb79b954ISbF9e68hB0WTulCJRgg xenolinux-2.4.16-sparse/include/asm-xeno/math_emu.h
   1.343 +3ddb79b9EZDlC6RGn_y0OYr0nyQWlw xenolinux-2.4.16-sparse/include/asm-xeno/mc146818rtc.h
   1.344 +3ddb79b9M6pTF4maDgh8TYbg_HHUbw xenolinux-2.4.16-sparse/include/asm-xeno/mca_dma.h
   1.345 +3ddb79baA7dlps8FkicOUEXKEQuQsA xenolinux-2.4.16-sparse/include/asm-xeno/mman.h
   1.346 +3ddb79baKfF36-eRvkxnEvMRQRai-w xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h
   1.347 +3ddb79baQyKbT5U4EmZNePY9Txp-tA xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h
   1.348 +3ddb79bberC3Ghs4vy-06Pu-LSiWtw xenolinux-2.4.16-sparse/include/asm-xeno/mmx.h
   1.349 +3ddb79bbsJLF10xQcKDoV8f_7gcOXg xenolinux-2.4.16-sparse/include/asm-xeno/module.h
   1.350 +3ddb79bbY5RffJ8_F1oC4VI7q3Eejg xenolinux-2.4.16-sparse/include/asm-xeno/mpspec.h
   1.351 +3ddb79b90vB4Vzzy_wL6SYXQMy9N9Q xenolinux-2.4.16-sparse/include/asm-xeno/msgbuf.h
   1.352 +3ddb79bbZ5a_vTk0xcgOHuPokaqwMw xenolinux-2.4.16-sparse/include/asm-xeno/msr.h
   1.353 +3ddb79b9y1xAKaPiBd79MBcCopNNYw xenolinux-2.4.16-sparse/include/asm-xeno/mtrr.h
   1.354 +3ddb79b90hX0QBJlWQN_VsHtX5Ijkw xenolinux-2.4.16-sparse/include/asm-xeno/namei.h
   1.355 +3ddb79bbG2p9MNq7tuISz8md1Oj2lg xenolinux-2.4.16-sparse/include/asm-xeno/page.h
   1.356 +3ddb79bb_iUa2piFSwaB8YPw-rB5SQ xenolinux-2.4.16-sparse/include/asm-xeno/param.h
   1.357 +3ddb79b9Y8UU0S9AoDznoqqcYxg9-A xenolinux-2.4.16-sparse/include/asm-xeno/parport.h
   1.358 +3ddb79b9K9_edWxBHS7TdCpyDmfp6g xenolinux-2.4.16-sparse/include/asm-xeno/pgalloc.h
   1.359 +3ddb79bahaS_P3UYp9VEU6kHxXbajA xenolinux-2.4.16-sparse/include/asm-xeno/pgtable-2level.h
   1.360 +3ddb79b9MjXUB_rk29GJgaNY24feCw xenolinux-2.4.16-sparse/include/asm-xeno/pgtable-3level.h
   1.361 +3ddb79bbPF2ENpNHBru8K3hyYVPmkQ xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h
   1.362 +3ddb79b9t9xKrOf8aP3X5jEit9tj-w xenolinux-2.4.16-sparse/include/asm-xeno/poll.h
   1.363 +3ddb79b9xHtTDWFaI9ncAtxyUth0Yg xenolinux-2.4.16-sparse/include/asm-xeno/posix_types.h
   1.364 +3ddb79b9VwZ9YsSpA7CkJmYXVadrCA xenolinux-2.4.16-sparse/include/asm-xeno/processor.h
   1.365 +3ddb79bbKaOkHrGG9j05AGlii-voaQ xenolinux-2.4.16-sparse/include/asm-xeno/ptrace.h
   1.366 +3ddb79bbVpJCVTXmc2yNf1rsC00YEg xenolinux-2.4.16-sparse/include/asm-xeno/resource.h
   1.367 +3ddb79bbtU0Kh27NbNpqKAIhshJvXQ xenolinux-2.4.16-sparse/include/asm-xeno/rwlock.h
   1.368 +3ddb79b97LhdmW6hYmybQOPDkK6plg xenolinux-2.4.16-sparse/include/asm-xeno/rwsem.h
   1.369 +3ddb79bbgRzM1NoXYbLoOCbZt8s5NA xenolinux-2.4.16-sparse/include/asm-xeno/scatterlist.h
   1.370 +3ddb79b9xCi3F80Z6xxx6nzkCAo8vQ xenolinux-2.4.16-sparse/include/asm-xeno/segment.h
   1.371 +3ddb79b9WxySbCKDoMgTgPtn1G3BFw xenolinux-2.4.16-sparse/include/asm-xeno/semaphore.h
   1.372 +3ddb79b9gNK3dtgXONloSBaNgZPjAg xenolinux-2.4.16-sparse/include/asm-xeno/sembuf.h
   1.373 +3ddb79bb2UTDgfwju2J3weDXemw3LA xenolinux-2.4.16-sparse/include/asm-xeno/serial.h
   1.374 +3ddb79bbAPFyA_n4dNVCRoee8obOKA xenolinux-2.4.16-sparse/include/asm-xeno/setup.h
   1.375 +3ddb79b9XTOB5DwWBGzPLLR4rNEkDQ xenolinux-2.4.16-sparse/include/asm-xeno/shmbuf.h
   1.376 +3ddb79b9-_jDlAj3qVZe4opi3zectQ xenolinux-2.4.16-sparse/include/asm-xeno/shmparam.h
   1.377 +3ddb79b9AW75ErwlTRX4McxO15sEaQ xenolinux-2.4.16-sparse/include/asm-xeno/sigcontext.h
   1.378 +3ddb79b9NBJW-KAI3mgveUCr7sIOwA xenolinux-2.4.16-sparse/include/asm-xeno/siginfo.h
   1.379 +3ddb79bbj0i8tUVNMKtZVrLJqv3Nsw xenolinux-2.4.16-sparse/include/asm-xeno/signal.h
   1.380 +3ddb79bbfAmpotdy-No2dwGez2fnIg xenolinux-2.4.16-sparse/include/asm-xeno/smp.h
   1.381 +3ddb79b9pDERXiqSumFWMTFJ1X9xIw xenolinux-2.4.16-sparse/include/asm-xeno/smplock.h
   1.382 +3ddb79bbuCOIWTlWEHgOTexEBbdDow xenolinux-2.4.16-sparse/include/asm-xeno/socket.h
   1.383 +3ddb79b9ExeUznVBlSn1e2nvOCrJ4A xenolinux-2.4.16-sparse/include/asm-xeno/sockios.h
   1.384 +3ddb79b9kL3xvucBb-Gmg4_vo-99vw xenolinux-2.4.16-sparse/include/asm-xeno/softirq.h
   1.385 +3ddb79b9rJ8AfSzGzA0arI8mazYLlQ xenolinux-2.4.16-sparse/include/asm-xeno/spinlock.h
   1.386 +3ddb79bbXaA_zUHNPkAKRNz1h0gIJw xenolinux-2.4.16-sparse/include/asm-xeno/stat.h
   1.387 +3ddb79b9G004IlCplrjWgF1aXbp8dA xenolinux-2.4.16-sparse/include/asm-xeno/statfs.h
   1.388 +3ddb79bbRsy3GlCFrQEbVMVp--xlwQ xenolinux-2.4.16-sparse/include/asm-xeno/string-486.h
   1.389 +3ddb79bb4xug4cDph6ODLQFQIan_sg xenolinux-2.4.16-sparse/include/asm-xeno/string.h
   1.390 +3ddb79b9JhjJtJUO3g5LmrHPkdxgKg xenolinux-2.4.16-sparse/include/asm-xeno/system.h
   1.391 +3ddb79b9tbjCU9zSbqKbbv4m8tijlg xenolinux-2.4.16-sparse/include/asm-xeno/termbits.h
   1.392 +3ddb79bbi0mW10tH4xX1_KHXKM_xPg xenolinux-2.4.16-sparse/include/asm-xeno/termios.h
   1.393 +3ddb79b9JuR1VvNzlkyMlA-Dnlmy9Q xenolinux-2.4.16-sparse/include/asm-xeno/timex.h
   1.394 +3ddb79b9Bofq-p3sCTF0ELVuf_iBYA xenolinux-2.4.16-sparse/include/asm-xeno/tlb.h
   1.395 +3ddb79b9tpBUqS8-S6euSqyk2hFkKg xenolinux-2.4.16-sparse/include/asm-xeno/types.h
   1.396 +3ddb79bb5bkAaEzD7pdqQZdWyA_0eQ xenolinux-2.4.16-sparse/include/asm-xeno/uaccess.h
   1.397 +3ddb79bbiDIz1dxgFixHKyGuqRqfDQ xenolinux-2.4.16-sparse/include/asm-xeno/ucontext.h
   1.398 +3ddb79ba_Smn-GiYtr5ZTMaZXn-AHg xenolinux-2.4.16-sparse/include/asm-xeno/unaligned.h
   1.399 +3ddb79bb3cMSs_k2X5Oq2hOIBvmPYA xenolinux-2.4.16-sparse/include/asm-xeno/unistd.h
   1.400 +3ddb79ba2qYtIQAT_-vCFkkZUXu_UQ xenolinux-2.4.16-sparse/include/asm-xeno/user.h
   1.401 +3ddb79bbqhb9X9qWOz5Bv4wOzrkITg xenolinux-2.4.16-sparse/include/asm-xeno/vga.h
   1.402 +3ddb79bbA52x94o6uwDYsbzrH2hjzA xenolinux-2.4.16-sparse/include/asm-xeno/xor.h
   1.403 +3ddb79bb_7YG4U75ZmEic9YXWTW7Vw xenolinux-2.4.16-sparse/include/linux/sunrpc/debug.h
   1.404 +3ddb79bcxkVPfWlZ1PQKvDrfArzOVw xenolinux-2.4.16-sparse/kernel/panic.c
   1.405 +3ddb79bbP31im-mx2NbfthSeqty1Dg xenolinux-2.4.16-sparse/mk
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Wed Nov 20 12:02:17 2002 +0000
     2.3 @@ -0,0 +1,1 @@
     2.4 +smh22@boulderdash.cl.cam.ac.uk
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen-2.4.16/Makefile	Wed Nov 20 12:02:17 2002 +0000
     3.3 @@ -0,0 +1,39 @@
     3.4 +
     3.5 +export BASEDIR := $(shell pwd)
     3.6 +
     3.7 +include Rules.mk
     3.8 +
     3.9 +default: $(TARGET)
    3.10 +
    3.11 +install: $(TARGET)
    3.12 +	gzip -f -9 < $(TARGET) > $(TARGET).gz
    3.13 +	cp $(TARGET).gz ../../install/images/image
    3.14 +
    3.15 +clean: delete-links
    3.16 +	$(MAKE) -C tools clean
    3.17 +	$(MAKE) -C common clean
    3.18 +	$(MAKE) -C net clean
    3.19 +	$(MAKE) -C drivers clean
    3.20 +	$(MAKE) -C arch/$(ARCH) clean
    3.21 +	rm -f *.o $(TARGET)* *~ core
    3.22 +
    3.23 +$(TARGET): make-links
    3.24 +	$(MAKE) -C tools
    3.25 +	$(MAKE) -C common
    3.26 +	$(MAKE) -C net
    3.27 +	$(MAKE) -C drivers
    3.28 +	$(MAKE) -C arch/$(ARCH)
    3.29 +
    3.30 +make-links:
    3.31 +	ln -sf xeno include/linux
    3.32 +	ln -sf asm-$(ARCH) include/asm
    3.33 +
    3.34 +delete-links:
    3.35 +	rm -f include/linux include/asm
    3.36 +
    3.37 +SUBDIRS         =arch common drivers net 
    3.38 +TAGS: 
    3.39 +	etags `find include/asm-$(ARCH) -name '*.h'`
    3.40 +	find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print | xargs etags -a
    3.41 +	find $(SUBDIRS) -name '*.[ch]' | xargs etags -a
    3.42 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen-2.4.16/README	Wed Nov 20 12:02:17 2002 +0000
     4.3 @@ -0,0 +1,145 @@
     4.4 +
     4.5 +*****************************************************
     4.6 +   Xeno Hypervisor (18/7/02)
     4.7 +
     4.8 +1) Tree layout
     4.9 +Looks rather like a simplified Linux :-)
     4.10 +Headers are in include/xeno and include/asm-<arch>.
    4.11 +At build time we create symlinks:
    4.12 + include/linux -> include/xeno
    4.13 + include/asm   -> include/asm-<arch>
    4.14 +In this way, Linux device drivers should need less tweaking of
    4.15 +their #include lines.
    4.16 +
    4.17 +For source files, mapping between hypervisor and Linux is:
    4.18 + Linux                 Hypervisor
    4.19 + -----                 ----------
    4.20 + kernel/init/mm/lib -> common
    4.21 + net/*              -> net/*
    4.22 + drivers/*          -> drivers/*
    4.23 + arch/*             -> arch/*
    4.24 +
    4.25 +Note that the use of #include <asm/...> and #include <linux/...> can
    4.26 +lead to confusion, as such files will often exist on the system include
    4.27 +path, even if a version doesn't exist within the hypervisor tree.
    4.28 +Unfortunately '-nostdinc' cannot be specified to the compiler, as that
    4.29 +prevents us using stdarg.h in the compiler's own header directory.
    4.30 +
     4.31 +We try to modify things in drivers/* as little as possible, so we can
    4.32 +easily take updates from Linux. arch/* is basically straight from
    4.33 +Linux, with fingers in Linux-specific pies hacked off. common/* has
    4.34 +a lot of Linux code in it, but certain subsystems (task maintenance,
    4.35 +low-level memory handling) have been replaced. net/* contains enough
    4.36 +Linux-like gloop to get network drivers to work with little/no
    4.37 +modification.
    4.38 +
    4.39 +2) Building
    4.40 +'make': Builds ELF executable called 'image' in base directory
    4.41 +'make install': gzip-compresses 'image' and copies it to TFTP server
    4.42 +'make clean': removes *all* build and target files
    4.43 +
    4.44 +
    4.45 +*****************************************************
    4.46 +Random thoughts and stuff from here down...
    4.47 +
    4.48 +Todo list
    4.49 +---------
    4.50 +* Hypervisor need only directly map its own memory pool
    4.51 +  (maybe 128MB, tops). That would need 0x08000000....
     4.52 +  This would allow a 512MB Linux with plenty of room for vmalloc'ed areas.
    4.53 +* Network device -- port drivers to hypervisor, implement virtual
    4.54 +  driver for xeno-linux. Looks like Ethernet.
    4.55 +  -- Hypervisor needs to do (at a minimum):
    4.56 +       - packet filtering on tx (unicast IP only)
    4.57 +       - packet demux on rx     (unicast IP only)
     4.58 +       - provide DHCP [maybe do something simpler?]
    4.59 +         and ARP [at least for hypervisor IP address]
    4.60 +
    4.61 +
    4.62 +Segment descriptor tables
    4.63 +-------------------------
    4.64 +We want to allow guest OSes to specify GDT and LDT tables using their
    4.65 +own pages of memory (just like with page tables). So allow the following:
    4.66 + * new_table_entry(ptr, val)
    4.67 +   [Allows insertion of a code, data, or LDT descriptor into given
    4.68 +    location. Can simply be checked then poked, with no need to look at
    4.69 +    page type.]
     4.70 + * new_GDT() -- relevant virtual pages are resolved to frames. Either
    4.71 +    (i) page not present; or (ii) page is only mapped read-only and checks
    4.72 +    out okay (then marked as special page). Old table is resolved first,
    4.73 +    and the pages are unmarked (no longer special type).
    4.74 + * new_LDT() -- same as for new_GDT(), with same special page type.
    4.75 +
    4.76 +Page table updates must be hooked, so we look for updates to virtual page
    4.77 +addresses in the GDT/LDT range. If map to not present, then old physpage
    4.78 +has type_count decremented. If map to present, ensure read-only, check the
    4.79 +page, and set special type.
    4.80 +
     4.81 +Merge set_{LDT,GDT} into update_baseptrs, by passing four args:
    4.82 + update_baseptrs(mask, ptab, gdttab, ldttab);
    4.83 +Update of ptab requires update of gtab (or set to internal default).
    4.84 +Update of gtab requires update of ltab (or set to internal default).
    4.85 +
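Read as C, the table-management calls proposed above might look roughly as
follows. This is only a sketch assembled from these notes, not the interface
Xen actually shipped; the mask bit names and argument types are illustrative
assumptions.

    /* Sketch of the descriptor-table interface proposed in the notes above. */
    /* All names and types are illustrative assumptions, not real Xen code.  */

    /* Insert a code, data or LDT descriptor at the given location; the      */
    /* value can simply be checked and then poked, with no need to look at   */
    /* the page type.                                                        */
    int new_table_entry(unsigned long *ptr, unsigned long val);

    /* Mask bits saying which base pointers update_baseptrs() is changing.   */
    #define UPD_PTAB (1 << 0)   /* page-table base                           */
    #define UPD_GDT  (1 << 1)   /* GDT pages (validated and marked special)  */
    #define UPD_LDT  (1 << 2)   /* LDT pages (same special page type)        */

    /* Combined replacement for set_GDT()/set_LDT(): updating ptab implies   */
    /* a (possibly default) gdttab, and updating gdttab implies a (possibly  */
    /* default) ldttab, as stated above.                                     */
    int update_baseptrs(unsigned long mask,
                        unsigned long ptab,     /* new page-table base       */
                        unsigned long gdttab,   /* new GDT pages             */
                        unsigned long ldttab);  /* new LDT pages             */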
    4.86 +
    4.87 +The hypervisor page cache
    4.88 +-------------------------
     4.89 +This will allow guest OSes to make use of spare pages in the system, while
     4.90 +still leaving them immediately available for any new domains or memory requests.
    4.91 +The idea is that, when a page is laundered and falls off Linux's clean_LRU
     4.92 +list, rather than freeing it, it becomes a candidate for passing down into
    4.93 +the hypervisor. In return, xeno-linux may ask for one of its previously-
    4.94 +cached pages back:
    4.95 + (page, new_id) = cache_query(page, old_id);
    4.96 +If the requested page couldn't be kept, a blank page is returned.
    4.97 +When would Linux make the query? Whenever it wants a page back without
     4.98 +the delay of going to disc. Also, whenever a page would otherwise be
    4.99 +flushed to disc.
   4.100 +
   4.101 +To try and add to the cache: (blank_page, new_id) = cache_query(page, NULL);
   4.102 + [NULL means "give me a blank page"].
   4.103 +To try and retrieve from the cache: (page, new_id) = cache_query(x_page, id)
   4.104 + [we may request that x_page just be discarded, and therefore not impinge
   4.105 +  on this domain's cache quota].
   4.106 +
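As a concrete reading of the call described above, cache_query() could be
declared as below. This is a sketch based purely on these notes; the struct
and the id conventions are assumptions for illustration, not a real Xen
interface.

    /* Sketch of the page-cache call described above (illustrative only).  */
    struct cache_result {
        void          *page;   /* page handed back (a blank page if the    */
                               /* requested one could not be kept)         */
        unsigned long  new_id; /* token for asking for this page later     */
    };

    /* Offer 'page' to the hypervisor cache and/or ask for a previously    */
    /* cached page back, identified by 'old_id'. An id of 0 stands in for  */
    /* NULL, i.e. "just give me a blank page".                             */
    struct cache_result cache_query(void *page, unsigned long old_id);

So a donation is cache_query(clean_page, 0), and a retrieval is
cache_query(x_page, old_id), where x_page may simply be discarded rather than
counted against the domain's cache quota.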
   4.107 +
   4.108 +Booting secondary processors
   4.109 +----------------------------
   4.110 +
   4.111 +start_of_day (i386/setup.c)
   4.112 +smp_boot_cpus (i386/smpboot.c)
   4.113 + * initialises boot CPU data
   4.114 + * parses APIC tables
   4.115 + * for each cpu:
   4.116 +   do_boot_cpu (i386/smpboot.c)
   4.117 +    * forks a new idle process
   4.118 +    * points initial stack inside new task struct
   4.119 +    * points initial EIP at a trampoline in very low memory
   4.120 +    * frobs remote APIC....
   4.121 +
   4.122 +On other processor:
   4.123 + * trampoline sets GDT and IDT
   4.124 + * jumps to main boot address with magic register value
   4.125 + * after setting proper page and descriptor tables, jumps to...
   4.126 +   initialize_secondary (i386/smpboot.c)
   4.127 +    * simply reads ESP/EIP out of the (new) idle task
   4.128 +    * this causes a jump to...
   4.129 +      start_secondary (i386/smpboot.c)
   4.130 +       * reset all processor state
   4.131 +       * barrier, then write bitmasks to signal back to boot cpu
   4.132 +       * then barrel into...
   4.133 +         cpu_idle (i386/process.c)
   4.134 +         [THIS IS PROBABLY REASONABLE -- BOOT CPU SHOULD KICK
   4.135 +          SECONDARIES TO GET WORK DONE]
   4.136 +
   4.137 +
   4.138 +SMP capabilities
   4.139 +----------------
   4.140 +
   4.141 +Current intention is to allow hypervisor to schedule on all processors in
   4.142 +SMP boxen, but to tie each domain to a single processor. This simplifies
   4.143 +many SMP intricacies both in terms of correctness and efficiency (eg.
   4.144 +TLB flushing, network packet delivery, ...).
   4.145 +
   4.146 +Clients can still make use of SMP by installing multiple domains on a single
   4.147 +machine, and treating it as a fast cluster (at the very least, the
   4.148 +hypervisor will have fast routing of locally-destined packets).
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen-2.4.16/Rules.mk	Wed Nov 20 12:02:17 2002 +0000
     5.3 @@ -0,0 +1,31 @@
     5.4 +
     5.5 +ARCH    := i386
     5.6 +
     5.7 +TARGET  := $(BASEDIR)/image
     5.8 +HDRS    := $(wildcard $(BASEDIR)/include/xeno/*.h)
     5.9 +HDRS    += $(wildcard $(BASEDIR)/include/scsi/*.h)
    5.10 +HDRS    += $(wildcard $(BASEDIR)/include/hypervisor-ifs/*.h)
    5.11 +HDRS    += $(wildcard $(BASEDIR)/include/asm-$(ARCH)/*.h)
    5.12 +
    5.13 +C_SRCS  := $(wildcard *.c)
    5.14 +S_SRCS  := $(wildcard *.S)
    5.15 +OBJS    := $(patsubst %.S,%.o,$(S_SRCS))
    5.16 +OBJS    += $(patsubst %.c,%.o,$(C_SRCS))
    5.17 +
    5.18 +# Note that link order matters!
    5.19 +ALL_OBJS := $(BASEDIR)/common/common.o
    5.20 +ALL_OBJS += $(BASEDIR)/net/network.o
    5.21 +ALL_OBJS += $(BASEDIR)/drivers/pci/driver.o
    5.22 +ALL_OBJS += $(BASEDIR)/drivers/net/driver.o
    5.23 +ALL_OBJS += $(BASEDIR)/drivers/block/driver.o
    5.24 +ALL_OBJS += $(BASEDIR)/drivers/ide/driver.o
    5.25 +ALL_OBJS += $(BASEDIR)/arch/$(ARCH)/arch.o
    5.26 +
    5.27 +include $(BASEDIR)/arch/$(ARCH)/Rules.mk
    5.28 +
    5.29 +%.o: %.c $(HDRS) Makefile
    5.30 +	$(CC) $(CFLAGS) -c $< -o $@
    5.31 +
    5.32 +%.o: %.S $(HDRS) Makefile
    5.33 +	$(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
    5.34 +
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen-2.4.16/arch/i386/Makefile	Wed Nov 20 12:02:17 2002 +0000
     6.3 @@ -0,0 +1,17 @@
     6.4 +
     6.5 +include $(BASEDIR)/Rules.mk
     6.6 +
     6.7 +# What happens here? We link monitor object files together, starting
      6.8 +# at MONITOR_BASE (a very high address). But the bootloader cannot put
     6.9 +# things there, so we initially load at LOAD_BASE. A hacky little
    6.10 +# tool called `elf-reloc' is used to modify segment offsets from
    6.11 +# MONITOR_BASE-relative to LOAD_BASE-relative.
    6.12 +# (NB. Linux gets round this by turning its image into raw binary, then 
    6.13 +# wrapping that with a low-memory bootstrapper.)
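# (Editor's note, illustration only: with the values in arch/i386/Rules.mk,
#  MONITOR_BASE=0xE0100000 and LOAD_BASE=0x00100000, so elf-reloc shifts the
#  segment load addresses down by MONITOR_BASE - LOAD_BASE = 0xE0000000.)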
    6.14 +default: boot/boot.o $(OBJS)
    6.15 +	$(LD) -r -o arch.o $(OBJS)
    6.16 +	$(LD) $(LDFLAGS) boot/boot.o $(ALL_OBJS) -o $(TARGET)
    6.17 +	$(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
    6.18 +
    6.19 +clean:
    6.20 +	rm -f *.o *~ core boot/*.o boot/*~ boot/core
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen-2.4.16/arch/i386/Rules.mk	Wed Nov 20 12:02:17 2002 +0000
     7.3 @@ -0,0 +1,14 @@
     7.4 +########################################
     7.5 +# x86-specific definitions
     7.6 +
     7.7 +CC := gcc
     7.8 +LD := ld
     7.9 +# Linker should relocate monitor to this address
    7.10 +MONITOR_BASE := 0xE0100000
    7.11 +# Bootloader should load monitor to this real address
    7.12 +LOAD_BASE    := 0x00100000
    7.13 +CFLAGS  := -fno-builtin -O3 -Wall -DMONITOR_BASE=$(MONITOR_BASE) 
    7.14 +CFLAGS  += -I$(BASEDIR)/include -D__KERNEL__
    7.15 +LDFLAGS := -T xeno.lds -N
    7.16 +
    7.17 +
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen-2.4.16/arch/i386/apic.c	Wed Nov 20 12:02:17 2002 +0000
     8.3 @@ -0,0 +1,836 @@
     8.4 +/*
     8.5 + *	Local APIC handling, local APIC timers
     8.6 + *
     8.7 + *	(c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
     8.8 + *
     8.9 + *	Fixes
    8.10 + *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
    8.11 + *					thanks to Eric Gilmore
    8.12 + *					and Rolf G. Tews
    8.13 + *					for testing these extensively.
    8.14 + */
    8.15 +
    8.16 +#include <xeno/config.h>
    8.17 +#include <xeno/init.h>
    8.18 +#include <xeno/sched.h>
    8.19 +#include <xeno/irq.h>
    8.20 +#include <xeno/delay.h>
    8.21 +#include <asm/mc146818rtc.h>
    8.22 +#include <asm/msr.h>
    8.23 +#include <xeno/errno.h>
    8.24 +#include <asm/atomic.h>
    8.25 +#include <xeno/smp.h>
    8.26 +#include <xeno/interrupt.h>
    8.27 +#include <asm/mpspec.h>
    8.28 +#include <asm/pgalloc.h>
    8.29 +#include <asm/hardirq.h>
    8.30 +
    8.31 +/* Using APIC to generate smp_local_timer_interrupt? */
    8.32 +int using_apic_timer = 0;
    8.33 +
    8.34 +int get_maxlvt(void)
    8.35 +{
    8.36 +    unsigned int v, ver, maxlvt;
    8.37 +
    8.38 +    v = apic_read(APIC_LVR);
    8.39 +    ver = GET_APIC_VERSION(v);
    8.40 +    /* 82489DXs do not report # of LVT entries. */
    8.41 +    maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
    8.42 +    return maxlvt;
    8.43 +}
    8.44 +
    8.45 +void clear_local_APIC(void)
    8.46 +{
    8.47 +    int maxlvt;
    8.48 +    unsigned long v;
    8.49 +
    8.50 +    maxlvt = get_maxlvt();
    8.51 +
    8.52 +    /*
    8.53 +     * Careful: we have to set masks only first to deassert
    8.54 +     * any level-triggered sources.
    8.55 +     */
    8.56 +    v = apic_read(APIC_LVTT);
    8.57 +    apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
    8.58 +    v = apic_read(APIC_LVT0);
    8.59 +    apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
    8.60 +    v = apic_read(APIC_LVT1);
    8.61 +    apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
    8.62 +    if (maxlvt >= 3) {
    8.63 +        v = apic_read(APIC_LVTERR);
    8.64 +        apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
    8.65 +    }
    8.66 +    if (maxlvt >= 4) {
    8.67 +        v = apic_read(APIC_LVTPC);
    8.68 +        apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
    8.69 +    }
    8.70 +
    8.71 +    /*
    8.72 +     * Clean APIC state for other OSs:
    8.73 +     */
    8.74 +    apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
    8.75 +    apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
    8.76 +    apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
    8.77 +    if (maxlvt >= 3)
    8.78 +        apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
    8.79 +    if (maxlvt >= 4)
    8.80 +        apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
    8.81 +}
    8.82 +
    8.83 +void __init connect_bsp_APIC(void)
    8.84 +{
    8.85 +    if (pic_mode) {
    8.86 +        /*
    8.87 +         * Do not trust the local APIC being empty at bootup.
    8.88 +         */
    8.89 +        clear_local_APIC();
    8.90 +        /*
    8.91 +         * PIC mode, enable APIC mode in the IMCR, i.e.
    8.92 +         * connect BSP's local APIC to INT and NMI lines.
    8.93 +         */
    8.94 +        printk("leaving PIC mode, enabling APIC mode.\n");
    8.95 +        outb(0x70, 0x22);
    8.96 +        outb(0x01, 0x23);
    8.97 +    }
    8.98 +}
    8.99 +
   8.100 +void disconnect_bsp_APIC(void)
   8.101 +{
   8.102 +    if (pic_mode) {
   8.103 +        /*
   8.104 +         * Put the board back into PIC mode (has an effect
   8.105 +         * only on certain older boards).  Note that APIC
   8.106 +         * interrupts, including IPIs, won't work beyond
   8.107 +         * this point!  The only exception are INIT IPIs.
   8.108 +         */
   8.109 +        printk("disabling APIC mode, entering PIC mode.\n");
   8.110 +        outb(0x70, 0x22);
   8.111 +        outb(0x00, 0x23);
   8.112 +    }
   8.113 +}
   8.114 +
   8.115 +void disable_local_APIC(void)
   8.116 +{
   8.117 +    unsigned long value;
   8.118 +
   8.119 +    clear_local_APIC();
   8.120 +
   8.121 +    /*
   8.122 +     * Disable APIC (implies clearing of registers
   8.123 +     * for 82489DX!).
   8.124 +     */
   8.125 +    value = apic_read(APIC_SPIV);
   8.126 +    value &= ~APIC_SPIV_APIC_ENABLED;
   8.127 +    apic_write_around(APIC_SPIV, value);
   8.128 +}
   8.129 +
   8.130 +/*
   8.131 + * This is to verify that we're looking at a real local APIC.
   8.132 + * Check these against your board if the CPUs aren't getting
   8.133 + * started for no apparent reason.
   8.134 + */
   8.135 +int __init verify_local_APIC(void)
   8.136 +{
   8.137 +    unsigned int reg0, reg1;
   8.138 +
   8.139 +    /*
   8.140 +     * The version register is read-only in a real APIC.
   8.141 +     */
   8.142 +    reg0 = apic_read(APIC_LVR);
   8.143 +    Dprintk("Getting VERSION: %x\n", reg0);
   8.144 +    apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
   8.145 +    reg1 = apic_read(APIC_LVR);
   8.146 +    Dprintk("Getting VERSION: %x\n", reg1);
   8.147 +
   8.148 +    /*
   8.149 +     * The two version reads above should print the same
   8.150 +     * numbers.  If the second one is different, then we
   8.151 +     * poke at a non-APIC.
   8.152 +     */
   8.153 +    if (reg1 != reg0)
   8.154 +        return 0;
   8.155 +
   8.156 +    /*
   8.157 +     * Check if the version looks reasonable.
   8.158 +     */
   8.159 +    reg1 = GET_APIC_VERSION(reg0);
   8.160 +    if (reg1 == 0x00 || reg1 == 0xff)
   8.161 +        return 0;
   8.162 +    reg1 = get_maxlvt();
   8.163 +    if (reg1 < 0x02 || reg1 == 0xff)
   8.164 +        return 0;
   8.165 +
   8.166 +    /*
   8.167 +     * The ID register is read/write in a real APIC.
   8.168 +     */
   8.169 +    reg0 = apic_read(APIC_ID);
   8.170 +    Dprintk("Getting ID: %x\n", reg0);
   8.171 +    apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
   8.172 +    reg1 = apic_read(APIC_ID);
   8.173 +    Dprintk("Getting ID: %x\n", reg1);
   8.174 +    apic_write(APIC_ID, reg0);
   8.175 +    if (reg1 != (reg0 ^ APIC_ID_MASK))
   8.176 +        return 0;
   8.177 +
   8.178 +    /*
   8.179 +     * The next two are just to see if we have sane values.
   8.180 +     * They're only really relevant if we're in Virtual Wire
   8.181 +     * compatibility mode, but most boxes are anymore.
   8.182 +     */
   8.183 +    reg0 = apic_read(APIC_LVT0);
   8.184 +    Dprintk("Getting LVT0: %x\n", reg0);
   8.185 +    reg1 = apic_read(APIC_LVT1);
   8.186 +    Dprintk("Getting LVT1: %x\n", reg1);
   8.187 +
   8.188 +    return 1;
   8.189 +}
   8.190 +
   8.191 +void __init sync_Arb_IDs(void)
   8.192 +{
   8.193 +    /*
   8.194 +     * Wait for idle.
   8.195 +	 */
   8.196 +    apic_wait_icr_idle();
   8.197 +
   8.198 +    Dprintk("Synchronizing Arb IDs.\n");
   8.199 +    apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
   8.200 +                      | APIC_DM_INIT);
   8.201 +}
   8.202 +
   8.203 +extern void __error_in_apic_c (void);
   8.204 +
   8.205 +/*
   8.206 + * An initial setup of the virtual wire mode.
   8.207 + */
   8.208 +void __init init_bsp_APIC(void)
   8.209 +{
   8.210 +    unsigned long value, ver;
   8.211 +
   8.212 +    /*
   8.213 +     * Don't do the setup now if we have a SMP BIOS as the
   8.214 +     * through-I/O-APIC virtual wire mode might be active.
   8.215 +     */
   8.216 +    if (smp_found_config || !cpu_has_apic)
   8.217 +        return;
   8.218 +
   8.219 +    value = apic_read(APIC_LVR);
   8.220 +    ver = GET_APIC_VERSION(value);
   8.221 +
   8.222 +    /*
   8.223 +     * Do not trust the local APIC being empty at bootup.
   8.224 +     */
   8.225 +    clear_local_APIC();
   8.226 +
   8.227 +    /*
   8.228 +     * Enable APIC.
   8.229 +     */
   8.230 +    value = apic_read(APIC_SPIV);
   8.231 +    value &= ~APIC_VECTOR_MASK;
   8.232 +    value |= APIC_SPIV_APIC_ENABLED;
   8.233 +    value |= APIC_SPIV_FOCUS_DISABLED;
   8.234 +    value |= SPURIOUS_APIC_VECTOR;
   8.235 +    apic_write_around(APIC_SPIV, value);
   8.236 +
   8.237 +    /*
   8.238 +     * Set up the virtual wire mode.
   8.239 +     */
   8.240 +    apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
   8.241 +    value = APIC_DM_NMI;
   8.242 +    if (!APIC_INTEGRATED(ver))		/* 82489DX */
   8.243 +        value |= APIC_LVT_LEVEL_TRIGGER;
   8.244 +    apic_write_around(APIC_LVT1, value);
   8.245 +}
   8.246 +
   8.247 +void __init setup_local_APIC (void)
   8.248 +{
   8.249 +    unsigned long value, ver, maxlvt;
   8.250 +
   8.251 +    value = apic_read(APIC_LVR);
   8.252 +    ver = GET_APIC_VERSION(value);
   8.253 +
   8.254 +    if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
   8.255 +        __error_in_apic_c();
   8.256 +
   8.257 +    /* Double-check whether this APIC is really registered. */
   8.258 +    if (!test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
   8.259 +        BUG();
   8.260 +
   8.261 +    /*
   8.262 +     * Intel recommends to set DFR, LDR and TPR before enabling
   8.263 +     * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
   8.264 +     * document number 292116).  So here it goes...
   8.265 +     */
   8.266 +
   8.267 +    /*
   8.268 +     * In clustered apic mode, the firmware does this for us.
   8.269 +     * Put the APIC into flat delivery mode.
   8.270 +     * Must be "all ones" explicitly for 82489DX.
   8.271 +     */
   8.272 +    apic_write_around(APIC_DFR, 0xffffffff);
   8.273 +
   8.274 +    /*
   8.275 +     * Set up the logical destination ID.
   8.276 +     */
   8.277 +    value = apic_read(APIC_LDR);
   8.278 +    value &= ~APIC_LDR_MASK;
   8.279 +    value |= (1<<(smp_processor_id()+24));
   8.280 +    apic_write_around(APIC_LDR, value);
   8.281 +
   8.282 +    /*
   8.283 +     * Set Task Priority to 'accept all'. We never change this
   8.284 +     * later on.
   8.285 +     */
   8.286 +    value = apic_read(APIC_TASKPRI);
   8.287 +    value &= ~APIC_TPRI_MASK;
   8.288 +    apic_write_around(APIC_TASKPRI, value);
   8.289 +
   8.290 +    /*
   8.291 +     * Now that we are all set up, enable the APIC
   8.292 +     */
   8.293 +    value = apic_read(APIC_SPIV);
   8.294 +    value &= ~APIC_VECTOR_MASK;
   8.295 +    /*
   8.296 +     * Enable APIC
   8.297 +     */
   8.298 +    value |= APIC_SPIV_APIC_ENABLED;
   8.299 +
   8.300 +    /* Enable focus processor (bit==0) */
   8.301 +    value &= ~APIC_SPIV_FOCUS_DISABLED;
   8.302 +
   8.303 +    /* Set spurious IRQ vector */
   8.304 +    value |= SPURIOUS_APIC_VECTOR;
   8.305 +    apic_write_around(APIC_SPIV, value);
   8.306 +
   8.307 +    /*
   8.308 +     * Set up LVT0, LVT1:
   8.309 +     *
   8.310 +     * set up through-local-APIC on the BP's LINT0. This is not
   8.311 +     * strictly necessary in pure symmetric-IO mode, but sometimes
   8.312 +     * we delegate interrupts to the 8259A.
   8.313 +     */
   8.314 +    /*
   8.315 +     * TODO: set up through-local-APIC from through-I/O-APIC? --macro
   8.316 +     */
   8.317 +    value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
   8.318 +    if (!smp_processor_id()) { 
   8.319 +/* && (pic_mode || !value)) { */
   8.320 +        value = APIC_DM_EXTINT;
   8.321 +        printk("enabled ExtINT on CPU#%d\n", smp_processor_id());
   8.322 +    } else {
   8.323 +        value = APIC_DM_EXTINT | APIC_LVT_MASKED;
   8.324 +        printk("masked ExtINT on CPU#%d\n", smp_processor_id());
   8.325 +    }
   8.326 +    apic_write_around(APIC_LVT0, value);
   8.327 +
   8.328 +    /*
   8.329 +     * only the BP should see the LINT1 NMI signal, obviously.
   8.330 +     */
   8.331 +    if (!smp_processor_id())
   8.332 +        value = APIC_DM_NMI;
   8.333 +    else
   8.334 +        value = APIC_DM_NMI | APIC_LVT_MASKED;
   8.335 +    if (!APIC_INTEGRATED(ver))		/* 82489DX */
   8.336 +        value |= APIC_LVT_LEVEL_TRIGGER;
   8.337 +    apic_write_around(APIC_LVT1, value);
   8.338 +
   8.339 +    if (APIC_INTEGRATED(ver)) {		/* !82489DX */
   8.340 +        maxlvt = get_maxlvt();
   8.341 +        if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
   8.342 +            apic_write(APIC_ESR, 0);
   8.343 +        value = apic_read(APIC_ESR);
   8.344 +        printk("ESR value before enabling vector: %08lx\n", value);
   8.345 +
   8.346 +        value = ERROR_APIC_VECTOR;      // enables sending errors
   8.347 +        apic_write_around(APIC_LVTERR, value);
   8.348 +        /*
   8.349 +         * spec says clear errors after enabling vector.
   8.350 +         */
   8.351 +        if (maxlvt > 3)
   8.352 +            apic_write(APIC_ESR, 0);
   8.353 +        value = apic_read(APIC_ESR);
   8.354 +        printk("ESR value after enabling vector: %08lx\n", value);
   8.355 +    } else {
   8.356 +        printk("No ESR for 82489DX.\n");
   8.357 +    }
   8.358 +}
   8.359 +
   8.360 +
   8.361 +static inline void apic_pm_init1(void) { }
   8.362 +static inline void apic_pm_init2(void) { }
   8.363 +
   8.364 +
   8.365 +/*
   8.366 + * Detect and enable local APICs on non-SMP boards.
   8.367 + * Original code written by Keir Fraser.
   8.368 + */
   8.369 +
   8.370 +static int __init detect_init_APIC (void)
   8.371 +{
   8.372 +    u32 h, l, features;
   8.373 +    extern void get_cpu_vendor(struct cpuinfo_x86*);
   8.374 +
   8.375 +    /* Workaround for us being called before identify_cpu(). */
   8.376 +    get_cpu_vendor(&boot_cpu_data);
   8.377 +
   8.378 +    switch (boot_cpu_data.x86_vendor) {
   8.379 +    case X86_VENDOR_AMD:
   8.380 +        if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
   8.381 +            break;
   8.382 +        goto no_apic;
   8.383 +    case X86_VENDOR_INTEL:
   8.384 +        if (boot_cpu_data.x86 == 6 ||
   8.385 +            (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
   8.386 +            (boot_cpu_data.x86 == 5 && cpu_has_apic))
   8.387 +            break;
   8.388 +        goto no_apic;
   8.389 +    default:
   8.390 +        goto no_apic;
   8.391 +    }
   8.392 +
   8.393 +    if (!cpu_has_apic) {
   8.394 +        /*
   8.395 +         * Some BIOSes disable the local APIC in the
   8.396 +         * APIC_BASE MSR. This can only be done in
   8.397 +         * software for Intel P6 and AMD K7 (Model > 1).
   8.398 +         */
   8.399 +        rdmsr(MSR_IA32_APICBASE, l, h);
   8.400 +        if (!(l & MSR_IA32_APICBASE_ENABLE)) {
   8.401 +            printk("Local APIC disabled by BIOS -- reenabling.\n");
   8.402 +            l &= ~MSR_IA32_APICBASE_BASE;
   8.403 +            l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
   8.404 +            wrmsr(MSR_IA32_APICBASE, l, h);
   8.405 +        }
   8.406 +    }
   8.407 +    /*
   8.408 +     * The APIC feature bit should now be enabled
   8.409 +     * in `cpuid'
   8.410 +     */
   8.411 +    features = cpuid_edx(1);
   8.412 +    if (!(features & (1 << X86_FEATURE_APIC))) {
   8.413 +        printk("Could not enable APIC!\n");
   8.414 +        return -1;
   8.415 +    }
   8.416 +
   8.417 +    set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
   8.418 +    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
   8.419 +    boot_cpu_physical_apicid = 0;
   8.420 +
   8.421 +    printk("Found and enabled local APIC!\n");
   8.422 +
   8.423 +    apic_pm_init1();
   8.424 +
   8.425 +    return 0;
   8.426 +
   8.427 + no_apic:
   8.428 +    printk("No local APIC present or hardware disabled\n");
   8.429 +    return -1;
   8.430 +}
   8.431 +
   8.432 +void __init init_apic_mappings(void)
   8.433 +{
   8.434 +    unsigned long apic_phys = 0;
   8.435 +
   8.436 +    /*
   8.437 +     * If no local APIC can be found then set up a fake all zeroes page to 
   8.438 +     * simulate the local APIC and another one for the IO-APIC.
   8.439 +     */
   8.440 +    if (!smp_found_config && detect_init_APIC()) {
   8.441 +        apic_phys = get_free_page(GFP_KERNEL);
   8.442 +        apic_phys = __pa(apic_phys);
   8.443 +    } else
   8.444 +        apic_phys = mp_lapic_addr;
   8.445 +
   8.446 +    set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
   8.447 +    Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);
   8.448 +
   8.449 +    /*
   8.450 +     * Fetch the APIC ID of the BSP in case we have a
   8.451 +     * default configuration (or the MP table is broken).
   8.452 +     */
   8.453 +    if (boot_cpu_physical_apicid == -1U)
   8.454 +        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
   8.455 +
   8.456 +#ifdef CONFIG_X86_IO_APIC
   8.457 +    {
   8.458 +        unsigned long ioapic_phys = 0, idx = FIX_IO_APIC_BASE_0;
   8.459 +        int i;
   8.460 +
   8.461 +        for (i = 0; i < nr_ioapics; i++) {
   8.462 +            if (smp_found_config)
   8.463 +                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
   8.464 +            set_fixmap_nocache(idx, ioapic_phys);
   8.465 +            Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
   8.466 +                    __fix_to_virt(idx), ioapic_phys);
   8.467 +            idx++;
   8.468 +        }
   8.469 +    }
   8.470 +#endif
   8.471 +}
   8.472 +
   8.473 +/*
   8.474 + * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts
   8.475 + * per second. We assume that the caller has already set up the local
   8.476 + * APIC.
   8.477 + *
   8.478 + * The APIC timer is not exactly in sync with the external timer chip; it
   8.479 + * closely follows bus clocks.
   8.480 + */
   8.481 +
   8.482 +/*
   8.483 + * The timer chip is already set up at HZ interrupts per second here,
   8.484 + * but we do not accept timer interrupts yet. We only allow the BP
   8.485 + * to calibrate.
   8.486 + */
   8.487 +static unsigned int __init get_8254_timer_count(void)
   8.488 +{
   8.489 +    /*extern spinlock_t i8253_lock;*/
   8.490 +    /*unsigned long flags;*/
   8.491 +
   8.492 +    unsigned int count;
   8.493 +
   8.494 +    /*spin_lock_irqsave(&i8253_lock, flags);*/
   8.495 +
   8.496 +    outb_p(0x00, 0x43);
   8.497 +    count = inb_p(0x40);
   8.498 +    count |= inb_p(0x40) << 8;
   8.499 +
   8.500 +    /*spin_unlock_irqrestore(&i8253_lock, flags);*/
   8.501 +
   8.502 +    return count;
   8.503 +}
   8.504 +
   8.505 +void __init wait_8254_wraparound(void)
   8.506 +{
   8.507 +    unsigned int curr_count, prev_count=~0;
   8.508 +    int delta;
   8.509 +
   8.510 +    curr_count = get_8254_timer_count();
   8.511 +
   8.512 +    do {
   8.513 +        prev_count = curr_count;
   8.514 +        curr_count = get_8254_timer_count();
   8.515 +        delta = curr_count-prev_count;
   8.516 +
   8.517 +	/*
   8.518 +	 * This limit for delta seems arbitrary, but it isn't, it's
   8.519 +	 * slightly above the level of error a buggy Mercury/Neptune
   8.520 +	 * chipset timer can cause.
   8.521 +	 */
   8.522 +
   8.523 +    } while (delta < 300);
   8.524 +}
   8.525 +
   8.526 +/*
   8.527 + * This function sets up the local APIC timer, with a timeout of
   8.528 + * 'clocks' APIC bus clock. During calibration we actually call
   8.529 + * this function twice on the boot CPU, once with a bogus timeout
   8.530 + * value, second time for real. The other (noncalibrating) CPUs
   8.531 + * call this function only once, with the real, calibrated value.
   8.532 + *
   8.533 + * We do reads before writes even if unnecessary, to get around the
   8.534 + * P5 APIC double write bug.
   8.535 + */
   8.536 +
   8.537 +#define APIC_DIVISOR 16
   8.538 +
   8.539 +void __setup_APIC_LVTT(unsigned int clocks)
   8.540 +{
   8.541 +    unsigned int lvtt1_value, tmp_value;
   8.542 +
   8.543 +    lvtt1_value = SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) |
   8.544 +        APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
   8.545 +    apic_write_around(APIC_LVTT, lvtt1_value);
   8.546 +
   8.547 +    /*
   8.548 +     * Divide PICLK by 16
   8.549 +     */
   8.550 +    tmp_value = apic_read(APIC_TDCR);
   8.551 +    apic_write_around(APIC_TDCR, (tmp_value
   8.552 +                                  & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
   8.553 +                      | APIC_TDR_DIV_16);
   8.554 +
   8.555 +    apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
   8.556 +}
   8.557 +
   8.558 +void setup_APIC_timer(void * data)
   8.559 +{
   8.560 +    unsigned int clocks = (unsigned int) data, slice, t0, t1;
   8.561 +    unsigned long flags;
   8.562 +    int delta;
   8.563 +
   8.564 +    __save_flags(flags);
   8.565 +    __sti();
   8.566 +    /*
   8.567 +     * ok, Intel has some smart code in their APIC that knows
   8.568 +     * if a CPU was in 'hlt' lowpower mode, and this increases
   8.569 +     * its APIC arbitration priority. To avoid the external timer
   8.570 +     * IRQ APIC event being in sync with the APIC clock, we
   8.571 +     * introduce an interrupt skew to spread out timer events.
   8.572 +     *
   8.573 +     * The number of slices within a 'big' timeslice is smp_num_cpus+1
   8.574 +     */
   8.575 +
   8.576 +    slice = clocks / (smp_num_cpus+1);
   8.577 +    printk("cpu: %d, clocks: %d, slice: %d\n",
   8.578 +           smp_processor_id(), clocks, slice);
   8.579 +
   8.580 +    /*
   8.581 +     * Wait for IRQ0's slice:
   8.582 +     */
   8.583 +    wait_8254_wraparound();
   8.584 +
   8.585 +    __setup_APIC_LVTT(clocks);
   8.586 +
   8.587 +    t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
   8.588 +    /* Wait till TMCCT gets reloaded from TMICT... */
   8.589 +    do {
   8.590 +        t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
   8.591 +        delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
   8.592 +    } while (delta >= 0);
   8.593 +    /* Now wait for our slice for real. */
   8.594 +    do {
   8.595 +        t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
   8.596 +        delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
   8.597 +    } while (delta < 0);
   8.598 +
   8.599 +    __setup_APIC_LVTT(clocks);
   8.600 +
   8.601 +    printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
   8.602 +           smp_processor_id(), t0, t1, delta, slice, clocks);
   8.603 +
   8.604 +    __restore_flags(flags);
   8.605 +}
   8.606 +
   8.607 +/*
   8.608 + * In this function we calibrate APIC bus clocks to the external timer.
   8.609 + *
   8.610 + * We want to do the calibration only once since we
   8.611 + * want to have local timer irqs in sync. CPUs connected
   8.612 + * by the same APIC bus have the very same bus frequency.
   8.613 + * And we want to have irqs off anyways, no accidental
   8.614 + * APIC irq that way.
   8.615 + */
   8.616 +
   8.617 +int __init calibrate_APIC_clock(void)
   8.618 +{
   8.619 +    unsigned long long t1 = 0, t2 = 0;
   8.620 +    long tt1, tt2;
   8.621 +    long result;
   8.622 +    int i;
   8.623 +    const int LOOPS = HZ/10;
   8.624 +
   8.625 +    printk("calibrating APIC timer ...\n");
   8.626 +
   8.627 +    /*
   8.628 +     * Put whatever arbitrary (but long enough) timeout
   8.629 +     * value into the APIC clock, we just want to get the
   8.630 +     * counter running for calibration.
   8.631 +     */
   8.632 +    __setup_APIC_LVTT(1000000000);
   8.633 +
   8.634 +    /*
   8.635 +     * The timer chip counts down to zero. Let's wait
   8.636 +     * for a wraparound to start exact measurement:
   8.637 +     * (the current tick might have been already half done)
   8.638 +     */
   8.639 +
   8.640 +    wait_8254_wraparound();
   8.641 +
   8.642 +    /*
   8.643 +     * We wrapped around just now. Let's start:
   8.644 +     */
   8.645 +    rdtscll(t1);
   8.646 +    tt1 = apic_read(APIC_TMCCT);
   8.647 +
   8.648 +    /*
   8.649 +     * Let's wait LOOPS wraparounds:
   8.650 +     */
   8.651 +    for (i = 0; i < LOOPS; i++)
   8.652 +        wait_8254_wraparound();
   8.653 +
   8.654 +    tt2 = apic_read(APIC_TMCCT);
   8.655 +    rdtscll(t2);
   8.656 +
   8.657 +    /*
   8.658 +     * The APIC bus clock counter is 32 bits only, it
   8.659 +     * might have overflown, but note that we use signed
   8.660 +     * longs, thus no extra care needed.
   8.661 +     *
   8.662 +     * underflown to be exact, as the timer counts down ;)
   8.663 +     */
   8.664 +
   8.665 +    result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
   8.666 +
   8.667 +    printk("..... CPU clock speed is %ld.%04ld MHz.\n",
   8.668 +           ((long)(t2-t1)/LOOPS)/(1000000/HZ),
   8.669 +           ((long)(t2-t1)/LOOPS)%(1000000/HZ));
   8.670 +
   8.671 +    printk("..... host bus clock speed is %ld.%04ld MHz.\n",
   8.672 +           result/(1000000/HZ),
   8.673 +           result%(1000000/HZ));
   8.674 +
   8.675 +    return result;
   8.676 +}
   8.677 +
   8.678 +static unsigned int calibration_result;
   8.679 +
   8.680 +void __init setup_APIC_clocks (void)
   8.681 +{
   8.682 +    printk("Using local APIC timer interrupts.\n");
   8.683 +    using_apic_timer = 1;
   8.684 +
   8.685 +    __cli();
   8.686 +
   8.687 +    calibration_result = calibrate_APIC_clock();
   8.688 +    /*
   8.689 +     * Now set up the timer for real.
   8.690 +     */
   8.691 +    setup_APIC_timer((void *)calibration_result);
   8.692 +
   8.693 +    __sti();
   8.694 +
   8.695 +    /* and update all other cpus */
   8.696 +    smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
   8.697 +}
   8.698 +
   8.699 +#undef APIC_DIVISOR
   8.700 +
   8.701 +/*
   8.702 + * Local timer interrupt handler. It does both profiling and
   8.703 + * process statistics/rescheduling.
   8.704 + *
   8.705 + * We do profiling in every local tick, statistics/rescheduling
   8.706 + * happen only every 'profiling multiplier' ticks. The default
   8.707 + * multiplier is 1 and it can be changed by writing the new multiplier
   8.708 + * value into /proc/profile.
   8.709 + */
   8.710 +
   8.711 +inline void smp_local_timer_interrupt(struct pt_regs * regs)
   8.712 +{
   8.713 +    update_process_times(user_mode(regs));
   8.714 +}
   8.715 +
   8.716 +/*
   8.717 + * Local APIC timer interrupt. This is the most natural way for doing
   8.718 + * local interrupts, but local timer interrupts can be emulated by
   8.719 + * broadcast interrupts too. [in case the hw doesn't support APIC timers]
   8.720 + *
   8.721 + * [ if a single-CPU system runs an SMP kernel then we call the local
   8.722 + *   interrupt as well. Thus we cannot inline the local irq ... ]
   8.723 + */
   8.724 +unsigned int apic_timer_irqs [NR_CPUS];
   8.725 +
   8.726 +void smp_apic_timer_interrupt(struct pt_regs * regs)
   8.727 +{
   8.728 +    int cpu = smp_processor_id();
   8.729 +
   8.730 +    /*
   8.731 +     * the NMI deadlock-detector uses this.
   8.732 +     */
   8.733 +    apic_timer_irqs[cpu]++;
   8.734 +
   8.735 +    /*
   8.736 +     * NOTE! We'd better ACK the irq immediately,
   8.737 +     * because timer handling can be slow.
   8.738 +     */
   8.739 +    ack_APIC_irq();
   8.740 +    /*
   8.741 +     * update_process_times() expects us to have done irq_enter().
   8.742 +     * Besides, if we don't, timer interrupts ignore the global
   8.743 +     * interrupt lock, which is the WrongThing (tm) to do.
   8.744 +     */
   8.745 +    irq_enter(cpu, 0);
   8.746 +    smp_local_timer_interrupt(regs);
   8.747 +    irq_exit(cpu, 0);
   8.748 +
   8.749 +    if (softirq_pending(cpu))
   8.750 +        do_softirq();
   8.751 +}
   8.752 +
   8.753 +/*
   8.754 + * This interrupt should _never_ happen with our APIC/SMP architecture
   8.755 + */
   8.756 +asmlinkage void smp_spurious_interrupt(void)
   8.757 +{
   8.758 +    unsigned long v;
   8.759 +
   8.760 +    /*
   8.761 +     * Check if this really is a spurious interrupt and ACK it
   8.762 +     * if it is a vectored one.  Just in case...
   8.763 +     * Spurious interrupts should not be ACKed.
   8.764 +     */
   8.765 +    v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
   8.766 +    if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
   8.767 +        ack_APIC_irq();
   8.768 +
   8.769 +    /* see sw-dev-man vol 3, chapter 7.4.13.5 */
   8.770 +    printk("spurious APIC interrupt on CPU#%d, should never happen.\n",
   8.771 +           smp_processor_id());
   8.772 +}
   8.773 +
   8.774 +/*
   8.775 + * This interrupt should never happen with our APIC/SMP architecture
   8.776 + */
   8.777 +
   8.778 +asmlinkage void smp_error_interrupt(void)
   8.779 +{
   8.780 +    unsigned long v, v1;
   8.781 +
   8.782 +    /* First tickle the hardware, only then report what went on. -- REW */
   8.783 +    v = apic_read(APIC_ESR);
   8.784 +    apic_write(APIC_ESR, 0);
   8.785 +    v1 = apic_read(APIC_ESR);
   8.786 +    ack_APIC_irq();
   8.787 +    atomic_inc(&irq_err_count);
   8.788 +
   8.789 +    /* Here is what the APIC error bits mean:
   8.790 +       0: Send CS error
   8.791 +       1: Receive CS error
   8.792 +       2: Send accept error
   8.793 +       3: Receive accept error
   8.794 +       4: Reserved
   8.795 +       5: Send illegal vector
   8.796 +       6: Received illegal vector
   8.797 +       7: Illegal register address
   8.798 +    */
   8.799 +    printk ("APIC error on CPU%d: %02lx(%02lx)\n",
   8.800 +            smp_processor_id(), v , v1);
   8.801 +}
   8.802 +
   8.803 +/*
   8.804 + * This initializes the IO-APIC and APIC hardware if this is
   8.805 + * a UP kernel.
   8.806 + */
   8.807 +int __init APIC_init_uniprocessor (void)
   8.808 +{
   8.809 +    if (!smp_found_config && !cpu_has_apic)
   8.810 +        return -1;
   8.811 +
   8.812 +    /*
   8.813 +     * Complain if the BIOS pretends there is one.
   8.814 +     */
   8.815 +    if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
   8.816 +        printk("BIOS bug, local APIC #%d not detected!...\n",
   8.817 +               boot_cpu_physical_apicid);
   8.818 +        return -1;
   8.819 +    }
   8.820 +
   8.821 +    verify_local_APIC();
   8.822 +
   8.823 +    connect_bsp_APIC();
   8.824 +
   8.825 +    phys_cpu_present_map = 1;
   8.826 +    apic_write_around(APIC_ID, boot_cpu_physical_apicid);
   8.827 +
   8.828 +    apic_pm_init2();
   8.829 +
   8.830 +    setup_local_APIC();
   8.831 +
   8.832 +#ifdef CONFIG_X86_IO_APIC
   8.833 +    if (smp_found_config && nr_ioapics)
   8.834 +        setup_IO_APIC();
   8.835 +#endif
   8.836 +    setup_APIC_clocks();
   8.837 +
   8.838 +    return 0;
   8.839 +}
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen-2.4.16/arch/i386/boot/boot.S	Wed Nov 20 12:02:17 2002 +0000
     9.3 @@ -0,0 +1,241 @@
     9.4 +#include <xeno/config.h>
     9.5 +#include <asm/page.h>
     9.6 +
     9.7 +#define  SECONDARY_CPU_FLAG 0xA5A5A5A5
     9.8 +                
     9.9 +       	.text
    9.10 +
    9.11 +ENTRY(start)
    9.12 +        jmp hal_entry
    9.13 +
    9.14 +        .align	4
    9.15 +
    9.16 +/*** MULTIBOOT HEADER ****/
    9.17 +        /* Magic number indicating a Multiboot header. */
    9.18 +	.long	0x1BADB002
    9.19 +	/* Flags to bootloader (see Multiboot spec). */
    9.20 +	.long	0x00000006
    9.21 +	/* Checksum: must be the negated sum of the first two fields. */
    9.22 +	.long	-0x1BADB008
    9.23 +        /* Unused loader addresses (ELF header has all this already).*/
    9.24 +        .long   0,0,0,0,0
    9.25 +        /* EGA text mode. */
    9.26 +        .long   1,0,0,0
    9.27 +        
    9.28 +hal_entry:
    9.29 +        /* Set up a few descriptors: on entry only CS is guaranteed good. */
    9.30 +        lgdt    %cs:nopaging_gdt_descr-__PAGE_OFFSET
    9.31 +        mov     $(__HYPERVISOR_DS),%ecx
    9.32 +        mov     %ecx,%ds
    9.33 +        mov     %ecx,%es
    9.34 +        ljmp    $(__HYPERVISOR_CS),$(1f)-__PAGE_OFFSET
    9.35 +1:      lss     stack_start-__PAGE_OFFSET,%esp
    9.36 +
    9.37 +        /* Reset EFLAGS (subsumes CLI and CLD). */
    9.38 +	pushl	$0
    9.39 +	popf
    9.40 +
    9.41 +        /* CPU type checks. We need P6+. */
    9.42 +        mov     $0x200000,%edx
    9.43 +        pushfl
    9.44 +        pop     %ecx
    9.45 +        and     %edx,%ecx
    9.46 +        jne     bad_cpu            # ID bit should be clear
    9.47 +        pushl   %edx
    9.48 +        popfl
    9.49 +        pushfl
    9.50 +        pop     %ecx
    9.51 +        and     %edx,%ecx
    9.52 +        je      bad_cpu            # ID bit should be set
    9.53 +
    9.54 +        /* Set up CR0. */
    9.55 +        mov     %cr0,%ecx
    9.56 +        and     $0x00000011,%ecx   # save ET and PE
    9.57 +        or      $0x00050022,%ecx   # set AM, WP, NE and MP
    9.58 +        mov     %ecx,%cr0
    9.59 +
    9.60 +        /* Set up FPU. */
    9.61 +        fninit
    9.62 +        
    9.63 +        /* Set up CR4, except global flag which Intel requires should be     */
    9.64 +        /* left until after paging is enabled (IA32 Manual Vol. 3, Sec. 2.5) */
    9.65 +        mov     %cr4,%ecx
    9.66 +        or      mmu_cr4_features-__PAGE_OFFSET,%ecx
    9.67 +        mov     %ecx,mmu_cr4_features-__PAGE_OFFSET
    9.68 +        and     $0x7f,%ecx /* disable GLOBAL bit */
    9.69 +        mov     %ecx,%cr4
    9.70 +                
    9.71 +        /* Is this a non-boot processor? */
    9.72 +        cmp     $(SECONDARY_CPU_FLAG),%ebx
    9.73 +        jne     continue_boot_cpu
    9.74 +        
    9.75 +        call    start_paging
    9.76 +        lidt    idt_descr                        
    9.77 +        jmp     initialize_secondary
    9.78 +        
    9.79 +continue_boot_cpu:
    9.80 +        add     $__PAGE_OFFSET,%ebx
    9.81 +	push 	%ebx /* Multiboot info struct */
    9.82 +	push 	%eax /* Multiboot magic value */
    9.83 +
    9.84 +        /* Initialize BSS (no nasty surprises!) */
    9.85 +        mov     $__bss_start-__PAGE_OFFSET,%edi
    9.86 +        mov     $_end-__PAGE_OFFSET,%ecx
    9.87 +        sub     %edi,%ecx
    9.88 +        xor     %eax,%eax
    9.89 +        rep     stosb
    9.90 +
    9.91 +        /* Initialize low and high mappings of all memory with 4MB pages */
    9.92 +        mov     $idle0_pg_table-__PAGE_OFFSET,%edi
    9.93 +        mov     $0x1e3,%eax                  /* PRESENT+RW+A+D+4MB+GLOBAL */
    9.94 +1:      mov     %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
    9.95 +        stosl                                /* low mapping */
    9.96 +        add     $(1<<L2_PAGETABLE_SHIFT),%eax
    9.97 +        cmp     $MAX_USABLE_ADDRESS+0x1e3,%eax
    9.98 +        jne     1b
    9.99 +
   9.100 +        call    start_paging        
   9.101 +        call    setup_idt
   9.102 +        lidt    idt_descr
   9.103 +                
   9.104 +        /* Call into main C routine. This should never return.*/
   9.105 +       	call	cmain
   9.106 +        ud2     /* Force a panic (invalid opcode). */
   9.107 +
   9.108 +start_paging:
   9.109 +        mov     $idle0_pg_table-__PAGE_OFFSET,%eax
   9.110 +        mov     %eax,%cr3
   9.111 +        mov     %cr0,%eax
   9.112 +        or      $0x80010000,%eax /* set PG and WP bits */
   9.113 +        mov     %eax,%cr0
   9.114 +        jmp     1f
   9.115 +1:      /* Install relocated selectors (FS/GS unused). */
   9.116 +        lgdt    gdt_descr
   9.117 +        mov     $(__HYPERVISOR_DS),%ecx
   9.118 +        mov     %ecx,%ds
   9.119 +        mov     %ecx,%es
   9.120 +        mov     %ecx,%ss
   9.121 +        ljmp    $(__HYPERVISOR_CS),$1f
   9.122 +1:      /* Paging enabled, so we can now enable GLOBAL mappings in CR4. */
   9.123 +        movl    mmu_cr4_features,%ecx
   9.124 +        movl    %ecx,%cr4
   9.125 +        /* Relocate ESP */
   9.126 +        add     $__PAGE_OFFSET,%esp
   9.127 +        /* Relocate EIP via return jump */
   9.128 +        pop     %ecx
   9.129 +        add     $__PAGE_OFFSET,%ecx
   9.130 +        jmp     *%ecx
   9.131 +    
   9.132 +            
   9.133 +/*** INTERRUPT INITIALISATION ***/
   9.134 +        
   9.135 +setup_idt:
   9.136 +        lea     ignore_int,%edx
   9.137 +        mov     $(__HYPERVISOR_CS << 16),%eax
   9.138 +        mov     %dx,%ax            /* selector = 0x0010 = cs */
   9.139 +        mov     $0x8E00,%dx        /* interrupt gate - dpl=0, present */
   9.140 +
   9.141 +        lea     SYMBOL_NAME(idt_table),%edi
   9.142 +        mov     $256,%ecx
   9.143 +1:      mov     %eax,(%edi)
   9.144 +        mov     %edx,4(%edi)
   9.145 +        add     $8,%edi
   9.146 +        loop    1b
   9.147 +        ret
   9.148 +
   9.149 +/* This is the default interrupt handler. */
   9.150 +int_msg:
   9.151 +        .asciz "Unknown interrupt\n"
   9.152 +        ALIGN
   9.153 +ignore_int:
   9.154 +        cld
   9.155 +        push    %eax
   9.156 +        push    %ecx
   9.157 +        push    %edx
   9.158 +        pushl   %es
   9.159 +        pushl   %ds
   9.160 +        mov     $(__HYPERVISOR_DS),%eax
   9.161 +        mov     %eax,%ds
   9.162 +        mov     %eax,%es
   9.163 +        pushl   $int_msg
   9.164 +        call    SYMBOL_NAME(printf)
   9.165 +1:      jmp     1b
   9.166 +        pop     %eax
   9.167 +        popl    %ds
   9.168 +        popl    %es
   9.169 +        pop     %edx
   9.170 +        pop     %ecx
   9.171 +        pop     %eax
   9.172 +        iret
   9.173 +
   9.174 +
   9.175 +bad_cpu_msg:
   9.176 +        .asciz  "Bad CPU type. Need P6+."
   9.177 +        ALIGN
   9.178 +bad_cpu: 
   9.179 +        call    init_serial
   9.180 +        mov     $bad_cpu_msg,%esi
   9.181 +1:      lodsb
   9.182 +        test    %al,%al
   9.183 +        je      1f
   9.184 +        push    %eax
   9.185 +        call    putchar_serial
   9.186 +        add     $4,%esp
   9.187 +        jmp     1b
   9.188 +1:      jmp     1b
   9.189 +                   
   9.190 +        
   9.191 +/*** STACK LOCATION ***/
   9.192 +        
   9.193 +ENTRY(stack_start)
   9.194 +        .long SYMBOL_NAME(idle0_task_union)+8192-__PAGE_OFFSET
   9.195 +        .long __HYPERVISOR_DS
   9.196 +        
   9.197 +/*** DESCRIPTOR TABLES ***/
   9.198 +
   9.199 +.globl SYMBOL_NAME(idt)
   9.200 +.globl SYMBOL_NAME(gdt)        
   9.201 +
   9.202 +        ALIGN
   9.203 +        
   9.204 +        .word   0    
   9.205 +idt_descr:
   9.206 +	.word	256*8-1
   9.207 +SYMBOL_NAME(idt):
   9.208 +        .long	SYMBOL_NAME(idt_table)
   9.209 +
   9.210 +        .word   0
   9.211 +gdt_descr:
   9.212 +	.word	256*8-1
   9.213 +SYMBOL_NAME(gdt):       
   9.214 +        .long   SYMBOL_NAME(gdt_table)	/* gdt base */
   9.215 +
   9.216 +        .word   0
   9.217 +nopaging_gdt_descr:
   9.218 +        .word   256*8-1
   9.219 +        .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
   9.220 +        
   9.221 +        ALIGN
   9.222 +ENTRY(gdt_table)
   9.223 +        .quad 0x0000000000000000     /* NULL descriptor */
   9.224 +        .quad 0x0000000000000000     /* not used */
   9.225 +        .quad 0x00ceba000000ffff     /* 0x11 ring 1 3.5GB code at 0x00000000 */
   9.226 +        .quad 0x00ceb2000000ffff     /* 0x19 ring 1 3.5GB data at 0x00000000 */
   9.227 +        .quad 0x00cefa000000ffff     /* 0x23 ring 3 3.5GB code at 0x00000000 */
   9.228 +        .quad 0x00cef2000000ffff     /* 0x2b ring 3 3.5GB data at 0x00000000 */
   9.229 +        .quad 0x00cf9a000000ffff     /* 0x30 ring 0 4.0GB code at 0x00000000 */
   9.230 +        .quad 0x00cf92000000ffff     /* 0x38 ring 0 4.0GB data at 0x00000000 */
   9.231 +        .quad 0x0000000000000000
   9.232 +        .quad 0x0000000000000000
   9.233 +        .quad 0x0000000000000000
   9.234 +        .quad 0x0000000000000000
   9.235 +        .fill NR_CPUS*4,8,0             /* space for TSS's and LDT's */
   9.236 +
   9.237 +# The following adds 12kB to the kernel file size.
   9.238 +        .org 0x1000
   9.239 +ENTRY(idle0_pg_table)
   9.240 +        .org 0x2000
   9.241 +ENTRY(idle0_task_union)
   9.242 +        .org 0x4000
   9.243 +ENTRY(stext)
   9.244 +ENTRY(_stext)
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen-2.4.16/arch/i386/delay.c	Wed Nov 20 12:02:17 2002 +0000
    10.3 @@ -0,0 +1,29 @@
    10.4 +/*
    10.5 + *	Precise Delay Loops for i386
    10.6 + *
    10.7 + *	Copyright (C) 1993 Linus Torvalds
    10.8 + *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
    10.9 + *
   10.10 + *	The __delay function must _NOT_ be inlined as its execution time
   10.11 + *	depends wildly on alignment on many x86 processors. The additional
   10.12 + *	jump magic is needed to get the timing stable on all the CPU's
   10.13 + *	we have to worry about.
   10.14 + */
   10.15 +
   10.16 +#include <xeno/config.h>
   10.17 +#include <xeno/delay.h>
   10.18 +#include <asm/msr.h>
   10.19 +#include <asm/processor.h>
   10.20 +
   10.21 +void __udelay(unsigned long usecs)
   10.22 +{
   10.23 +    unsigned long ticks = usecs * ticks_per_usec;
   10.24 +    unsigned long s, e;
   10.25 +
   10.26 +    rdtscl(s);
   10.27 +    do
   10.28 +    {
   10.29 +        rep_nop();
   10.30 +        rdtscl(e);
   10.31 +    } while ((e-s) < ticks);
   10.32 +}
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen-2.4.16/arch/i386/entry.S	Wed Nov 20 12:02:17 2002 +0000
    11.3 @@ -0,0 +1,526 @@
    11.4 +/*
    11.5 + *  linux/arch/i386/entry.S
    11.6 + *
    11.7 + *  Copyright (C) 1991, 1992  Linus Torvalds
    11.8 + */
    11.9 +
   11.10 +/*
   11.11 + * entry.S contains the system-call and fault low-level handling routines.
   11.12 + * This also contains the timer-interrupt handler, as well as all interrupts
   11.13 + * and faults that can result in a task-switch.
   11.14 + *
   11.15 + * Stack layout in 'ret_from_system_call':
   11.16 + *	 0(%esp) - %ebx
   11.17 + *	 4(%esp) - %ecx
   11.18 + *	 8(%esp) - %edx
   11.19 + *       C(%esp) - %esi
   11.20 + *	10(%esp) - %edi
   11.21 + *	14(%esp) - %ebp
   11.22 + *	18(%esp) - %eax
   11.23 + *	1C(%esp) - %ds
   11.24 + *	20(%esp) - %es
   11.25 + *	24(%esp) - orig_eax
   11.26 + *	28(%esp) - %eip
   11.27 + *	2C(%esp) - %cs
   11.28 + *	30(%esp) - %eflags
   11.29 + *	34(%esp) - %oldesp
   11.30 + *	38(%esp) - %oldss
   11.31 + *
   11.32 + * "current" is in register %ebx during any slow entries.
   11.33 + */
   11.34 +/* The idea for callbacks from monitor -> guest OS.
   11.35 + * 
   11.36 + * First, we require that all callbacks (either via a supplied
   11.37 + * interrupt-descriptor-table, or via the special event or failsafe callbacks
   11.38 + * in the shared-info-structure) are to ring 1. This just makes life easier,
   11.39 + * in that it means we don't have to do messy GDT/LDT lookups to find
   11.40 + * out the privilege level of the return code-selector. That code
   11.41 + * would just be a hassle to write, and would need to account for running
   11.42 + * off the end of the GDT/LDT, for example. The event callback has quite
   11.43 + * a constrained callback method: the guest OS provides a linear address
   11.44 + * which we call back to using the hard-coded __GUEST_CS descriptor (which
   11.45 + * is a ring 1 descriptor). For IDT callbacks, we check that the provided
   11.46 + * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
   11.47 + * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
   11.48 + * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
   11.49 + * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
   11.50 + * than the correct ring) and bad things are bound to ensue -- IRET is
   11.51 + * likely to fault, and we may end up killing the domain (no harm can
   11.52 + * come to the hypervisor itself, though).
   11.53 + *      
   11.54 + * When doing a callback, we check if the return CS is in ring 0. If so,
   11.55 + * callback is delayed until next return to ring != 0.
   11.56 + * If return CS is in ring 1, then we create a callback frame
   11.57 + * starting at return SS/ESP. The base of the frame does an intra-privilege
   11.58 + * interrupt-return.
   11.59 + * If return CS is in ring > 1, we create a callback frame starting
   11.60 + * at SS/ESP taken from appropriate section of the current TSS. The base
   11.61 + * of the frame does an inter-privilege interrupt-return.
   11.62 + * 
   11.63 + * Note that the "failsafe callback" uses a special stackframe:
   11.64 + * { return_DS, return_ES, return_EIP, return_CS, return_EFLAGS, ... }
   11.65 + * That is, original values for DS/ES are placed on stack rather than
   11.66 + * in DS/ES themselves. Why? It saves us loading them, only to have them
   11.67 + * saved/restored in guest OS. Furthermore, if we load them we may cause
   11.68 + * a fault if they are invalid, which is a hassle to deal with. We avoid
   11.69 + * that problem if we don't load them :-) This property allows us to use
   11.70 + * the failsafe callback as a fallback: if we ever fault on loading DS/ES
   11.71 + * on return to ring != 0, we can simply package it up as a return via
   11.72 + * the failsafe callback, and let the guest OS sort it out (perhaps by
   11.73 + * killing an application process). Note that we also do this for any
   11.74 + * faulting IRET -- just let the guest OS handle it via the event
   11.75 + * callback.
   11.76 + *
   11.77 + * We terminate a domain in the following cases:
   11.78 + *  - creating a callback stack frame (due to bad ring-1 stack).
   11.79 + *  - faulting IRET on entry to failsafe callback handler.
   11.80 + * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
   11.81 + * handler in good order (absolutely no faults allowed!).
   11.82 + */
   11.83 +
   11.84 +#include <xeno/config.h>
   11.85 +#include <asm/smp.h>
   11.86 +
   11.87 +EBX		= 0x00
   11.88 +ECX		= 0x04
   11.89 +EDX		= 0x08
   11.90 +ESI		= 0x0C
   11.91 +EDI		= 0x10
   11.92 +EBP		= 0x14
   11.93 +EAX		= 0x18
   11.94 +DS		= 0x1C
   11.95 +ES		= 0x20
   11.96 +ORIG_EAX	= 0x24
   11.97 +EIP		= 0x28
   11.98 +CS		= 0x2C
   11.99 +EFLAGS		= 0x30
  11.100 +OLDESP		= 0x34
  11.101 +OLDSS		= 0x38
  11.102 +
  11.103 +/* Offsets in task_struct */
  11.104 +PROCESSOR       =  0
  11.105 +STATE           =  4
  11.106 +HYP_EVENTS      =  8
  11.107 +DOMAIN          = 12        
  11.108 +SHARED_INFO     = 16
  11.109 +
  11.110 +/* Offsets in shared_info_t */
  11.111 +EVENTS          =  0
  11.112 +EVENTS_ENABLE   =  4
  11.113 +EVENT_ADDR      =  8
  11.114 +FAILSAFE_ADDR   = 12
  11.115 +
  11.116 +/* Offsets in guest_trap_bounce */
  11.117 +GTB_ERROR_CODE  =  0
  11.118 +GTB_CR2         =  4
  11.119 +GTB_FLAGS       =  8
  11.120 +GTB_CS          = 10
  11.121 +GTB_EIP         = 12
  11.122 +GTBF_TRAP       =  1
  11.123 +GTBF_TRAP_NOCODE = 2
  11.124 +GTBF_TRAP_CR2   = 4
  11.125 +                        
  11.126 +CF_MASK		= 0x00000001
  11.127 +IF_MASK		= 0x00000200
  11.128 +NT_MASK		= 0x00004000
  11.129 +
  11.130 +#define SAVE_ALL \
  11.131 +	cld; \
  11.132 +	pushl %es; \
  11.133 +	pushl %ds; \
  11.134 +	pushl %eax; \
  11.135 +	pushl %ebp; \
  11.136 +	pushl %edi; \
  11.137 +	pushl %esi; \
  11.138 +	pushl %edx; \
  11.139 +	pushl %ecx; \
  11.140 +	pushl %ebx; \
  11.141 +	movl $(__HYPERVISOR_DS),%edx; \
  11.142 +	movl %edx,%ds; \
  11.143 +	movl %edx,%es;
  11.144 +
  11.145 +#define RESTORE_ALL	\
  11.146 +	popl %ebx;	\
  11.147 +	popl %ecx;	\
  11.148 +	popl %edx;	\
  11.149 +	popl %esi;	\
  11.150 +	popl %edi;	\
  11.151 +	popl %ebp;	\
  11.152 +	popl %eax;	\
  11.153 +1:	popl %ds;	\
  11.154 +2:	popl %es;	\
  11.155 +        addl $4,%esp;	\
  11.156 +3:      iret;		\
  11.157 +.section .fixup,"ax";	\
  11.158 +6:      subl $4,%esp;   \
  11.159 +        pushl %es;      \
  11.160 +5:      pushl %ds;      \
  11.161 +4:      pushl %eax;     \
  11.162 +	pushl %ebp;     \
  11.163 +	pushl %edi;     \
  11.164 +	pushl %esi;     \
  11.165 +	pushl %edx;     \
  11.166 +	pushl %ecx;     \
  11.167 +	pushl %ebx;     \
  11.168 +	pushl %ss;           \
  11.169 +	popl  %ds;           \
  11.170 +	pushl %ss;           \
  11.171 +	popl  %es;           \
  11.172 +	jmp  failsafe_callback;      \
  11.173 +.previous;                           \
  11.174 +.section __ex_table,"a";             \
  11.175 +	.align 4;	             \
  11.176 +	.long 1b,4b;       	     \
  11.177 +	.long 2b,5b;	             \
  11.178 +	.long 3b,6b;	             \
  11.179 +.previous
  11.180 +
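/* Editor's note: each task_struct sits at the base of an 8kB-aligned, 8kB
 * stack block (cf. idle0_task_union in boot.S), so masking ESP with -8192
 * below yields the current task. */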
  11.181 +#define GET_CURRENT(reg)  \
  11.182 +	movl $-8192, reg; \
  11.183 +	andl %esp, reg
  11.184 +
  11.185 +ENTRY(ret_from_newdomain)
  11.186 +	GET_CURRENT(%ebx)
  11.187 +	jmp test_all_events
  11.188 +
  11.189 +        ALIGN
  11.190 +restore_all:
  11.191 +	RESTORE_ALL
  11.192 +
  11.193 +        ALIGN
  11.194 +ENTRY(hypervisor_call)
  11.195 +        pushl %eax			# save orig_eax
  11.196 +	SAVE_ALL
  11.197 +	GET_CURRENT(%ebx)
  11.198 +	andl $255,%eax
  11.199 +	call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
  11.200 +	movl %eax,EAX(%esp)		# save the return value
  11.201 +
  11.202 +test_all_events:
  11.203 +        mov  PROCESSOR(%ebx),%eax
  11.204 +        shl  $4,%eax                    # sizeof(irq_cpustat) == 16
  11.205 +        lea  guest_trap_bounce(%eax),%edx
  11.206 +        cli                             # tests must not race interrupts
  11.207 +        xorl %ecx,%ecx
  11.208 +        notl %ecx
  11.209 +test_softirqs:  
  11.210 +        mov  PROCESSOR(%ebx),%eax
  11.211 +        shl  $4,%eax                    # sizeof(irq_cpustat) == 16
  11.212 +        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
  11.213 +        jnz  process_softirqs
  11.214 +test_hyp_events:        
  11.215 +        test %ecx, HYP_EVENTS(%ebx)
  11.216 +        jnz  process_hyp_events
  11.217 +test_guest_events:      
  11.218 +        movl SHARED_INFO(%ebx),%eax
  11.219 +        test %ecx,EVENTS(%eax)
  11.220 +        jz   restore_all
  11.221 +        test %ecx,EVENTS_ENABLE(%eax)
  11.222 +        jz   restore_all
  11.223 +        /* Prevent unnecessary reentry of event callback (stack overflow!) */
  11.224 +        xorl %ecx,%ecx
  11.225 +        movl %ecx,EVENTS_ENABLE(%eax)      
  11.226 +/* %eax == shared_info, %ebx == task_struct, %edx == guest_trap_bounce */
  11.227 +process_guest_events:   
  11.228 +        movl EVENT_ADDR(%eax),%eax
  11.229 +        movl %eax,GTB_EIP(%edx)
  11.230 +        movw $__GUEST_CS,GTB_CS(%edx)
  11.231 +        call create_bounce_frame
  11.232 +        jmp  restore_all
  11.233 +
  11.234 +        ALIGN
  11.235 +process_softirqs:       
  11.236 +        push %edx
  11.237 +        call SYMBOL_NAME(do_softirq)
  11.238 +        pop  %edx
  11.239 +        jmp  test_hyp_events
  11.240 +        
  11.241 +        ALIGN
  11.242 +process_hyp_events:
  11.243 +        sti
  11.244 +        call SYMBOL_NAME(do_hyp_events)
  11.245 +        jmp  test_all_events
  11.246 +
  11.247 +/* No special register assumptions */
  11.248 +failsafe_callback:
  11.249 +        GET_CURRENT(%ebx)
  11.250 +        mov  PROCESSOR(%ebx),%eax
  11.251 +        shl  $4,%eax
  11.252 +        lea  guest_trap_bounce(%eax),%edx
  11.253 +        movl SHARED_INFO(%ebx),%eax
  11.254 +        movl FAILSAFE_ADDR(%eax),%eax
  11.255 +        movl %eax,GTB_EIP(%edx)
  11.256 +        movw $__GUEST_CS,GTB_CS(%edx)
  11.257 +        call create_bounce_frame
  11.258 +        subl $8,%esi                 # add DS/ES to failsafe stack frame
  11.259 +        movl DS(%esp),%eax
  11.260 +FAULT3: movl %eax,(%esi) 
  11.261 +        movl ES(%esp),%eax
  11.262 +FAULT4: movl %eax,4(%esi)
  11.263 +        movl %esi,OLDESP(%esp)
  11.264 +        popl %ebx
  11.265 +        popl %ecx
  11.266 +        popl %edx
  11.267 +        popl %esi
  11.268 +        popl %edi
  11.269 +        popl %ebp
  11.270 +        popl %eax
  11.271 +        addl $12,%esp
  11.272 +FAULT5: iret 
  11.273 +
  11.274 +        
  11.275 +/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:         */
  11.276 +/*   {EIP, CS, EFLAGS, [ESP, SS]}                                     */
  11.277 +/* %edx == guest_trap_bounce, %ebx == task_struct                     */
  11.278 +/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP.        */
  11.279 +create_bounce_frame:        
  11.280 +        mov  CS+4(%esp),%cl
  11.281 +        test $2,%cl
  11.282 +        jz   1f /* jump if returning to an existing ring-1 activation */
  11.283 +        /* obtain ss/esp from TSS -- no current ring-1 activations */
  11.284 +        movl PROCESSOR(%ebx),%eax
  11.285 +        shll $8,%eax /* multiply by 256 */
  11.286 +        addl $init_tss + 12,%eax
  11.287 +        movl (%eax),%esi /* tss->esp1 */
  11.288 +FAULT6: movl 4(%eax),%ds /* tss->ss1  */
  11.289 +        /* base of stack frame must contain ss/esp (inter-priv iret) */
  11.290 +        subl $8,%esi
  11.291 +        movl OLDESP+4(%esp),%eax
  11.292 +FAULT7: movl %eax,(%esi) 
  11.293 +        movl OLDSS+4(%esp),%eax
  11.294 +FAULT8: movl %eax,4(%esi) 
  11.295 +        jmp 2f
  11.296 +1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
  11.297 +        movl OLDESP+4(%esp),%esi
  11.298 +FAULT9: movl OLDSS+4(%esp),%ds 
  11.299 +2:      /* Construct a stack frame: EFLAGS, CS/EIP */
  11.300 +        subl $12,%esi
  11.301 +        movl EIP+4(%esp),%eax
  11.302 +FAULT10:movl %eax,(%esi) 
  11.303 +        movl CS+4(%esp),%eax
  11.304 +FAULT11:movl %eax,4(%esi) 
  11.305 +        movl EFLAGS+4(%esp),%eax
  11.306 +FAULT12:movl %eax,8(%esi)
  11.307 +        /* Rewrite our stack frame and return to ring 1. */
  11.308 +        movl %ds,OLDSS+4(%esp)
  11.309 +        movl %esi,OLDESP+4(%esp)
  11.310 +        movzwl %es:GTB_CS(%edx),%eax
  11.311 +        movl %eax,CS+4(%esp)
  11.312 +        movl %es:GTB_EIP(%edx),%eax
  11.313 +        movl %eax,EIP+4(%esp)
  11.314 +        ret
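
A note on the frame built above: create_bounce_frame writes, at descending addresses on the guest's ring-1 stack, an iret-style frame of {EIP, CS, EFLAGS} plus, only when the guest was interrupted in ring 3, {ESP, SS} above it, exactly as an inter-privilege iret expects. A minimal C sketch of that layout, for orientation only; the struct name and field types are illustrative and not part of this changeset:

    /* Bounce frame laid out by create_bounce_frame, lowest address first.
     * The trailing esp/ss pair is present only when entry came from ring 3
     * (CS RPL bit 1 set), mirroring an inter-privilege iret frame. */
    struct bounce_frame_sketch {
        unsigned long eip;      /* FAULT10: written at (%esi)          */
        unsigned long cs;       /* FAULT11: GTB_CS, e.g. __GUEST_CS    */
        unsigned long eflags;   /* FAULT12                             */
        unsigned long esp;      /* FAULT7: guest OLDESP (ring-3 case)  */
        unsigned long ss;       /* FAULT8: guest OLDSS  (ring-3 case)  */
    };
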
  11.315 +        
  11.316 +                              
  11.317 +.section __ex_table,"a"
  11.318 +        .align 4
  11.319 +        .long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
  11.320 +        .long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
  11.321 +        .long FAULT3, kill_domain_fixup3 # Fault writing to ring-1 stack
  11.322 +        .long FAULT4, kill_domain_fixup3 # Fault writing to ring-1 stack
  11.323 +        .long FAULT5, kill_domain_fixup1 # Fault executing failsafe iret
  11.324 +        .long FAULT6, kill_domain_fixup2 # Fault loading ring-1 stack selector
  11.325 +        .long FAULT7, kill_domain_fixup2 # Fault writing to ring-1 stack
  11.326 +        .long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
  11.327 +        .long FAULT9, kill_domain_fixup2 # Fault loading ring-1 stack selector
  11.328 +        .long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
  11.329 +        .long FAULT11,kill_domain_fixup2 # Fault writing to ring-1 stack
  11.330 +        .long FAULT12,kill_domain_fixup2 # Fault writing to ring-1 stack
  11.331 +.previous
  11.332 +               
  11.333 +# This handler kills domains which experience unrecoverable faults.
  11.334 +.section .fixup,"ax"
  11.335 +kill_domain_fixup1:
  11.336 +        subl  $4,%esp
  11.337 +        SAVE_ALL
  11.338 +        jmp   kill_domain
  11.339 +kill_domain_fixup2:
  11.340 +        addl  $4,%esp                     
  11.341 +kill_domain_fixup3:
  11.342 +        pushl %ss
  11.343 +        popl  %ds
  11.344 +        jmp   kill_domain
  11.345 +.previous
  11.346 +
  11.347 +        ALIGN
  11.348 +process_guest_exception_and_events:        
  11.349 +        mov  PROCESSOR(%ebx),%eax
  11.350 +        shl  $4,%eax                    # sizeof(irq_cpustat) == 16
  11.351 +        lea  guest_trap_bounce(%eax),%edx
  11.352 +        testb $~0,GTB_FLAGS(%edx)
  11.353 +        jz   test_all_events
  11.354 +        call create_bounce_frame        # just the basic frame
  11.355 +        mov  %es:GTB_FLAGS(%edx),%cl
  11.356 +        test $GTBF_TRAP_NOCODE,%cl
  11.357 +        jnz  2f
  11.358 +        subl $4,%esi                    # push error_code onto guest frame
  11.359 +        movl %es:GTB_ERROR_CODE(%edx),%eax
  11.360 +FAULT1: movl %eax,(%esi)
  11.361 +        test $GTBF_TRAP_CR2,%cl
  11.362 +        jz   1f
  11.363 +        subl $4,%esi                    # push %cr2 onto guest frame
  11.364 +        movl %es:GTB_CR2(%edx),%eax
  11.365 +FAULT2: movl %eax,(%esi)
  11.366 +1:      movl %esi,OLDESP(%esp)        
  11.367 +2:      push %es                        # unclobber %ds
  11.368 +        pop  %ds 
  11.369 +        movb $0,GTB_FLAGS(%edx)
  11.370 +        jmp  test_all_events
  11.371 +
  11.372 +        ALIGN
  11.373 +ENTRY(ret_from_intr)
  11.374 +	GET_CURRENT(%ebx)
  11.375 +        movb CS(%esp),%al
  11.376 +	testb $3,%al	# return to non-supervisor?
  11.377 +	jne test_all_events
  11.378 +	jmp restore_all
  11.379 +
  11.380 +        ALIGN
  11.381 +ret_from_exception:
  11.382 +        movb CS(%esp),%al
  11.383 +	testb $3,%al	# return to non-supervisor?
  11.384 +	jne process_guest_exception_and_events
  11.385 +        jmp restore_all
  11.386 +
  11.387 +	ALIGN
  11.388 +
  11.389 +ENTRY(divide_error)
  11.390 +	pushl $0		# no error code
  11.391 +	pushl $ SYMBOL_NAME(do_divide_error)
  11.392 +	ALIGN
  11.393 +error_code:
  11.394 +	pushl %ds
  11.395 +	pushl %eax
  11.396 +	xorl %eax,%eax
  11.397 +	pushl %ebp
  11.398 +	pushl %edi
  11.399 +	pushl %esi
  11.400 +	pushl %edx
  11.401 +	decl %eax			# eax = -1
  11.402 +	pushl %ecx
  11.403 +	pushl %ebx
  11.404 +	cld
  11.405 +	movl %es,%ecx
  11.406 +	movl ORIG_EAX(%esp), %esi	# get the error code
  11.407 +	movl ES(%esp), %edi		# get the function address
  11.408 +	movl %eax, ORIG_EAX(%esp)
  11.409 +	movl %ecx, ES(%esp)
  11.410 +	movl %esp,%edx
  11.411 +	pushl %esi			# push the error code
  11.412 +	pushl %edx			# push the pt_regs pointer
  11.413 +	movl $(__HYPERVISOR_DS),%edx
  11.414 +	movl %edx,%ds
  11.415 +	movl %edx,%es
  11.416 +	GET_CURRENT(%ebx)
  11.417 +	call *%edi
  11.418 +	addl $8,%esp
  11.419 +	jmp ret_from_exception
  11.420 +
  11.421 +ENTRY(coprocessor_error)
  11.422 +	pushl $0
  11.423 +	pushl $ SYMBOL_NAME(do_coprocessor_error)
  11.424 +	jmp error_code
  11.425 +
  11.426 +ENTRY(simd_coprocessor_error)
  11.427 +	pushl $0
  11.428 +	pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
  11.429 +	jmp error_code
  11.430 +
  11.431 +ENTRY(device_not_available)
  11.432 +	pushl $0
  11.433 +        pushl $SYMBOL_NAME(math_state_restore)
  11.434 +        jmp   error_code
  11.435 +
  11.436 +ENTRY(debug)
  11.437 +	pushl $0
  11.438 +	pushl $ SYMBOL_NAME(do_debug)
  11.439 +	jmp error_code
  11.440 +
  11.441 +ENTRY(nmi)
  11.442 +	pushl %eax
  11.443 +	SAVE_ALL
  11.444 +	movl %esp,%edx
  11.445 +	pushl $0
  11.446 +	pushl %edx
  11.447 +	call SYMBOL_NAME(do_nmi)
  11.448 +	addl $8,%esp
  11.449 +	RESTORE_ALL
  11.450 +
  11.451 +ENTRY(int3)
  11.452 +	pushl $0
  11.453 +	pushl $ SYMBOL_NAME(do_int3)
  11.454 +	jmp error_code
  11.455 +
  11.456 +ENTRY(overflow)
  11.457 +	pushl $0
  11.458 +	pushl $ SYMBOL_NAME(do_overflow)
  11.459 +	jmp error_code
  11.460 +
  11.461 +ENTRY(bounds)
  11.462 +	pushl $0
  11.463 +	pushl $ SYMBOL_NAME(do_bounds)
  11.464 +	jmp error_code
  11.465 +
  11.466 +ENTRY(invalid_op)
  11.467 +	pushl $0
  11.468 +	pushl $ SYMBOL_NAME(do_invalid_op)
  11.469 +	jmp error_code
  11.470 +
  11.471 +ENTRY(coprocessor_segment_overrun)
  11.472 +	pushl $0
  11.473 +	pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
  11.474 +	jmp error_code
  11.475 +
  11.476 +ENTRY(double_fault)
  11.477 +	pushl $ SYMBOL_NAME(do_double_fault)
  11.478 +	jmp error_code
  11.479 +
  11.480 +ENTRY(invalid_TSS)
  11.481 +	pushl $ SYMBOL_NAME(do_invalid_TSS)
  11.482 +	jmp error_code
  11.483 +
  11.484 +ENTRY(segment_not_present)
  11.485 +	pushl $ SYMBOL_NAME(do_segment_not_present)
  11.486 +	jmp error_code
  11.487 +
  11.488 +ENTRY(stack_segment)
  11.489 +	pushl $ SYMBOL_NAME(do_stack_segment)
  11.490 +	jmp error_code
  11.491 +
  11.492 +ENTRY(general_protection)
  11.493 +	pushl $ SYMBOL_NAME(do_general_protection)
  11.494 +	jmp error_code
  11.495 +
  11.496 +ENTRY(alignment_check)
  11.497 +	pushl $ SYMBOL_NAME(do_alignment_check)
  11.498 +	jmp error_code
  11.499 +
  11.500 +ENTRY(page_fault)
  11.501 +	pushl $ SYMBOL_NAME(do_page_fault)
  11.502 +	jmp error_code
  11.503 +
  11.504 +ENTRY(machine_check)
  11.505 +	pushl $0
  11.506 +	pushl $ SYMBOL_NAME(do_machine_check)
  11.507 +	jmp error_code
  11.508 +
  11.509 +ENTRY(spurious_interrupt_bug)
  11.510 +	pushl $0
  11.511 +	pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
  11.512 +	jmp error_code
  11.513 +
  11.514 +.data
  11.515 +ENTRY(hypervisor_call_table)
  11.516 +        .long SYMBOL_NAME(do_set_trap_table)
  11.517 +        .long SYMBOL_NAME(do_process_page_updates)
  11.518 +        .long SYMBOL_NAME(do_console_write)
  11.519 +        .long SYMBOL_NAME(do_set_pagetable)
  11.520 +        .long SYMBOL_NAME(do_set_guest_stack)
  11.521 +        .long SYMBOL_NAME(do_net_update)
  11.522 +        .long SYMBOL_NAME(do_fpu_taskswitch)
  11.523 +        .long SYMBOL_NAME(do_yield)
  11.524 +        .long SYMBOL_NAME(kill_domain)
  11.525 +        .long SYMBOL_NAME(do_dom0_op)
  11.526 +        .long SYMBOL_NAME(do_network_op)
  11.527 +        .rept NR_syscalls-(.-hypervisor_call_table)/4
  11.528 +        .long SYMBOL_NAME(sys_ni_syscall)
  11.529 +	.endr
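
The hypervisor_call entry point above masks the call number to 8 bits and dispatches through hypervisor_call_table, which the .rept directive pads out to NR_syscalls entries of sys_ni_syscall. A minimal C sketch of that dispatch, for illustration only: the two-argument signature, the fixed table size of 256 and the -38 (-ENOSYS) return are assumptions, not definitions taken from this changeset.

    /* Sketch of the table dispatch performed by ENTRY(hypervisor_call). */
    typedef long (*hypervisor_fn)(unsigned long a1, unsigned long a2);

    static long do_ni_call(unsigned long a1, unsigned long a2)
    {
        return -38;                            /* -ENOSYS, like sys_ni_syscall */
    }

    /* Padded with the 'not implemented' handler, mirroring the .rept padding
     * above (GCC range initialiser, as used elsewhere in this tree). */
    static hypervisor_fn call_table[256] = {
        [0 ... 255] = do_ni_call,
        /* [0] = do_set_trap_table, [1] = do_process_page_updates, ... */
    };

    long dispatch_hypervisor_call(unsigned long eax,
                                  unsigned long a1, unsigned long a2)
    {
        return call_table[eax & 255](a1, a2);  /* 'andl $255,%eax' + indexed call */
    }
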
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen-2.4.16/arch/i386/extable.c	Wed Nov 20 12:02:17 2002 +0000
    12.3 @@ -0,0 +1,62 @@
    12.4 +/*
    12.5 + * linux/arch/i386/mm/extable.c
    12.6 + */
    12.7 +
    12.8 +#include <linux/config.h>
    12.9 +#include <linux/module.h>
   12.10 +#include <linux/spinlock.h>
   12.11 +#include <asm/uaccess.h>
   12.12 +
   12.13 +extern const struct exception_table_entry __start___ex_table[];
   12.14 +extern const struct exception_table_entry __stop___ex_table[];
   12.15 +
   12.16 +static inline unsigned long
   12.17 +search_one_table(const struct exception_table_entry *first,
   12.18 +		 const struct exception_table_entry *last,
   12.19 +		 unsigned long value)
   12.20 +{
   12.21 +        while (first <= last) {
   12.22 +		const struct exception_table_entry *mid;
   12.23 +		long diff;
   12.24 +
   12.25 +		mid = (last - first) / 2 + first;
   12.26 +		diff = mid->insn - value;
   12.27 +                if (diff == 0)
   12.28 +                        return mid->fixup;
   12.29 +                else if (diff < 0)
   12.30 +                        first = mid+1;
   12.31 +                else
   12.32 +                        last = mid-1;
   12.33 +        }
   12.34 +        return 0;
   12.35 +}
   12.36 +
   12.37 +extern spinlock_t modlist_lock;
   12.38 +
   12.39 +unsigned long
   12.40 +search_exception_table(unsigned long addr)
   12.41 +{
   12.42 +	unsigned long ret = 0;
   12.43 +	
   12.44 +#ifndef CONFIG_MODULES
   12.45 +	/* There is only the kernel to search.  */
   12.46 +	ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
   12.47 +	return ret;
   12.48 +#else
   12.49 +	unsigned long flags;
   12.50 +	/* The kernel is the last "module" -- no need to treat it special.  */
   12.51 +	struct module *mp;
   12.52 +
   12.53 +	spin_lock_irqsave(&modlist_lock, flags);
   12.54 +	for (mp = module_list; mp != NULL; mp = mp->next) {
   12.55 +		if (mp->ex_table_start == NULL || !(mp->flags&(MOD_RUNNING|MOD_INITIALIZING)))
   12.56 +			continue;
   12.57 +		ret = search_one_table(mp->ex_table_start,
   12.58 +				       mp->ex_table_end - 1, addr);
   12.59 +		if (ret)
   12.60 +			break;
   12.61 +	}
   12.62 +	spin_unlock_irqrestore(&modlist_lock, flags);
   12.63 +	return ret;
   12.64 +#endif
   12.65 +}
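
search_exception_table() binary-searches the (insn, fixup) pairs collected from the __ex_table sections, such as the FAULT1..FAULT12 entries emitted by entry.S above. A hedged sketch of how a fault handler consumes the result; the regs structure and the function name are illustrative assumptions, since the real consumer lives in the trap-handling code rather than in this file:

    /* Illustrative only: redirect a faulting EIP to its fixup stub. */
    struct fault_regs_sketch { unsigned long eip; };

    extern unsigned long search_exception_table(unsigned long addr);

    static int fixup_exception_sketch(struct fault_regs_sketch *regs)
    {
        unsigned long fixup = search_exception_table(regs->eip);
        if (fixup == 0)
            return 0;        /* no entry: the fault is fatal for the domain  */
        regs->eip = fixup;   /* resume at the stub, e.g. kill_domain_fixup2  */
        return 1;
    }
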
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen-2.4.16/arch/i386/i387.c	Wed Nov 20 12:02:17 2002 +0000
    13.3 @@ -0,0 +1,50 @@
    13.4 +/*
    13.5 + *  linux/arch/i386/kernel/i387.c
    13.6 + *
    13.7 + *  Copyright (C) 1994 Linus Torvalds
    13.8 + *
    13.9 + *  Pentium III FXSR, SSE support
   13.10 + *  General FPU state handling cleanups
   13.11 + *	Gareth Hughes <gareth@valinux.com>, May 2000
   13.12 + */
   13.13 +
   13.14 +#include <xeno/config.h>
   13.15 +#include <xeno/sched.h>
   13.16 +#include <asm/processor.h>
   13.17 +#include <asm/i387.h>
   13.18 +
   13.19 +void init_fpu(void)
   13.20 +{
   13.21 +    __asm__("fninit");
   13.22 +    if ( cpu_has_xmm ) load_mxcsr(0x1f80);
   13.23 +    current->flags |= PF_DONEFPUINIT;
   13.24 +}
   13.25 +
   13.26 +static inline void __save_init_fpu( struct task_struct *tsk )
   13.27 +{
   13.28 +	if ( cpu_has_fxsr ) {
   13.29 +		asm volatile( "fxsave %0 ; fnclex"
   13.30 +			      : "=m" (tsk->thread.i387.fxsave) );
   13.31 +	} else {
   13.32 +		asm volatile( "fnsave %0 ; fwait"
   13.33 +			      : "=m" (tsk->thread.i387.fsave) );
   13.34 +	}
   13.35 +	tsk->flags &= ~PF_USEDFPU;
   13.36 +}
   13.37 +
   13.38 +void save_init_fpu( struct task_struct *tsk )
   13.39 +{
   13.40 +	__save_init_fpu(tsk);
   13.41 +	stts();
   13.42 +}
   13.43 +
   13.44 +void restore_fpu( struct task_struct *tsk )
   13.45 +{
   13.46 +    if ( cpu_has_fxsr ) {
   13.47 +        asm volatile( "fxrstor %0"
   13.48 +                      : : "m" (tsk->thread.i387.fxsave) );
   13.49 +    } else {
   13.50 +        asm volatile( "frstor %0"
   13.51 +                      : : "m" (tsk->thread.i387.fsave) );
   13.52 +    }
   13.53 +}
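
These helpers implement lazy FPU context handling: save_init_fpu() snapshots the state with fxsave/fnsave and then calls stts(), so the next FPU instruction from the new context traps via device_not_available to math_state_restore. A minimal sketch of the intended switch-out path, assuming the PF_USEDFPU flag semantics used above; the helper name is illustrative:

    /* Sketch of a lazy FPU switch-out, using the helper defined above. */
    extern void save_init_fpu(struct task_struct *tsk);

    static inline void unlazy_fpu_sketch(struct task_struct *tsk)
    {
        if (tsk->flags & PF_USEDFPU)   /* FPU touched since the last save?          */
            save_init_fpu(tsk);        /* fxsave/fnsave + stts(): next FPU op traps */
    }
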
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen-2.4.16/arch/i386/i8259.c	Wed Nov 20 12:02:17 2002 +0000
    14.3 @@ -0,0 +1,469 @@
    14.4 +/******************************************************************************
    14.5 + * i8259.c
    14.6 + * 
    14.7 + * Well, this is required for SMP systems as well, as it builds interrupt
    14.8 + * tables for IO-APICs as well as uniprocessor 8259-alikes.
    14.9 + */
   14.10 +
   14.11 +#include <xeno/config.h>
   14.12 +#include <xeno/init.h>
   14.13 +#include <asm/ptrace.h>
   14.14 +#include <xeno/errno.h>
   14.15 +#include <xeno/sched.h>
   14.16 +#include <xeno/interrupt.h>
   14.17 +#include <xeno/irq.h>
   14.18 +
   14.19 +#include <asm/atomic.h>
   14.20 +#include <asm/system.h>
   14.21 +#include <asm/io.h>
   14.22 +#include <asm/desc.h>
   14.23 +#include <asm/bitops.h>
   14.24 +#include <xeno/delay.h>
   14.25 +#include <asm/apic.h>
   14.26 +
   14.27 +
   14.28 +/*
   14.29 + * Common place to define all x86 IRQ vectors
   14.30 + *
   14.31 + * This builds up the IRQ handler stubs using some ugly macros in irq.h
   14.32 + *
   14.33 + * These macros create the low-level assembly IRQ routines that save
   14.34 + * register context and call do_IRQ(). do_IRQ() then does all the
   14.35 + * operations that are needed to keep the AT (or SMP IOAPIC)
   14.36 + * interrupt-controller happy.
   14.37 + */
   14.38 +
   14.39 +BUILD_COMMON_IRQ()
   14.40 +
   14.41 +#define BI(x,y) \
   14.42 +	BUILD_IRQ(x##y)
   14.43 +
   14.44 +#define BUILD_16_IRQS(x) \
   14.45 +	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
   14.46 +	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
   14.47 +	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
   14.48 +	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
   14.49 +
   14.50 +/*
   14.51 + * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
   14.52 + * (these are usually mapped to vectors 0x20-0x2f)
   14.53 + */
   14.54 +    BUILD_16_IRQS(0x0)
   14.55 +
   14.56 +#ifdef CONFIG_X86_IO_APIC
   14.57 +/*
   14.58 + * The IO-APIC gives us many more interrupt sources. Most of these 
   14.59 + * are unused, but an SMP system is supposed to have enough memory ...
   14.60 + * Sometimes (mostly wrt. hw bugs) we get corrupted vectors all
   14.61 + * across the spectrum, so we really want to be prepared to get all
   14.62 + * of these. Plus, more powerful systems might have more than 64
   14.63 + * IO-APIC registers.
   14.64 + *
   14.65 + * (these are usually mapped into the 0x30-0xff vector range)
   14.66 + */
   14.67 +    BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
   14.68 +    BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
   14.69 +    BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
   14.70 +    BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
   14.71 +#endif
   14.72 +
   14.73 +#undef BUILD_16_IRQS
   14.74 +#undef BI
   14.75 +
   14.76 +
   14.77 +/*
   14.78 + * The following vectors are part of the Linux architecture, there
   14.79 + * is no hardware IRQ pin equivalent for them, they are triggered
   14.80 + * through the ICC by us (IPIs)
   14.81 + */
   14.82 +#ifdef CONFIG_SMP
   14.83 +    BUILD_SMP_INTERRUPT(event_check_interrupt,EVENT_CHECK_VECTOR)
   14.84 +    BUILD_SMP_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
   14.85 +    BUILD_SMP_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
   14.86 +#endif
   14.87 +
   14.88 +/*
   14.89 + * every pentium local APIC has two 'local interrupts', with a
   14.90 + * soft-definable vector attached to both interrupts, one of
   14.91 + * which is a timer interrupt, the other one is error counter
   14.92 + * overflow. Linux uses the local APIC timer interrupt to get
   14.93 + * a much simpler SMP time architecture:
   14.94 + */
   14.95 +#ifdef CONFIG_X86_LOCAL_APIC
   14.96 +    BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
   14.97 +    BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
   14.98 +    BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
   14.99 +#endif
  14.100 +
  14.101 +#define IRQ(x,y) \
  14.102 +	IRQ##x##y##_interrupt
  14.103 +
  14.104 +#define IRQLIST_16(x) \
  14.105 +	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
  14.106 +	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
  14.107 +	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
  14.108 +	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
  14.109 +
  14.110 +    void (*interrupt[NR_IRQS])(void) = {
  14.111 +	IRQLIST_16(0x0),
  14.112 +
  14.113 +#ifdef CONFIG_X86_IO_APIC
  14.114 +        IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
  14.115 +	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
  14.116 +	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
  14.117 +	IRQLIST_16(0xc), IRQLIST_16(0xd)
  14.118 +#endif
  14.119 +    };
  14.120 +
  14.121 +#undef IRQ
  14.122 +#undef IRQLIST_16
  14.123 +
  14.124 +/*
  14.125 + * This is the 'legacy' 8259A Programmable Interrupt Controller,
  14.126 + * present in the majority of PC/AT boxes, plus some generic
  14.127 + * x86-specific things, if generic specifics make any sense at all.
  14.128 + *
  14.129 + * This file should become arch/i386/kernel/irq.c when the old irq.c
  14.130 + * moves to arch-independent land.
  14.131 + */
  14.132 +
  14.133 +spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
  14.134 +
  14.135 +static void end_8259A_irq (unsigned int irq)
  14.136 +{
  14.137 +    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
  14.138 +        enable_8259A_irq(irq);
  14.139 +}
  14.140 +
  14.141 +#define shutdown_8259A_irq	disable_8259A_irq
  14.142 +
  14.143 +void mask_and_ack_8259A(unsigned int);
  14.144 +
  14.145 +static unsigned int startup_8259A_irq(unsigned int irq)
  14.146 +{ 
  14.147 +    enable_8259A_irq(irq);
  14.148 +    return 0; /* never anything pending */
  14.149 +}
  14.150 +
  14.151 +static struct hw_interrupt_type i8259A_irq_type = {
  14.152 +    "XT-PIC",
  14.153 +    startup_8259A_irq,
  14.154 +    shutdown_8259A_irq,
  14.155 +    enable_8259A_irq,
  14.156 +    disable_8259A_irq,
  14.157 +    mask_and_ack_8259A,
  14.158 +    end_8259A_irq,
  14.159 +    NULL
  14.160 +};
  14.161 +
  14.162 +/*
  14.163 + * 8259A PIC functions to handle ISA devices:
  14.164 + */
  14.165 +
  14.166 +/*
  14.167 + * This contains the irq mask for both 8259A irq controllers,
  14.168 + */
  14.169 +static unsigned int cached_irq_mask = 0xffff;
  14.170 +
  14.171 +#define __byte(x,y) 	(((unsigned char *)&(y))[x])
  14.172 +#define cached_21	(__byte(0,cached_irq_mask))
  14.173 +#define cached_A1	(__byte(1,cached_irq_mask))
  14.174 +
  14.175 +/*
  14.176 + * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
  14.177 + * boards the timer interrupt is not really connected to any IO-APIC pin;
  14.178 + * it's fed to the master 8259A's IR0 line only.
  14.179 + *
  14.180 + * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
  14.181 + * This 'mixed mode' IRQ handling costs nothing because it's only used
  14.182 + * at IRQ setup time.
  14.183 + */
  14.184 +unsigned long io_apic_irqs;
  14.185 +
  14.186 +void disable_8259A_irq(unsigned int irq)
  14.187 +{
  14.188 +    unsigned int mask = 1 << irq;
  14.189 +    unsigned long flags;
  14.190 +
  14.191 +    spin_lock_irqsave(&i8259A_lock, flags);
  14.192 +    cached_irq_mask |= mask;
  14.193 +    if (irq & 8)
  14.194 +        outb(cached_A1,0xA1);
  14.195 +    else
  14.196 +        outb(cached_21,0x21);
  14.197 +    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.198 +}
  14.199 +
  14.200 +void enable_8259A_irq(unsigned int irq)
  14.201 +{
  14.202 +    unsigned int mask = ~(1 << irq);
  14.203 +    unsigned long flags;
  14.204 +
  14.205 +    spin_lock_irqsave(&i8259A_lock, flags);
  14.206 +    cached_irq_mask &= mask;
  14.207 +    if (irq & 8)
  14.208 +        outb(cached_A1,0xA1);
  14.209 +    else
  14.210 +        outb(cached_21,0x21);
  14.211 +    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.212 +}
  14.213 +
  14.214 +int i8259A_irq_pending(unsigned int irq)
  14.215 +{
  14.216 +    unsigned int mask = 1<<irq;
  14.217 +    unsigned long flags;
  14.218 +    int ret;
  14.219 +
  14.220 +    spin_lock_irqsave(&i8259A_lock, flags);
  14.221 +    if (irq < 8)
  14.222 +        ret = inb(0x20) & mask;
  14.223 +    else
  14.224 +        ret = inb(0xA0) & (mask >> 8);
  14.225 +    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.226 +
  14.227 +    return ret;
  14.228 +}
  14.229 +
  14.230 +void make_8259A_irq(unsigned int irq)
  14.231 +{
  14.232 +    disable_irq_nosync(irq);
  14.233 +    io_apic_irqs &= ~(1<<irq);
  14.234 +    irq_desc[irq].handler = &i8259A_irq_type;
  14.235 +    enable_irq(irq);
  14.236 +}
  14.237 +
  14.238 +/*
  14.239 + * This function assumes it is called rarely, since switching
  14.240 + * between 8259A registers is slow.
  14.241 + * The caller must hold the irq controller spinlock while
  14.242 + * calling this.
  14.243 + */
  14.244 +static inline int i8259A_irq_real(unsigned int irq)
  14.245 +{
  14.246 +    int value;
  14.247 +    int irqmask = 1<<irq;
  14.248 +
  14.249 +    if (irq < 8) {
  14.250 +        outb(0x0B,0x20);		/* ISR register */
  14.251 +        value = inb(0x20) & irqmask;
  14.252 +        outb(0x0A,0x20);		/* back to the IRR register */
  14.253 +        return value;
  14.254 +    }
  14.255 +    outb(0x0B,0xA0);		/* ISR register */
  14.256 +    value = inb(0xA0) & (irqmask >> 8);
  14.257 +    outb(0x0A,0xA0);		/* back to the IRR register */
  14.258 +    return value;
  14.259 +}
  14.260 +
  14.261 +/*
  14.262 + * Careful! The 8259A is a fragile beast, it pretty
  14.263 + * much _has_ to be done exactly like this (mask it
  14.264 + * first, _then_ send the EOI, and the order of EOI
  14.265 + * to the two 8259s is important!
  14.266 + */
  14.267 +void mask_and_ack_8259A(unsigned int irq)
  14.268 +{
  14.269 +    unsigned int irqmask = 1 << irq;
  14.270 +    unsigned long flags;
  14.271 +
  14.272 +    spin_lock_irqsave(&i8259A_lock, flags);
  14.273 +    /*
  14.274 +     * Lightweight spurious IRQ detection. We do not want
  14.275 +     * to overdo spurious IRQ handling - it's usually a sign
  14.276 +     * of hardware problems, so we only do the checks we can
  14.277 +     * do without slowing down good hardware unnecessarily.
  14.278 +     *
  14.279 +     * Note that IRQ7 and IRQ15 (the two spurious IRQs
  14.280 +     * usually resulting from the 8259A-1|2 PICs) occur
  14.281 +     * even if the IRQ is masked in the 8259A. Thus we
  14.282 +     * can check spurious 8259A IRQs without doing the
  14.283 +     * quite slow i8259A_irq_real() call for every IRQ.
  14.284 +     * This does not cover 100% of spurious interrupts,
  14.285 +     * but should be enough to warn the user that there
  14.286 +     * is something bad going on ...
  14.287 +     */
  14.288 +    if (cached_irq_mask & irqmask)
  14.289 +        goto spurious_8259A_irq;
  14.290 +    cached_irq_mask |= irqmask;
  14.291 +
  14.292 + handle_real_irq:
  14.293 +    if (irq & 8) {
  14.294 +        inb(0xA1);		/* DUMMY - (do we need this?) */
  14.295 +        outb(cached_A1,0xA1);
  14.296 +        outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
  14.297 +        outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
  14.298 +    } else {
  14.299 +        inb(0x21);		/* DUMMY - (do we need this?) */
  14.300 +        outb(cached_21,0x21);
  14.301 +        outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
  14.302 +    }
  14.303 +    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.304 +    return;
  14.305 +
  14.306 + spurious_8259A_irq:
  14.307 +    /*
  14.308 +     * This is the slow path - it should happen rarely.
  14.309 +     */
  14.310 +    if (i8259A_irq_real(irq))
  14.311 +        /*
  14.312 +         * oops, the IRQ _is_ in service according to the
  14.313 +         * 8259A - not spurious, go handle it.
  14.314 +         */
  14.315 +        goto handle_real_irq;
  14.316 +
  14.317 +    {
  14.318 +        static int spurious_irq_mask;
  14.319 +        /*
  14.320 +         * At this point we can be sure the IRQ is spurious,
  14.321 +         * let's ACK and report it. [once per IRQ]
  14.322 +         */
  14.323 +        if (!(spurious_irq_mask & irqmask)) {
  14.324 +            printk("spurious 8259A interrupt: IRQ%d.\n", irq);
  14.325 +            spurious_irq_mask |= irqmask;
  14.326 +        }
  14.327 +        atomic_inc(&irq_err_count);
  14.328 +        /*
  14.329 +         * Theoretically we do not have to handle this IRQ,
  14.330 +         * but in Linux this does not cause problems and is
  14.331 +         * simpler for us.
  14.332 +         */
  14.333 +        goto handle_real_irq;
  14.334 +    }
  14.335 +}
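
The 'Specific EOI' commands above encode the interrupt line in the low three bits of 0x60. A tiny worked example, assuming IRQ 12 (a slave line): the slave PIC receives 0x60 + (12 & 7) = 0x64 on port 0xA0, and the master then receives 0x62, the specific EOI for its cascade input IR2, on port 0x20.

    /* Worked example: 'Specific EOI' bytes for IRQ 12 (values only, no I/O). */
    static void specific_eoi_example(void)
    {
        unsigned int  irq        = 12;
        unsigned char slave_eoi  = 0x60 + (irq & 7); /* 0x64 -> port 0xA0           */
        unsigned char master_eoi = 0x62;             /* EOI for cascade IR2 -> 0x20 */
        (void)slave_eoi; (void)master_eoi;
    }
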
  14.336 +
  14.337 +void __init init_8259A(int auto_eoi)
  14.338 +{
  14.339 +    unsigned long flags;
  14.340 +
  14.341 +    spin_lock_irqsave(&i8259A_lock, flags);
  14.342 +
  14.343 +    outb(0xff, 0x21);	/* mask all of 8259A-1 */
  14.344 +    outb(0xff, 0xA1);	/* mask all of 8259A-2 */
  14.345 +
  14.346 +    /*
  14.347 +     * outb_p - this has to work on a wide range of PC hardware.
  14.348 +     */
  14.349 +    outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
  14.350 +    outb_p(0x20 + 0, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
  14.351 +    outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
  14.352 +    if (auto_eoi)
  14.353 +        outb_p(0x03, 0x21);	/* master does Auto EOI */
  14.354 +    else
  14.355 +        outb_p(0x01, 0x21);	/* master expects normal EOI */
  14.356 +
  14.357 +    outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
  14.358 +    outb_p(0x20 + 8, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
  14.359 +    outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
  14.360 +    outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
  14.361 +                           is to be investigated) */
  14.362 +
  14.363 +    if (auto_eoi)
  14.364 +        /*
  14.365 +         * in AEOI mode we just have to mask the interrupt
  14.366 +         * when acking.
  14.367 +         */
  14.368 +        i8259A_irq_type.ack = disable_8259A_irq;
  14.369 +    else
  14.370 +        i8259A_irq_type.ack = mask_and_ack_8259A;
  14.371 +
  14.372 +    udelay(100);		/* wait for 8259A to initialize */
  14.373 +
  14.374 +    outb(cached_21, 0x21);	/* restore master IRQ mask */
  14.375 +    outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
  14.376 +
  14.377 +    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.378 +}
  14.379 +
  14.380 +
  14.381 +/*
  14.382 + * IRQ2 is cascade interrupt to second interrupt controller
  14.383 + */
  14.384 +
  14.385 +static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
  14.386 +
  14.387 +void __init init_ISA_irqs (void)
  14.388 +{
  14.389 +    int i;
  14.390 +
  14.391 +#ifdef CONFIG_X86_LOCAL_APIC
  14.392 +    init_bsp_APIC();
  14.393 +#endif
  14.394 +    init_8259A(0);
  14.395 +
  14.396 +    for (i = 0; i < NR_IRQS; i++) {
  14.397 +        irq_desc[i].status = IRQ_DISABLED;
  14.398 +        irq_desc[i].action = 0;
  14.399 +        irq_desc[i].depth = 1;
  14.400 +
  14.401 +        if (i < 16) {
  14.402 +            /*
  14.403 +             * 16 old-style INTA-cycle interrupts:
  14.404 +             */
  14.405 +            irq_desc[i].handler = &i8259A_irq_type;
  14.406 +        } else {
  14.407 +            /*
  14.408 +             * 'high' PCI IRQs filled in on demand
  14.409 +             */
  14.410 +            irq_desc[i].handler = &no_irq_type;
  14.411 +        }
  14.412 +    }
  14.413 +}
  14.414 +
  14.415 +void __init init_IRQ(void)
  14.416 +{
  14.417 +    int i;
  14.418 +
  14.419 +    init_ISA_irqs();
  14.420 +
  14.421 +    /*
  14.422 +     * Cover the whole vector space so that no vector can escape
  14.423 +     * us. (Some of these will be overridden and become
  14.424 +     * 'special' SMP interrupts.)
  14.425 +     */
  14.426 +    for (i = 0; i < NR_IRQS; i++) {
  14.427 +        int vector = FIRST_EXTERNAL_VECTOR + i;
  14.428 +        if (vector != HYPERVISOR_CALL_VECTOR) 
  14.429 +            set_intr_gate(vector, interrupt[i]);
  14.430 +    }
  14.431 +
  14.432 +#ifdef CONFIG_SMP
  14.433 +    /*
  14.434 +     * IRQ0 must be given a fixed assignment and initialized,
  14.435 +     * because it's used before the IO-APIC is set up.
  14.436 +     */
  14.437 +    set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
  14.438 +
  14.439 +    /*
  14.440 +     * The reschedule interrupt is a CPU-to-CPU reschedule-helper
  14.441 +     * IPI, driven by wakeup.
  14.442 +     */
  14.443 +    set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
  14.444 +
  14.445 +    /* IPI for invalidation */
  14.446 +    set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
  14.447 +
  14.448 +    /* IPI for generic function call */
  14.449 +    set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
  14.450 +#endif	
  14.451 +
  14.452 +#ifdef CONFIG_X86_LOCAL_APIC
  14.453 +    /* self generated IPI for local APIC timer */
  14.454 +    set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
  14.455 +
  14.456 +    /* IPI vectors for APIC spurious and error interrupts */
  14.457 +    set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
  14.458 +    set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
  14.459 +#endif
  14.460 +
  14.461 +    /*
  14.462 +     * Set the clock to HZ Hz; we already have a valid
  14.463 +     * vector now:
  14.464 +     */
  14.465 +#define CLOCK_TICK_RATE 1193180 /* crystal freq (Hz) */
  14.466 +#define LATCH (((CLOCK_TICK_RATE)+(HZ/2))/HZ)
  14.467 +    outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
  14.468 +    outb_p(LATCH & 0xff , 0x40);	/* LSB */
  14.469 +    outb(LATCH >> 8 , 0x40);	/* MSB */
  14.470 +
  14.471 +    setup_irq(2, &irq2);
  14.472 +}
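
For reference, the PIT programming at the end of init_IRQ() loads channel 0 with LATCH = (CLOCK_TICK_RATE + HZ/2)/HZ, i.e. the 1.19318 MHz input clock divided by the tick rate, rounded to nearest. A worked example assuming HZ = 100; HZ is configuration-dependent and not fixed by this file:

    /* Worked example of the PIT reload value, assuming HZ == 100. */
    #define CLOCK_TICK_RATE 1193180                      /* 8254 input clock, Hz */
    #define HZ              100                          /* assumed tick rate    */
    #define LATCH (((CLOCK_TICK_RATE) + (HZ/2)) / (HZ))  /* == 11932 == 0x2e9c   */

    /* 1193180 / 11932 is roughly 99.998 Hz, so each tick is about 10.0002 ms;
     * the LSB (0x9c) and MSB (0x2e) of LATCH are what the two writes to
     * port 0x40 above send to the counter. */
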
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen-2.4.16/arch/i386/idle0_task.c	Wed Nov 20 12:02:17 2002 +0000
    15.3 @@ -0,0 +1,20 @@
    15.4 +#include <xeno/config.h>
    15.5 +#include <xeno/sched.h>
    15.6 +#include <asm/desc.h>
    15.7 +
    15.8 +/*
    15.9 + * Initial task structure. XXX KAF: To get this 8192-byte aligned without
   15.10 + * linker tricks, I copy it into an aligned BSS area at boot time.
   15.11 + * The actual idle0_task_union is now declared in boot.S.
   15.12 + */
   15.13 +struct task_struct first_task_struct = IDLE0_TASK(idle0_task_union.task);
   15.14 +
   15.15 +/*
   15.16 + * Per-CPU TSS segments. Threads are completely 'soft' on Linux:
   15.17 + * there are no per-task TSSs any more. The TSS size is kept
   15.18 + * cacheline-aligned so the TSSs can live in the
   15.19 + * .data.cacheline_aligned section. Since TSSs are completely CPU-local,
   15.20 + * we want them on exact cacheline boundaries, to eliminate ping-pong.
   15.21 + */ 
   15.22 +struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS };
   15.23 +
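
init_tss[] above is what create_bounce_frame in entry.S indexes with 'movl PROCESSOR(%ebx),%eax; shll $8,%eax; addl $init_tss+12,%eax', i.e. it assumes a 256-byte per-CPU stride with esp1 at offset 12 and ss1 at offset 16. A hedged C restatement of that address computation; the offsets and stride are read off the assembly, not re-derived from the struct layout here:

    extern struct tss_struct init_tss[];   /* defined above */

    /* &init_tss[cpu].esp1, matching 'shll $8; addl $init_tss+12' in entry.S,
     * on the assumption that the per-CPU stride is 256 bytes. */
    static unsigned long *guest_esp1_slot(int cpu)
    {
        return (unsigned long *)((char *)init_tss + cpu * 256 + 12);
    }
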
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen-2.4.16/arch/i386/io_apic.c	Wed Nov 20 12:02:17 2002 +0000
    16.3 @@ -0,0 +1,1487 @@
    16.4 +/*
    16.5 + *	Intel IO-APIC support for multi-Pentium hosts.
    16.6 + *
    16.7 + *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
    16.8 + *
    16.9 + *	Many thanks to Stig Venaas for trying out countless experimental
   16.10 + *	patches and reporting/debugging problems patiently!
   16.11 + *
   16.12 + *	(c) 1999, Multiple IO-APIC support, developed by
   16.13 + *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
   16.14 + *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
   16.15 + *	further tested and cleaned up by Zach Brown <zab@redhat.com>
   16.16 + *	and Ingo Molnar <mingo@redhat.com>
   16.17 + *
   16.18 + *	Fixes
   16.19 + *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
   16.20 + *					thanks to Eric Gilmore
   16.21 + *					and Rolf G. Tews
   16.22 + *					for testing these extensively
   16.23 + */
   16.24 +
   16.25 +#include <xeno/config.h>
   16.26 +#include <xeno/init.h>
   16.27 +#include <xeno/interrupt.h>
   16.28 +#include <xeno/irq.h>
   16.29 +#include <xeno/delay.h>
   16.30 +#include <xeno/sched.h>
   16.31 +#include <xeno/config.h>
   16.32 +#include <asm/mc146818rtc.h>
   16.33 +#include <asm/io.h>
   16.34 +#include <asm/desc.h>
   16.35 +#include <asm/smp.h>
   16.36 +
   16.37 +static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
   16.38 +
   16.39 +/*
   16.40 + * # of IRQ routing registers
   16.41 + */
   16.42 +int nr_ioapic_registers[MAX_IO_APICS];
   16.43 +
   16.44 +/*
   16.45 + * A rough estimate of how many shared IRQs there are; it can
   16.46 + * be changed at any time.
   16.47 + */
   16.48 +#define MAX_PLUS_SHARED_IRQS NR_IRQS
   16.49 +#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
   16.50 +
   16.51 +/*
   16.52 + * This is performance-critical; we want to do it in O(1).
   16.53 + * The indexing order of this array favors 1:1 mappings
   16.54 + * between pins and IRQs.
   16.55 + */
   16.56 +
   16.57 +static struct irq_pin_list {
   16.58 +	int apic, pin, next;
   16.59 +} irq_2_pin[PIN_MAP_SIZE];
   16.60 +
   16.61 +/*
   16.62 + * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
   16.63 + * shared ISA-space IRQs, so we have to support them. We are super
   16.64 + * fast in the common case, and fast for shared ISA-space IRQs.
   16.65 + */
   16.66 +static void add_pin_to_irq(unsigned int irq, int apic, int pin)
   16.67 +{
   16.68 +	static int first_free_entry = NR_IRQS;
   16.69 +	struct irq_pin_list *entry = irq_2_pin + irq;
   16.70 +
   16.71 +	while (entry->next)
   16.72 +		entry = irq_2_pin + entry->next;
   16.73 +
   16.74 +	if (entry->pin != -1) {
   16.75 +		entry->next = first_free_entry;
   16.76 +		entry = irq_2_pin + entry->next;
   16.77 +		if (++first_free_entry >= PIN_MAP_SIZE)
   16.78 +			panic("io_apic.c: whoops");
   16.79 +	}
   16.80 +	entry->apic = apic;
   16.81 +	entry->pin = pin;
   16.82 +}
   16.83 +
   16.84 +#define __DO_ACTION(R, ACTION, FINAL)					\
   16.85 +									\
   16.86 +{									\
   16.87 +	int pin;							\
   16.88 +	struct irq_pin_list *entry = irq_2_pin + irq;			\
   16.89 +									\
   16.90 +	for (;;) {							\
   16.91 +		unsigned int reg;					\
   16.92 +		pin = entry->pin;					\
   16.93 +		if (pin == -1)						\
   16.94 +			break;						\
   16.95 +		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
   16.96 +		reg ACTION;						\
   16.97 +		io_apic_modify(entry->apic, reg);			\
   16.98 +		if (!entry->next)					\
   16.99 +			break;						\
  16.100 +		entry = irq_2_pin + entry->next;			\
  16.101 +	}								\
  16.102 +	FINAL;								\
  16.103 +}
  16.104 +
  16.105 +#define DO_ACTION(name,R,ACTION, FINAL)					\
  16.106 +									\
  16.107 +	static void name##_IO_APIC_irq (unsigned int irq)		\
  16.108 +	__DO_ACTION(R, ACTION, FINAL)
  16.109 +
  16.110 +DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
  16.111 +						/* mask = 1 */
  16.112 +DO_ACTION( __unmask,           0, &= 0xfffeffff, )
  16.113 +						/* mask = 0 */
  16.114 +DO_ACTION( __mask_and_edge,    0, = (reg & 0xffff7fff) | 0x00010000, )
  16.115 +						/* mask = 1, trigger = 0 */
  16.116 +DO_ACTION( __unmask_and_level, 0, = (reg & 0xfffeffff) | 0x00008000, )
  16.117 +						/* mask = 0, trigger = 1 */
  16.118 +
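
__DO_ACTION walks the irq_2_pin chain for an IRQ and read-modify-writes the low dword of each routing entry; DO_ACTION wraps one such walk in a static function. For orientation, a hand expansion of DO_ACTION(__mask, 0, |= 0x00010000, io_apic_sync(entry->apic)); whitespace and comments are added, the behaviour is unchanged, and it relies on the io_apic_read/io_apic_modify/io_apic_sync helpers declared in the asm headers:

    /* What DO_ACTION(__mask, ...) above generates: set the mask bit (bit 16)
     * in every routing entry chained to 'irq'. */
    static void __mask_IO_APIC_irq(unsigned int irq)
    {
        int pin;
        struct irq_pin_list *entry = irq_2_pin + irq;

        for (;;) {
            unsigned int reg;
            pin = entry->pin;
            if (pin == -1)
                break;
            reg = io_apic_read(entry->apic, 0x10 + 0 + pin*2); /* low dword */
            reg |= 0x00010000;                                 /* mask = 1  */
            io_apic_modify(entry->apic, reg);
            if (!entry->next)
                break;
            entry = irq_2_pin + entry->next;
        }
        io_apic_sync(entry->apic);                             /* FINAL step */
    }
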
  16.119 +static void mask_IO_APIC_irq (unsigned int irq)
  16.120 +{
  16.121 +	unsigned long flags;
  16.122 +
  16.123 +	spin_lock_irqsave(&ioapic_lock, flags);
  16.124 +	__mask_IO_APIC_irq(irq);
  16.125 +	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.126 +}
  16.127 +
  16.128 +static void unmask_IO_APIC_irq (unsigned int irq)
  16.129 +{
  16.130 +	unsigned long flags;
  16.131 +
  16.132 +	spin_lock_irqsave(&ioapic_lock, flags);
  16.133 +	__unmask_IO_APIC_irq(irq);
  16.134 +	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.135 +}
  16.136 +
  16.137 +void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
  16.138 +{
  16.139 +	struct IO_APIC_route_entry entry;
  16.140 +	unsigned long flags;
  16.141 +
  16.142 +	/*
  16.143 +	 * Disable it in the IO-APIC irq-routing table:
  16.144 +	 */
  16.145 +	memset(&entry, 0, sizeof(entry));
  16.146 +	entry.mask = 1;
  16.147 +	spin_lock_irqsave(&ioapic_lock, flags);
  16.148 +	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
  16.149 +	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
  16.150 +	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.151 +}
  16.152 +
  16.153 +static void clear_IO_APIC (void)
  16.154 +{
  16.155 +	int apic, pin;
  16.156 +
  16.157 +	for (apic = 0; apic < nr_ioapics; apic++)
  16.158 +		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
  16.159 +			clear_IO_APIC_pin(apic, pin);
  16.160 +}
  16.161 +
  16.162 +/*
  16.163 + * Find the IRQ entry number of a certain pin.
  16.164 + */
  16.165 +static int __init find_irq_entry(int apic, int pin, int type)
  16.166 +{
  16.167 +	int i;
  16.168 +
  16.169 +	for (i = 0; i < mp_irq_entries; i++)
  16.170 +		if (mp_irqs[i].mpc_irqtype == type &&
  16.171 +		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
  16.172 +		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
  16.173 +		    mp_irqs[i].mpc_dstirq == pin)
  16.174 +			return i;
  16.175 +
  16.176 +	return -1;
  16.177 +}
  16.178 +
  16.179 +/*
  16.180 + * Find the pin to which IRQ[irq] (ISA) is connected
  16.181 + */
  16.182 +static int __init find_isa_irq_pin(int irq, int type)
  16.183 +{
  16.184 +	int i;
  16.185 +
  16.186 +	for (i = 0; i < mp_irq_entries; i++) {
  16.187 +		int lbus = mp_irqs[i].mpc_srcbus;
  16.188 +
  16.189 +		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
  16.190 +		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
  16.191 +		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
  16.192 +		    (mp_irqs[i].mpc_irqtype == type) &&
  16.193 +		    (mp_irqs[i].mpc_srcbusirq == irq))
  16.194 +
  16.195 +			return mp_irqs[i].mpc_dstirq;
  16.196 +	}
  16.197 +	return -1;
  16.198 +}
  16.199 +
  16.200 +/*
  16.201 + * Find a specific PCI IRQ entry.
  16.202 + * Not an __init, possibly needed by modules
  16.203 + */
  16.204 +static int pin_2_irq(int idx, int apic, int pin);
  16.205 +
  16.206 +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
  16.207 +{
  16.208 +	int apic, i, best_guess = -1;
  16.209 +
  16.210 +	Dprintk("querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
  16.211 +		bus, slot, pin);
  16.212 +	if (mp_bus_id_to_pci_bus[bus] == -1) {
  16.213 +		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
  16.214 +		return -1;
  16.215 +	}
  16.216 +	for (i = 0; i < mp_irq_entries; i++) {
  16.217 +		int lbus = mp_irqs[i].mpc_srcbus;
  16.218 +
  16.219 +		for (apic = 0; apic < nr_ioapics; apic++)
  16.220 +			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
  16.221 +			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
  16.222 +				break;
  16.223 +
  16.224 +		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
  16.225 +		    !mp_irqs[i].mpc_irqtype &&
  16.226 +		    (bus == lbus) &&
  16.227 +		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
  16.228 +			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
  16.229 +
  16.230 +			if (!(apic || IO_APIC_IRQ(irq)))
  16.231 +				continue;
  16.232 +
  16.233 +			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
  16.234 +				return irq;
  16.235 +			/*
  16.236 +			 * Use the first all-but-pin matching entry as a
  16.237 +			 * best-guess fuzzy result for broken mptables.
  16.238 +			 */
  16.239 +			if (best_guess < 0)
  16.240 +				best_guess = irq;
  16.241 +		}
  16.242 +	}
  16.243 +	return best_guess;
  16.244 +}
  16.245 +
  16.246 +/*
  16.247 + * EISA Edge/Level control register, ELCR
  16.248 + */
  16.249 +static int __init EISA_ELCR(unsigned int irq)
  16.250 +{
  16.251 +	if (irq < 16) {
  16.252 +		unsigned int port = 0x4d0 + (irq >> 3);
  16.253 +		return (inb(port) >> (irq & 7)) & 1;
  16.254 +	}
  16.255 +	printk(KERN_INFO "Broken MPtable reports ISA irq %d\n", irq);
  16.256 +	return 0;
  16.257 +}
  16.258 +
  16.259 +/* EISA interrupts are always polarity zero and can be edge or level
  16.260 + * trigger depending on the ELCR value.  If an interrupt is listed as
  16.261 + * EISA conforming in the MP table, that means its trigger type must
  16.262 + * be read in from the ELCR */
  16.263 +
  16.264 +#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
  16.265 +#define default_EISA_polarity(idx)	(0)
  16.266 +
  16.267 +/* ISA interrupts are always polarity zero edge triggered,
  16.268 + * when listed as conforming in the MP table. */
  16.269 +
  16.270 +#define default_ISA_trigger(idx)	(0)
  16.271 +#define default_ISA_polarity(idx)	(0)
  16.272 +
  16.273 +/* PCI interrupts are always polarity one level triggered,
  16.274 + * when listed as conforming in the MP table. */
  16.275 +
  16.276 +#define default_PCI_trigger(idx)	(1)
  16.277 +#define default_PCI_polarity(idx)	(1)
  16.278 +
  16.279 +/* MCA interrupts are always polarity zero level triggered,
  16.280 + * when listed as conforming in the MP table. */
  16.281 +
  16.282 +#define default_MCA_trigger(idx)	(1)
  16.283 +#define default_MCA_polarity(idx)	(0)
  16.284 +
  16.285 +static int __init MPBIOS_polarity(int idx)
  16.286 +{
  16.287 +	int bus = mp_irqs[idx].mpc_srcbus;
  16.288 +	int polarity;
  16.289 +
  16.290 +	/*
  16.291 +	 * Determine IRQ line polarity (high active or low active):
  16.292 +	 */
  16.293 +	switch (mp_irqs[idx].mpc_irqflag & 3)
  16.294 +	{
  16.295 +		case 0: /* conforms, i.e. bus-type dependent polarity */
  16.296 +		{
  16.297 +			switch (mp_bus_id_to_type[bus])
  16.298 +			{
  16.299 +				case MP_BUS_ISA: /* ISA pin */
  16.300 +				{
  16.301 +					polarity = default_ISA_polarity(idx);
  16.302 +					break;
  16.303 +				}
  16.304 +				case MP_BUS_EISA: /* EISA pin */
  16.305 +				{
  16.306 +					polarity = default_EISA_polarity(idx);
  16.307 +					break;
  16.308 +				}
  16.309 +				case MP_BUS_PCI: /* PCI pin */
  16.310 +				{
  16.311 +					polarity = default_PCI_polarity(idx);
  16.312 +					break;
  16.313 +				}
  16.314 +				case MP_BUS_MCA: /* MCA pin */
  16.315 +				{
  16.316 +					polarity = default_MCA_polarity(idx);
  16.317 +					break;
  16.318 +				}
  16.319 +				default:
  16.320 +				{
  16.321 +					printk(KERN_WARNING "broken BIOS!!\n");
  16.322 +					polarity = 1;
  16.323 +					break;
  16.324 +				}
  16.325 +			}
  16.326 +			break;
  16.327 +		}
  16.328 +		case 1: /* high active */
  16.329 +		{
  16.330 +			polarity = 0;
  16.331 +			break;
  16.332 +		}
  16.333 +		case 2: /* reserved */
  16.334 +		{
  16.335 +			printk(KERN_WARNING "broken BIOS!!\n");
  16.336 +			polarity = 1;
  16.337 +			break;
  16.338 +		}
  16.339 +		case 3: /* low active */
  16.340 +		{
  16.341 +			polarity = 1;
  16.342 +			break;
  16.343 +		}
  16.344 +		default: /* invalid */
  16.345 +		{
  16.346 +			printk(KERN_WARNING "broken BIOS!!\n");
  16.347 +			polarity = 1;
  16.348 +			break;
  16.349 +		}
  16.350 +	}
  16.351 +	return polarity;
  16.352 +}
  16.353 +
  16.354 +static int __init MPBIOS_trigger(int idx)
  16.355 +{
  16.356 +	int bus = mp_irqs[idx].mpc_srcbus;
  16.357 +	int trigger;
  16.358 +
  16.359 +	/*
  16.360 +	 * Determine IRQ trigger mode (edge or level sensitive):
  16.361 +	 */
  16.362 +	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
  16.363 +	{
  16.364 +		case 0: /* conforms, i.e. bus-type dependent */
  16.365 +		{
  16.366 +			switch (mp_bus_id_to_type[bus])
  16.367 +			{
  16.368 +				case MP_BUS_ISA: /* ISA pin */
  16.369 +				{
  16.370 +					trigger = default_ISA_trigger(idx);
  16.371 +					break;
  16.372 +				}
  16.373 +				case MP_BUS_EISA: /* EISA pin */
  16.374 +				{
  16.375 +					trigger = default_EISA_trigger(idx);
  16.376 +					break;
  16.377 +				}
  16.378 +				case MP_BUS_PCI: /* PCI pin */
  16.379 +				{
  16.380 +					trigger = default_PCI_trigger(idx);
  16.381 +					break;
  16.382 +				}
  16.383 +				case MP_BUS_MCA: /* MCA pin */
  16.384 +				{
  16.385 +					trigger = default_MCA_trigger(idx);
  16.386 +					break;
  16.387 +				}
  16.388 +				default:
  16.389 +				{
  16.390 +					printk(KERN_WARNING "broken BIOS!!\n");
  16.391 +					trigger = 1;
  16.392 +					break;
  16.393 +				}
  16.394 +			}
  16.395 +			break;
  16.396 +		}
  16.397 +		case 1: /* edge */
  16.398 +		{
  16.399 +			trigger = 0;
  16.400 +			break;
  16.401 +		}
  16.402 +		case 2: /* reserved */
  16.403 +		{
  16.404 +			printk(KERN_WARNING "broken BIOS!!\n");
  16.405 +			trigger = 1;
  16.406 +			break;
  16.407 +		}
  16.408 +		case 3: /* level */
  16.409 +		{
  16.410 +			trigger = 1;
  16.411 +			break;
  16.412 +		}
  16.413 +		default: /* invalid */
  16.414 +		{
  16.415 +			printk(KERN_WARNING "broken BIOS!!\n");
  16.416 +			trigger = 0;
  16.417 +			break;
  16.418 +		}
  16.419 +	}
  16.420 +	return trigger;
  16.421 +}
  16.422 +
  16.423 +static inline int irq_polarity(int idx)
  16.424 +{
  16.425 +	return MPBIOS_polarity(idx);
  16.426 +}
  16.427 +
  16.428 +static inline int irq_trigger(int idx)
  16.429 +{
  16.430 +	return MPBIOS_trigger(idx);
  16.431 +}
  16.432 +
  16.433 +static int pin_2_irq(int idx, int apic, int pin)
  16.434 +{
  16.435 +	int irq, i;
  16.436 +	int bus = mp_irqs[idx].mpc_srcbus;
  16.437 +
  16.438 +	/*
  16.439 +	 * Debugging check; we are in big trouble if this message pops up!
  16.440 +	 */
  16.441 +	if (mp_irqs[idx].mpc_dstirq != pin)
  16.442 +		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
  16.443 +
  16.444 +	switch (mp_bus_id_to_type[bus])
  16.445 +	{
  16.446 +		case MP_BUS_ISA: /* ISA pin */
  16.447 +		case MP_BUS_EISA:
  16.448 +		case MP_BUS_MCA:
  16.449 +		{
  16.450 +			irq = mp_irqs[idx].mpc_srcbusirq;
  16.451 +			break;
  16.452 +		}
  16.453 +		case MP_BUS_PCI: /* PCI pin */
  16.454 +		{
  16.455 +			/*
  16.456 +			 * PCI IRQs are mapped in order
  16.457 +			 */
  16.458 +			i = irq = 0;
  16.459 +			while (i < apic)
  16.460 +				irq += nr_ioapic_registers[i++];
  16.461 +			irq += pin;
  16.462 +			break;
  16.463 +		}
  16.464 +		default:
  16.465 +		{
  16.466 +			printk(KERN_ERR "unknown bus type %d.\n",bus); 
  16.467 +			irq = 0;
  16.468 +			break;
  16.469 +		}
  16.470 +	}
  16.471 +
  16.472 +	return irq;
  16.473 +}
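
For PCI sources, pin_2_irq() numbers IRQs by concatenating the pin ranges of the preceding IO-APICs and then adding the pin. A worked example of that branch, assuming two IO-APICs with 24 redirection entries each; the pin counts are an assumption for illustration, the real values come from nr_ioapic_registers[]:

    /* Worked example of the PCI branch of pin_2_irq(). */
    static int pci_pin_to_irq_example(int apic, int pin)
    {
        static const int nr_regs[2] = { 24, 24 };  /* assumed pin counts        */
        int i = 0, irq = 0;

        while (i < apic)
            irq += nr_regs[i++];   /* skip all pins of earlier IO-APICs         */
        return irq + pin;          /* e.g. apic 1, pin 3 maps to IRQ 24+3 = 27  */
    }
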
  16.474 +
  16.475 +static inline int IO_APIC_irq_trigger(int irq)
  16.476 +{
  16.477 +	int apic, idx, pin;
  16.478 +
  16.479 +	for (apic = 0; apic < nr_ioapics; apic++) {
  16.480 +		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
  16.481 +			idx = find_irq_entry(apic,pin,mp_INT);
  16.482 +			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
  16.483 +				return irq_trigger(idx);
  16.484 +		}
  16.485 +	}
  16.486 +	/*
  16.487 +	 * Nonexistent IRQs default to edge trigger.
  16.488 +	 */
  16.489 +	return 0;
  16.490 +}
  16.491 +
  16.492 +int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 };
  16.493 +
  16.494 +static int __init assign_irq_vector(int irq)
  16.495 +{
  16.496 +	static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
  16.497 +	if (IO_APIC_VECTOR(irq) > 0)
  16.498 +		return IO_APIC_VECTOR(irq);
  16.499 +next:
  16.500 +	current_vector += 8;
  16.501 +	if (current_vector == HYPERVISOR_CALL_VECTOR)
  16.502 +		goto next;
  16.503 +
  16.504 +	if (current_vector > FIRST_SYSTEM_VECTOR) {
  16.505 +		offset++;
  16.506 +		current_vector = FIRST_DEVICE_VECTOR + offset;
  16.507 +	}
  16.508 +
  16.509 +	if (current_vector == FIRST_SYSTEM_VECTOR)
  16.510 +		panic("ran out of interrupt sources!");
  16.511 +
  16.512 +	IO_APIC_VECTOR(irq) = current_vector;
  16.513 +	return current_vector;
  16.514 +}
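
assign_irq_vector() hands out vectors in steps of 8 so that consecutive IRQs land in different IDT priority classes, refuses to hand out HYPERVISOR_CALL_VECTOR, and once it runs past FIRST_SYSTEM_VECTOR it restarts from FIRST_DEVICE_VECTOR + offset to fill in the gaps. A self-contained restatement of that policy with illustrative constants; the real values live in the asm headers, and the IO_APIC_VECTOR cache and the panic are omitted here:

    /* Sketch of the vector allocation policy, with assumed constants. */
    #define FIRST_DEVICE_VECTOR_X 0x31   /* assumption for illustration */
    #define FIRST_SYSTEM_VECTOR_X 0xef   /* assumption for illustration */
    #define HYPERVISOR_VECTOR_X   0x82   /* assumption for illustration */

    static int next_vector_sketch(void)
    {
        static int current_vector = FIRST_DEVICE_VECTOR_X, offset = 0;
    next:
        current_vector += 8;                        /* spread across priority classes */
        if (current_vector == HYPERVISOR_VECTOR_X)  /* never reuse the hypercall gate */
            goto next;
        if (current_vector > FIRST_SYSTEM_VECTOR_X) {
            offset++;                               /* start filling the gaps         */
            current_vector = FIRST_DEVICE_VECTOR_X + offset;
        }
        return current_vector;                      /* first call returns 0x39        */
    }
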
  16.515 +
  16.516 +extern void (*interrupt[NR_IRQS])(void);
  16.517 +static struct hw_interrupt_type ioapic_level_irq_type;
  16.518 +static struct hw_interrupt_type ioapic_edge_irq_type;
  16.519 +
  16.520 +void __init setup_IO_APIC_irqs(void)
  16.521 +{
  16.522 +	struct IO_APIC_route_entry entry;
  16.523 +	int apic, pin, idx, irq, first_notcon = 1, vector;
  16.524 +	unsigned long flags;
  16.525 +
  16.526 +	printk(KERN_DEBUG "init IO_APIC IRQs\n");
  16.527 +
  16.528 +	for (apic = 0; apic < nr_ioapics; apic++) {
  16.529 +	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
  16.530 +
  16.531 +		/*
  16.532 +		 * add it to the IO-APIC irq-routing table:
  16.533 +		 */
  16.534 +		memset(&entry,0,sizeof(entry));
  16.535 +
  16.536 +		entry.delivery_mode = dest_LowestPrio;
  16.537 +		entry.dest_mode = INT_DELIVERY_MODE;
  16.538 +		entry.mask = 0;				/* enable IRQ */
  16.539 +		entry.dest.logical.logical_dest = TARGET_CPUS;
  16.540 +
  16.541 +		idx = find_irq_entry(apic,pin,mp_INT);
  16.542 +		if (idx == -1) {
  16.543 +			if (first_notcon) {
  16.544 +				printk(KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
  16.545 +				first_notcon = 0;
  16.546 +			} else
  16.547 +				printk(", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
  16.548 +			continue;
  16.549 +		}
  16.550 +
  16.551 +		entry.trigger = irq_trigger(idx);
  16.552 +		entry.polarity = irq_polarity(idx);
  16.553 +
  16.554 +		if (irq_trigger(idx)) {
  16.555 +			entry.trigger = 1;
  16.556 +			entry.mask = 1;
  16.557 +			entry.dest.logical.logical_dest = TARGET_CPUS;
  16.558 +		}
  16.559 +
  16.560 +		irq = pin_2_irq(idx, apic, pin);
  16.561 +		add_pin_to_irq(irq, apic, pin);
  16.562 +
  16.563 +		if (!apic && !IO_APIC_IRQ(irq))
  16.564 +			continue;
  16.565 +
  16.566 +		if (IO_APIC_IRQ(irq)) {
  16.567 +			vector = assign_irq_vector(irq);
  16.568 +			entry.vector = vector;
  16.569 +
  16.570 +			if (IO_APIC_irq_trigger(irq))
  16.571 +				irq_desc[irq].handler = &ioapic_level_irq_type;
  16.572 +			else
  16.573 +				irq_desc[irq].handler = &ioapic_edge_irq_type;
  16.574 +
  16.575 +			set_intr_gate(vector, interrupt[irq]);
  16.576 +		
  16.577 +			if (!apic && (irq < 16))
  16.578 +				disable_8259A_irq(irq);
  16.579 +		}
  16.580 +		spin_lock_irqsave(&ioapic_lock, flags);
  16.581 +		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
  16.582 +		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
  16.583 +		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.584 +	}
  16.585 +	}
  16.586 +
  16.587 +	if (!first_notcon)
  16.588 +		printk(" not connected.\n");
  16.589 +}
  16.590 +
  16.591 +/*
  16.592 + * Set up the 8259A-master output pin as broadcast to all
  16.593 + * CPUs.
  16.594 + */
  16.595 +void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
  16.596 +{
  16.597 +	struct IO_APIC_route_entry entry;
  16.598 +	unsigned long flags;
  16.599 +
  16.600 +	memset(&entry,0,sizeof(entry));
  16.601 +
  16.602 +	disable_8259A_irq(0);
  16.603 +
  16.604 +	/* mask LVT0 */
  16.605 +	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
  16.606 +
  16.607 +	/*
  16.608 +	 * We use logical delivery to get the timer IRQ
  16.609 +	 * to the first CPU.
  16.610 +	 */
  16.611 +	entry.dest_mode = INT_DELIVERY_MODE;
  16.612 +	entry.mask = 0;					/* unmask IRQ now */
  16.613 +	entry.dest.logical.logical_dest = TARGET_CPUS;
  16.614 +	entry.delivery_mode = dest_LowestPrio;
  16.615 +	entry.polarity = 0;
  16.616 +	entry.trigger = 0;
  16.617 +	entry.vector = vector;
  16.618 +
  16.619 +	/*
  16.620 +	 * The timer IRQ doesn't have to know that behind the
  16.621 +	 * scenes we have an 8259A master in AEOI mode ...
  16.622 +	 */
  16.623 +	irq_desc[0].handler = &ioapic_edge_irq_type;
  16.624 +
  16.625 +	/*
  16.626 +	 * Add it to the IO-APIC irq-routing table:
  16.627 +	 */
  16.628 +	spin_lock_irqsave(&ioapic_lock, flags);
  16.629 +	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
  16.630 +	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
  16.631 +	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.632 +
  16.633 +	enable_8259A_irq(0);
  16.634 +}
  16.635 +
  16.636 +void __init UNEXPECTED_IO_APIC(void)
  16.637 +{
  16.638 +	printk(KERN_WARNING " WARNING: unexpected IO-APIC, please mail\n");
  16.639 +	printk(KERN_WARNING "          to linux-smp@vger.kernel.org\n");
  16.640 +}
  16.641 +
  16.642 +void __init print_IO_APIC(void)
  16.643 +{
  16.644 +	int apic, i;
  16.645 +	struct IO_APIC_reg_00 reg_00;
  16.646 +	struct IO_APIC_reg_01 reg_01;
  16.647 +	struct IO_APIC_reg_02 reg_02;
  16.648 +	unsigned long flags;
  16.649 +
  16.650 + 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
  16.651 +	for (i = 0; i < nr_ioapics; i++)
  16.652 +		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
  16.653 +		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
  16.654 +
  16.655 +	/*
  16.656 +	 * We are a bit conservative about what we expect.  We have to
  16.657 +	 * know about every hardware change ASAP.
  16.658 +	 */
  16.659 +	printk(KERN_INFO "testing the IO APIC.......................\n");
  16.660 +
  16.661 +	for (apic = 0; apic < nr_ioapics; apic++) {
  16.662 +
  16.663 +	spin_lock_irqsave(&ioapic_lock, flags);
  16.664 +	*(int *)&reg_00 = io_apic_read(apic, 0);
  16.665 +	*(int *)&reg_01 = io_apic_read(apic, 1);
  16.666 +	if (reg_01.version >= 0x10)
  16.667 +		*(int *)&reg_02 = io_apic_read(apic, 2);
  16.668 +	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.669 +
  16.670 +	printk("\n");
  16.671 +	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
  16.672 +	printk(KERN_DEBUG ".... register #00: %08X\n", *(int *)&reg_00);
  16.673 +	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.ID);
  16.674 +	if (reg_00.__reserved_1 || reg_00.__reserved_2)
  16.675 +		UNEXPECTED_IO_APIC();
  16.676 +
  16.677 +	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
  16.678 +	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.entries);
  16.679 +	if (	(reg_01.entries != 0x0f) && /* older (Neptune) boards */
  16.680 +		(reg_01.entries != 0x17) && /* typical ISA+PCI boards */
  16.681 +		(reg_01.entries != 0x1b) && /* Compaq Proliant boards */
  16.682 +		(reg_01.entries != 0x1f) && /* dual Xeon boards */
  16.683 +		(reg_01.entries != 0x22) && /* bigger Xeon boards */
  16.684 +		(reg_01.entries != 0x2E) &&
  16.685 +		(reg_01.entries != 0x3F)
  16.686 +	)
  16.687 +		UNEXPECTED_IO_APIC();
  16.688 +
  16.689 +	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.PRQ);
  16.690 +	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.version);
  16.691 +	if (	(reg_01.version != 0x01) && /* 82489DX IO-APICs */
  16.692 +		(reg_01.version != 0x10) && /* oldest IO-APICs */
  16.693 +		(reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */
  16.694 +		(reg_01.version != 0x13) && /* Xeon IO-APICs */
  16.695 +		(reg_01.version != 0x20)    /* Intel P64H (82806 AA) */
  16.696 +	)
  16.697 +		UNEXPECTED_IO_APIC();
  16.698 +	if (reg_01.__reserved_1 || reg_01.__reserved_2)
  16.699 +		UNEXPECTED_IO_APIC();
  16.700 +
  16.701 +	if (reg_01.version >= 0x10) {
  16.702 +		printk(KERN_DEBUG ".... register #02: %08X\n", *(int *)&reg_02);
  16.703 +		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.arbitration);
  16.704 +		if (reg_02.__reserved_1 || reg_02.__reserved_2)
  16.705 +			UNEXPECTED_IO_APIC();
  16.706 +	}
  16.707 +
  16.708 +	printk(KERN_DEBUG ".... IRQ redirection table:\n");
  16.709 +
  16.710 +	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
  16.711 +			  " Stat Dest Deli Vect:   \n");
  16.712 +
  16.713 +	for (i = 0; i <= reg_01.entries; i++) {
  16.714 +		struct IO_APIC_route_entry entry;
  16.715 +
  16.716 +		spin_lock_irqsave(&ioapic_lock, flags);
  16.717 +		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
  16.718 +		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
  16.719 +		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.720 +
  16.721 +		printk(KERN_DEBUG " %02x %03X %02X  ",
  16.722 +			i,
  16.723 +			entry.dest.logical.logical_dest,
  16.724 +			entry.dest.physical.physical_dest
  16.725 +		);
  16.726 +
  16.727 +		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
  16.728 +			entry.mask,
  16.729 +			entry.trigger,
  16.730 +			entry.irr,
  16.731 +			entry.polarity,
  16.732 +			entry.delivery_status,
  16.733 +			entry.dest_mode,
  16.734 +			entry.delivery_mode,
  16.735 +			entry.vector
  16.736 +		);
  16.737 +	}
  16.738 +	}
  16.739 +	printk(KERN_DEBUG "IRQ to pin mappings:\n");
  16.740 +	for (i = 0; i < NR_IRQS; i++) {
  16.741 +		struct irq_pin_list *entry = irq_2_pin + i;
  16.742 +		if (entry->pin < 0)
  16.743 +			continue;
  16.744 +		printk(KERN_DEBUG "IRQ%d ", i);
  16.745 +		for (;;) {
  16.746 +			printk("-> %d:%d", entry->apic, entry->pin);
  16.747 +			if (!entry->next)
  16.748 +				break;
  16.749 +			entry = irq_2_pin + entry->next;
  16.750 +		}
  16.751 +		printk("\n");
  16.752 +	}
  16.753 +
  16.754 +	printk(KERN_INFO ".................................... done.\n");
  16.755 +
  16.756 +	return;
  16.757 +}
  16.758 +
  16.759 +static void print_APIC_bitfield (int base)
  16.760 +{
  16.761 +	unsigned int v;
  16.762 +	int i, j;
  16.763 +
  16.764 +	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
  16.765 +	for (i = 0; i < 8; i++) {
  16.766 +		v = apic_read(base + i*0x10);
  16.767 +		for (j = 0; j < 32; j++) {
  16.768 +			if (v & (1<<j))
  16.769 +				printk("1");
  16.770 +			else
  16.771 +				printk("0");
  16.772 +		}
  16.773 +		printk("\n");
  16.774 +	}
  16.775 +}
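The ISR/TMR/IRR arrays dumped by print_APIC_bitfield() are 256 bits wide, exposed as eight 32-bit registers spaced 0x10 apart. Below is a minimal user-space sketch of the same decode; read_reg() and its fake register contents are invented stand-ins for apic_read(), not part of the changeset.

#include <stdio.h>
#include <stdint.h>

/* Invented stand-in for apic_read(): returns the 32-bit register at
 * 'offset'.  A static table fakes the hardware so the sketch runs in
 * user space. */
static uint32_t read_reg(unsigned int offset)
{
    static const uint32_t fake[8] = { 0x1, 0, 0x80000000u, 0, 0, 0, 0, 0 };
    return fake[(offset >> 4) & 7];
}

/* Same walk as print_APIC_bitfield(): eight registers spaced 0x10 apart,
 * 32 bits each, least-significant bit printed first. */
static void print_bitfield(unsigned int base)
{
    for (int i = 0; i < 8; i++) {
        uint32_t v = read_reg(base + i * 0x10);
        for (int j = 0; j < 32; j++)
            putchar((v & (1u << j)) ? '1' : '0');
        putchar('\n');
    }
}

int main(void)
{
    print_bitfield(0);   /* base would be APIC_ISR, APIC_TMR or APIC_IRR */
    return 0;
}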
  16.776 +
  16.777 +void /*__init*/ print_local_APIC(void * dummy)
  16.778 +{
  16.779 +	unsigned int v, ver, maxlvt;
  16.780 +
  16.781 +	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
  16.782 +		smp_processor_id(), hard_smp_processor_id());
  16.783 +	v = apic_read(APIC_ID);
  16.784 +	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
  16.785 +	v = apic_read(APIC_LVR);
  16.786 +	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
  16.787 +	ver = GET_APIC_VERSION(v);
  16.788 +	maxlvt = get_maxlvt();
  16.789 +
  16.790 +	v = apic_read(APIC_TASKPRI);
  16.791 +	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
  16.792 +
  16.793 +	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
  16.794 +		v = apic_read(APIC_ARBPRI);
  16.795 +		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
  16.796 +			v & APIC_ARBPRI_MASK);
  16.797 +		v = apic_read(APIC_PROCPRI);
  16.798 +		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
  16.799 +	}
  16.800 +
  16.801 +	v = apic_read(APIC_EOI);
  16.802 +	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
  16.803 +	v = apic_read(APIC_RRR);
  16.804 +	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
  16.805 +	v = apic_read(APIC_LDR);
  16.806 +	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
  16.807 +	v = apic_read(APIC_DFR);
  16.808 +	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
  16.809 +	v = apic_read(APIC_SPIV);
  16.810 +	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
  16.811 +
  16.812 +	printk(KERN_DEBUG "... APIC ISR field:\n");
  16.813 +	print_APIC_bitfield(APIC_ISR);
  16.814 +	printk(KERN_DEBUG "... APIC TMR field:\n");
  16.815 +	print_APIC_bitfield(APIC_TMR);
  16.816 +	printk(KERN_DEBUG "... APIC IRR field:\n");
  16.817 +	print_APIC_bitfield(APIC_IRR);
  16.818 +
  16.819 +	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
  16.820 +		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
  16.821 +			apic_write(APIC_ESR, 0);
  16.822 +		v = apic_read(APIC_ESR);
  16.823 +		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
  16.824 +	}
  16.825 +
  16.826 +	v = apic_read(APIC_ICR);
  16.827 +	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
  16.828 +	v = apic_read(APIC_ICR2);
  16.829 +	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
  16.830 +
  16.831 +	v = apic_read(APIC_LVTT);
  16.832 +	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
  16.833 +
  16.834 +	if (maxlvt > 3) {                       /* PC is LVT#4. */
  16.835 +		v = apic_read(APIC_LVTPC);
  16.836 +		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
  16.837 +	}
  16.838 +	v = apic_read(APIC_LVT0);
  16.839 +	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
  16.840 +	v = apic_read(APIC_LVT1);
  16.841 +	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
  16.842 +
  16.843 +	if (maxlvt > 2) {			/* ERR is LVT#3. */
  16.844 +		v = apic_read(APIC_LVTERR);
  16.845 +		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
  16.846 +	}
  16.847 +
  16.848 +	v = apic_read(APIC_TMICT);
  16.849 +	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
  16.850 +	v = apic_read(APIC_TMCCT);
  16.851 +	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
  16.852 +	v = apic_read(APIC_TDCR);
  16.853 +	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
  16.854 +	printk("\n");
  16.855 +}
  16.856 +
  16.857 +void print_all_local_APICs (void)
  16.858 +{
  16.859 +	smp_call_function(print_local_APIC, NULL, 1, 1);
  16.860 +	print_local_APIC(NULL);
  16.861 +}
  16.862 +
  16.863 +void /*__init*/ print_PIC(void)
  16.864 +{
  16.865 +	extern spinlock_t i8259A_lock;
  16.866 +	unsigned int v, flags;
  16.867 +
  16.868 +	printk(KERN_DEBUG "\nprinting PIC contents\n");
  16.869 +
  16.870 +	spin_lock_irqsave(&i8259A_lock, flags);
  16.871 +
  16.872 +	v = inb(0xa1) << 8 | inb(0x21);
  16.873 +	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
  16.874 +
  16.875 +	v = inb(0xa0) << 8 | inb(0x20);
  16.876 +	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
  16.877 +
  16.878 +	outb(0x0b,0xa0);
  16.879 +	outb(0x0b,0x20);
  16.880 +	v = inb(0xa0) << 8 | inb(0x20);
  16.881 +	outb(0x0a,0xa0);
  16.882 +	outb(0x0a,0x20);
  16.883 +
  16.884 +	spin_unlock_irqrestore(&i8259A_lock, flags);
  16.885 +
  16.886 +	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
  16.887 +
  16.888 +	v = inb(0x4d1) << 8 | inb(0x4d0);
  16.889 +	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
  16.890 +}
  16.891 +
  16.892 +static void __init enable_IO_APIC(void)
  16.893 +{
  16.894 +	struct IO_APIC_reg_01 reg_01;
  16.895 +	int i;
  16.896 +	unsigned long flags;
  16.897 +
  16.898 +	for (i = 0; i < PIN_MAP_SIZE; i++) {
  16.899 +		irq_2_pin[i].pin = -1;
  16.900 +		irq_2_pin[i].next = 0;
  16.901 +	}
  16.902 +
  16.903 +	/*
  16.904 +	 * The number of IO-APIC IRQ registers (== #pins):
  16.905 +	 */
  16.906 +	for (i = 0; i < nr_ioapics; i++) {
  16.907 +		spin_lock_irqsave(&ioapic_lock, flags);
  16.908 +		*(int *)&reg_01 = io_apic_read(i, 1);
  16.909 +		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.910 +		nr_ioapic_registers[i] = reg_01.entries+1;
  16.911 +	}
  16.912 +
  16.913 +	/*
  16.914 +	 * Do not trust the IO-APIC being empty at bootup
  16.915 +	 */
  16.916 +	clear_IO_APIC();
  16.917 +}
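enable_IO_APIC() derives each IO-APIC's pin count from register 0x01. A small sketch of that extraction, assuming the usual 82093AA layout (version in bits 0-7, maximum redirection entry index in bits 16-23); the register value is a made-up sample.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t reg_01 = 0x00170011;                  /* made-up sample value */
    unsigned int version = reg_01 & 0xff;          /* 0x11 */
    unsigned int entries = (reg_01 >> 16) & 0xff;  /* 0x17 */

    printf("IO-APIC version %#x, %u redirection entries (pins)\n",
           version, entries + 1);
    return 0;
}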
  16.918 +
  16.919 +/*
  16.920 + * Not an __init, needed by the reboot code
  16.921 + */
  16.922 +void disable_IO_APIC(void)
  16.923 +{
  16.924 +	/*
  16.925 +	 * Clear the IO-APIC before rebooting:
  16.926 +	 */
  16.927 +	clear_IO_APIC();
  16.928 +
  16.929 +	disconnect_bsp_APIC();
  16.930 +}
  16.931 +
  16.932 +/*
  16.933 + * function to set the IO-APIC physical IDs based on the
  16.934 + * values stored in the MPC table.
  16.935 + *
  16.936 + * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
  16.937 + */
  16.938 +
  16.939 +static void __init setup_ioapic_ids_from_mpc (void)
  16.940 +{
  16.941 +	struct IO_APIC_reg_00 reg_00;
  16.942 +	unsigned long phys_id_present_map = phys_cpu_present_map;
  16.943 +	int apic;
  16.944 +	int i;
  16.945 +	unsigned char old_id;
  16.946 +	unsigned long flags;
  16.947 +
  16.948 +	/*
  16.949 +	 * Set the IOAPIC ID to the value stored in the MPC table.
  16.950 +	 */
  16.951 +	for (apic = 0; apic < nr_ioapics; apic++) {
  16.952 +
  16.953 +		/* Read the register 0 value */
  16.954 +		spin_lock_irqsave(&ioapic_lock, flags);
  16.955 +		*(int *)&reg_00 = io_apic_read(apic, 0);
  16.956 +		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.957 +		
  16.958 +		old_id = mp_ioapics[apic].mpc_apicid;
  16.959 +
  16.960 +		if (mp_ioapics[apic].mpc_apicid >= 0xf) {
  16.961 +			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
  16.962 +				apic, mp_ioapics[apic].mpc_apicid);
  16.963 +			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
  16.964 +				reg_00.ID);
  16.965 +			mp_ioapics[apic].mpc_apicid = reg_00.ID;
  16.966 +		}
  16.967 +
  16.968 +		/*
  16.969 +		 * Sanity check, is the ID really free? Every APIC in a
  16.970 +		 * system must have a unique ID or we get lots of nice
  16.971 +		 * 'stuck on smp_invalidate_needed IPI wait' messages.
  16.972 +		 */
  16.973 +		if (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid)) {
  16.974 +			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
  16.975 +				apic, mp_ioapics[apic].mpc_apicid);
  16.976 +			for (i = 0; i < 0xf; i++)
  16.977 +				if (!(phys_id_present_map & (1 << i)))
  16.978 +					break;
  16.979 +			if (i >= 0xf)
  16.980 +				panic("Max APIC ID exceeded!\n");
  16.981 +			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
  16.982 +				i);
  16.983 +			phys_id_present_map |= 1 << i;
  16.984 +			mp_ioapics[apic].mpc_apicid = i;
  16.985 +		} else {
  16.986 +			printk("Setting %d in the phys_id_present_map\n", mp_ioapics[apic].mpc_apicid);
  16.987 +			phys_id_present_map |= 1 << mp_ioapics[apic].mpc_apicid;
  16.988 +		}
  16.989 +
  16.990 +
  16.991 +		/*
  16.992 +		 * We need to adjust the IRQ routing table
  16.993 +		 * if the ID changed.
  16.994 +		 */
  16.995 +		if (old_id != mp_ioapics[apic].mpc_apicid)
  16.996 +			for (i = 0; i < mp_irq_entries; i++)
  16.997 +				if (mp_irqs[i].mpc_dstapic == old_id)
  16.998 +					mp_irqs[i].mpc_dstapic
  16.999 +						= mp_ioapics[apic].mpc_apicid;
 16.1000 +
 16.1001 +		/*
 16.1002 +		 * Read the right value from the MPC table and
 16.1003 +		 * write it into the ID register.
 16.1004 +	 	 */
 16.1005 +		printk(KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
 16.1006 +					mp_ioapics[apic].mpc_apicid);
 16.1007 +
 16.1008 +		reg_00.ID = mp_ioapics[apic].mpc_apicid;
 16.1009 +		spin_lock_irqsave(&ioapic_lock, flags);
 16.1010 +		io_apic_write(apic, 0, *(int *)&reg_00);
 16.1011 +		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1012 +
 16.1013 +		/*
 16.1014 +		 * Sanity check
 16.1015 +		 */
 16.1016 +		spin_lock_irqsave(&ioapic_lock, flags);
 16.1017 +		*(int *)&reg_00 = io_apic_read(apic, 0);
 16.1018 +		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1019 +		if (reg_00.ID != mp_ioapics[apic].mpc_apicid)
 16.1020 +			panic("could not set ID!\n");
 16.1021 +		else
 16.1022 +			printk(" ok.\n");
 16.1023 +	}
 16.1024 +}
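The duplicate-ID fixup in setup_ioapic_ids_from_mpc() is a simple bitmask scan for the first free APIC ID below 0xf. A self-contained sketch of the same logic; the map and the clashing ID are made-up sample values.

#include <stdio.h>

int main(void)
{
    unsigned long present_map = (1 << 0) | (1 << 1) | (1 << 2); /* CPUs 0-2 */
    int wanted_id = 1;                                          /* clashes   */

    if (present_map & (1 << wanted_id)) {
        int i;
        for (i = 0; i < 0xf; i++)
            if (!(present_map & (1 << i)))
                break;
        if (i >= 0xf) {
            fprintf(stderr, "Max APIC ID exceeded\n");
            return 1;
        }
        printf("ID %d already used, fixing up to %d\n", wanted_id, i);
        present_map |= 1 << i;
        wanted_id = i;
    } else {
        present_map |= 1 << wanted_id;
    }
    printf("final ID %d, map %#lx\n", wanted_id, present_map);
    return 0;
}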
 16.1025 +
 16.1026 +/*
 16.1027 + * There is a nasty bug in some older SMP boards, their mptable lies
 16.1028 + * about the timer IRQ. We do the following to work around the situation:
 16.1029 + *
 16.1030 + *	- timer IRQ defaults to IO-APIC IRQ
 16.1031 + *	- if this function detects that timer IRQs are defunct, then we fall
 16.1032 + *	  back to ISA timer IRQs
 16.1033 + */
 16.1034 +static int __init timer_irq_works(void)
 16.1035 +{
 16.1036 +	unsigned int t1 = jiffies;
 16.1037 +
 16.1038 +	sti();
 16.1039 +	/* Let ten ticks pass... */
 16.1040 +	mdelay((10 * 1000) / HZ);
 16.1041 +
 16.1042 +	/*
 16.1043 +	 * Expect a few ticks at least, to be sure some possible
 16.1044 +	 * glue logic does not lock up after one or two first
 16.1045 +	 * ticks in a non-ExtINT mode.  Also the local APIC
 16.1046 +	 * might have cached one ExtINT interrupt.  Finally, at
 16.1047 +	 * least one tick may be lost due to delays.
 16.1048 +	 */
 16.1049 +	if (jiffies - t1 > 4)
 16.1050 +		return 1;
 16.1051 +
 16.1052 +	return 0;
 16.1053 +}
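A toy model of the heuristic in timer_irq_works(): sample the tick counter, wait roughly ten tick periods, and accept the timer only if more than four ticks actually arrived. The tick advance is simulated here; in the kernel, jiffies is advanced by the timer interrupt itself.

#include <stdio.h>

static volatile unsigned long jiffies;

/* Stands in for mdelay((10 * 1000) / HZ) plus the real interrupts that
 * would advance jiffies while we wait. */
static void simulate_wait_10_ticks(void)
{
    jiffies += 10;
}

static int timer_irq_works_sketch(void)
{
    unsigned long t1 = jiffies;

    simulate_wait_10_ticks();
    return (jiffies - t1) > 4;   /* a few lost ticks are tolerated */
}

int main(void)
{
    printf("timer %s\n", timer_irq_works_sketch() ? "works" : "is dead");
    return 0;
}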
 16.1054 +
 16.1055 +/*
  16.1056 + * In the SMP+IOAPIC case it might happen that there is an unspecified
  16.1057 + * number of pending IRQ events left unhandled. These cases are very rare,
 16.1058 + * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 16.1059 + * better to do it this way as thus we do not have to be aware of
 16.1060 + * 'pending' interrupts in the IRQ path, except at this point.
 16.1061 + */
 16.1062 +/*
 16.1063 + * Edge triggered needs to resend any interrupt
 16.1064 + * that was delayed but this is now handled in the device
 16.1065 + * independent code.
 16.1066 + */
 16.1067 +#define enable_edge_ioapic_irq unmask_IO_APIC_irq
 16.1068 +
 16.1069 +static void disable_edge_ioapic_irq (unsigned int irq) { /* nothing */ }
 16.1070 +
 16.1071 +/*
  16.1072 + * Starting up an edge-triggered IO-APIC interrupt is
 16.1073 + * nasty - we need to make sure that we get the edge.
  16.1074 + * If it is already asserted for some reason, we need to
  16.1075 + * return 1 to indicate that it was pending.
 16.1076 + *
 16.1077 + * This is not complete - we should be able to fake
 16.1078 + * an edge even if it isn't on the 8259A...
 16.1079 + */
 16.1080 +
 16.1081 +static unsigned int startup_edge_ioapic_irq(unsigned int irq)
 16.1082 +{
 16.1083 +	int was_pending = 0;
 16.1084 +	unsigned long flags;
 16.1085 +
 16.1086 +	spin_lock_irqsave(&ioapic_lock, flags);
 16.1087 +	if (irq < 16) {
 16.1088 +		disable_8259A_irq(irq);
 16.1089 +		if (i8259A_irq_pending(irq))
 16.1090 +			was_pending = 1;
 16.1091 +	}
 16.1092 +	__unmask_IO_APIC_irq(irq);
 16.1093 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1094 +
 16.1095 +	return was_pending;
 16.1096 +}
 16.1097 +
 16.1098 +#define shutdown_edge_ioapic_irq	disable_edge_ioapic_irq
 16.1099 +
 16.1100 +/*
 16.1101 + * Once we have recorded IRQ_PENDING already, we can mask the
 16.1102 + * interrupt for real. This prevents IRQ storms from unhandled
 16.1103 + * devices.
 16.1104 + */
 16.1105 +static void ack_edge_ioapic_irq(unsigned int irq)
 16.1106 +{
 16.1107 +	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
 16.1108 +					== (IRQ_PENDING | IRQ_DISABLED))
 16.1109 +		mask_IO_APIC_irq(irq);
 16.1110 +	ack_APIC_irq();
 16.1111 +}
 16.1112 +
 16.1113 +static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ }
 16.1114 +
 16.1115 +
 16.1116 +/*
 16.1117 + * Level triggered interrupts can just be masked,
 16.1118 + * and shutting down and starting up the interrupt
 16.1119 + * is the same as enabling and disabling them -- except
  16.1120 + * that startup needs to return a "was pending" value.
 16.1121 + *
 16.1122 + * Level triggered interrupts are special because we
 16.1123 + * do not touch any IO-APIC register while handling
 16.1124 + * them. We ack the APIC in the end-IRQ handler, not
 16.1125 + * in the start-IRQ-handler. Protection against reentrance
 16.1126 + * from the same interrupt is still provided, both by the
 16.1127 + * generic IRQ layer and by the fact that an unacked local
 16.1128 + * APIC does not accept IRQs.
 16.1129 + */
 16.1130 +static unsigned int startup_level_ioapic_irq (unsigned int irq)
 16.1131 +{
 16.1132 +	unmask_IO_APIC_irq(irq);
 16.1133 +
 16.1134 +	return 0; /* don't check for pending */
 16.1135 +}
 16.1136 +
 16.1137 +#define shutdown_level_ioapic_irq	mask_IO_APIC_irq
 16.1138 +#define enable_level_ioapic_irq		unmask_IO_APIC_irq
 16.1139 +#define disable_level_ioapic_irq	mask_IO_APIC_irq
 16.1140 +
 16.1141 +static void end_level_ioapic_irq (unsigned int irq)
 16.1142 +{
 16.1143 +	unsigned long v;
 16.1144 +	int i;
 16.1145 +
 16.1146 +/*
 16.1147 + * It appears there is an erratum which affects at least version 0x11
 16.1148 + * of I/O APIC (that's the 82093AA and cores integrated into various
 16.1149 + * chipsets).  Under certain conditions a level-triggered interrupt is
 16.1150 + * erroneously delivered as edge-triggered one but the respective IRR
 16.1151 + * bit gets set nevertheless.  As a result the I/O unit expects an EOI
 16.1152 + * message but it will never arrive and further interrupts are blocked
 16.1153 + * from the source.  The exact reason is so far unknown, but the
 16.1154 + * phenomenon was observed when two consecutive interrupt requests
 16.1155 + * from a given source get delivered to the same CPU and the source is
 16.1156 + * temporarily disabled in between.
 16.1157 + *
 16.1158 + * A workaround is to simulate an EOI message manually.  We achieve it
 16.1159 + * by setting the trigger mode to edge and then to level when the edge
 16.1160 + * trigger mode gets detected in the TMR of a local APIC for a
 16.1161 + * level-triggered interrupt.  We mask the source for the time of the
 16.1162 + * operation to prevent an edge-triggered interrupt escaping meanwhile.
 16.1163 + * The idea is from Manfred Spraul.  --macro
 16.1164 + */
 16.1165 +	i = IO_APIC_VECTOR(irq);
 16.1166 +	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 16.1167 +
 16.1168 +	ack_APIC_irq();
 16.1169 +
 16.1170 +	if (!(v & (1 << (i & 0x1f)))) {
 16.1171 +#ifdef APIC_LOCKUP_DEBUG
 16.1172 +		struct irq_pin_list *entry;
 16.1173 +#endif
 16.1174 +		spin_lock(&ioapic_lock);
 16.1175 +		__mask_and_edge_IO_APIC_irq(irq);
 16.1176 +#ifdef APIC_LOCKUP_DEBUG
 16.1177 +		for (entry = irq_2_pin + irq;;) {
 16.1178 +			unsigned int reg;
 16.1179 +
 16.1180 +			if (entry->pin == -1)
 16.1181 +				break;
 16.1182 +			reg = io_apic_read(entry->apic, 0x10 + entry->pin * 2);
 16.1183 +			if (reg & 0x00004000)
 16.1184 +				printk(KERN_CRIT "Aieee!!!  Remote IRR"
 16.1185 +					" still set after unlock!\n");
 16.1186 +			if (!entry->next)
 16.1187 +				break;
 16.1188 +			entry = irq_2_pin + entry->next;
 16.1189 +		}
 16.1190 +#endif
 16.1191 +		__unmask_and_level_IO_APIC_irq(irq);
 16.1192 +		spin_unlock(&ioapic_lock);
 16.1193 +	}
 16.1194 +}
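The erratum check in end_level_ioapic_irq() locates the TMR bit for vector i at register offset (i & ~0x1f) >> 1, bit i & 0x1f, i.e. 32 vectors per 0x10-spaced register. A small sketch of that address arithmetic with a few arbitrary sample vectors:

#include <stdio.h>

int main(void)
{
    for (unsigned int v = 0; v < 256; v += 47) {   /* sample vectors */
        unsigned int reg_off = (v & ~0x1fu) >> 1;  /* 0x00, 0x10, 0x20, ... */
        unsigned int bit     = v & 0x1f;
        printf("vector 0x%02x -> TMR register +0x%02x, bit %u\n",
               v, reg_off, bit);
    }
    return 0;
}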
 16.1195 +
 16.1196 +static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
 16.1197 +
 16.1198 +static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
 16.1199 +{
 16.1200 +	unsigned long flags;
 16.1201 +	/*
 16.1202 +	 * Only the first 8 bits are valid.
 16.1203 +	 */
 16.1204 +	mask = mask << 24;
 16.1205 +
 16.1206 +	spin_lock_irqsave(&ioapic_lock, flags);
 16.1207 +	__DO_ACTION(1, = mask, )
 16.1208 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1209 +}
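The shift by 24 in set_ioapic_affinity() places the CPU mask in the destination field, which sits in bits 24-31 of the redirection entry's high dword (bits 56-63 of the 64-bit entry). A one-line sketch with a made-up mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t cpu_mask = 0x3;                 /* made-up: CPUs 0 and 1 */
    uint32_t high_dword = cpu_mask << 24;    /* destination field */

    printf("cpu mask %#x -> redirection entry high dword %#010x\n",
           cpu_mask, high_dword);
    return 0;
}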
 16.1210 +
 16.1211 +/*
 16.1212 + * Level and edge triggered IO-APIC interrupts need different handling,
 16.1213 + * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 16.1214 + * handled with the level-triggered descriptor, but that one has slightly
 16.1215 + * more overhead. Level-triggered interrupts cannot be handled with the
 16.1216 + * edge-triggered handler, without risking IRQ storms and other ugly
 16.1217 + * races.
 16.1218 + */
 16.1219 +
 16.1220 +static struct hw_interrupt_type ioapic_edge_irq_type = {
 16.1221 +	"IO-APIC-edge",
 16.1222 +	startup_edge_ioapic_irq,
 16.1223 +	shutdown_edge_ioapic_irq,
 16.1224 +	enable_edge_ioapic_irq,
 16.1225 +	disable_edge_ioapic_irq,
 16.1226 +	ack_edge_ioapic_irq,
 16.1227 +	end_edge_ioapic_irq,
 16.1228 +	set_ioapic_affinity,
 16.1229 +};
 16.1230 +
 16.1231 +static struct hw_interrupt_type ioapic_level_irq_type = {
 16.1232 +	"IO-APIC-level",
 16.1233 +	startup_level_ioapic_irq,
 16.1234 +	shutdown_level_ioapic_irq,
 16.1235 +	enable_level_ioapic_irq,
 16.1236 +	disable_level_ioapic_irq,
 16.1237 +	mask_and_ack_level_ioapic_irq,
 16.1238 +	end_level_ioapic_irq,
 16.1239 +	set_ioapic_affinity,
 16.1240 +};
 16.1241 +
 16.1242 +static inline void init_IO_APIC_traps(void)
 16.1243 +{
 16.1244 +	int irq;
 16.1245 +
 16.1246 +	/*
 16.1247 +	 * NOTE! The local APIC isn't very good at handling
 16.1248 +	 * multiple interrupts at the same interrupt level.
 16.1249 +	 * As the interrupt level is determined by taking the
 16.1250 +	 * vector number and shifting that right by 4, we
 16.1251 +	 * want to spread these out a bit so that they don't
 16.1252 +	 * all fall in the same interrupt level.
 16.1253 +	 *
 16.1254 +	 * Also, we've got to be careful not to trash gate
 16.1255 +	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 16.1256 +	 */
 16.1257 +	for (irq = 0; irq < NR_IRQS ; irq++) {
 16.1258 +		if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq)) {
 16.1259 +			/*
 16.1260 +			 * Hmm.. We don't have an entry for this,
 16.1261 +			 * so default to an old-fashioned 8259
 16.1262 +			 * interrupt if we can..
 16.1263 +			 */
 16.1264 +			if (irq < 16)
 16.1265 +				make_8259A_irq(irq);
 16.1266 +			else
 16.1267 +				/* Strange. Oh, well.. */
 16.1268 +				irq_desc[irq].handler = &no_irq_type;
 16.1269 +		}
 16.1270 +	}
 16.1271 +}
 16.1272 +
 16.1273 +static void enable_lapic_irq (unsigned int irq)
 16.1274 +{
 16.1275 +	unsigned long v;
 16.1276 +
 16.1277 +	v = apic_read(APIC_LVT0);
 16.1278 +	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
 16.1279 +}
 16.1280 +
 16.1281 +static void disable_lapic_irq (unsigned int irq)
 16.1282 +{
 16.1283 +	unsigned long v;
 16.1284 +
 16.1285 +	v = apic_read(APIC_LVT0);
 16.1286 +	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
 16.1287 +}
 16.1288 +
 16.1289 +static void ack_lapic_irq (unsigned int irq)
 16.1290 +{
 16.1291 +	ack_APIC_irq();
 16.1292 +}
 16.1293 +
 16.1294 +static void end_lapic_irq (unsigned int i) { /* nothing */ }
 16.1295 +
 16.1296 +static struct hw_interrupt_type lapic_irq_type = {
 16.1297 +	"local-APIC-edge",
 16.1298 +	NULL, /* startup_irq() not used for IRQ0 */
 16.1299 +	NULL, /* shutdown_irq() not used for IRQ0 */
 16.1300 +	enable_lapic_irq,
 16.1301 +	disable_lapic_irq,
 16.1302 +	ack_lapic_irq,
 16.1303 +	end_lapic_irq
 16.1304 +};
 16.1305 +
 16.1306 +
 16.1307 +/*
  16.1308 + * This looks a bit hackish but it's about the only way of sending
 16.1309 + * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 16.1310 + * not support the ExtINT mode, unfortunately.  We need to send these
 16.1311 + * cycles as some i82489DX-based boards have glue logic that keeps the
 16.1312 + * 8259A interrupt line asserted until INTA.  --macro
 16.1313 + */
 16.1314 +static inline void unlock_ExtINT_logic(void)
 16.1315 +{
 16.1316 +	int pin, i;
 16.1317 +	struct IO_APIC_route_entry entry0, entry1;
 16.1318 +	unsigned char save_control, save_freq_select;
 16.1319 +	unsigned long flags;
 16.1320 +
 16.1321 +	pin = find_isa_irq_pin(8, mp_INT);
 16.1322 +	if (pin == -1)
 16.1323 +		return;
 16.1324 +
 16.1325 +	spin_lock_irqsave(&ioapic_lock, flags);
 16.1326 +	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
 16.1327 +	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
 16.1328 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1329 +	clear_IO_APIC_pin(0, pin);
 16.1330 +
 16.1331 +	memset(&entry1, 0, sizeof(entry1));
 16.1332 +
 16.1333 +	entry1.dest_mode = 0;			/* physical delivery */
 16.1334 +	entry1.mask = 0;			/* unmask IRQ now */
 16.1335 +	entry1.dest.physical.physical_dest = hard_smp_processor_id();
 16.1336 +	entry1.delivery_mode = dest_ExtINT;
 16.1337 +	entry1.polarity = entry0.polarity;
 16.1338 +	entry1.trigger = 0;
 16.1339 +	entry1.vector = 0;
 16.1340 +
 16.1341 +	spin_lock_irqsave(&ioapic_lock, flags);
 16.1342 +	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
 16.1343 +	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
 16.1344 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1345 +
 16.1346 +	save_control = CMOS_READ(RTC_CONTROL);
 16.1347 +	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
 16.1348 +	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
 16.1349 +		   RTC_FREQ_SELECT);
 16.1350 +	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
 16.1351 +
 16.1352 +	i = 100;
 16.1353 +	while (i-- > 0) {
 16.1354 +		mdelay(10);
 16.1355 +		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
 16.1356 +			i -= 10;
 16.1357 +	}
 16.1358 +
 16.1359 +	CMOS_WRITE(save_control, RTC_CONTROL);
 16.1360 +	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 16.1361 +	clear_IO_APIC_pin(0, pin);
 16.1362 +
 16.1363 +	spin_lock_irqsave(&ioapic_lock, flags);
 16.1364 +	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
 16.1365 +	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
 16.1366 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1367 +}
 16.1368 +
 16.1369 +/*
 16.1370 + * This code may look a bit paranoid, but it's supposed to cooperate with
 16.1371 + * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 16.1372 + * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 16.1373 + * fanatically on his truly buggy board.
 16.1374 + */
 16.1375 +static inline void check_timer(void)
 16.1376 +{
 16.1377 +	extern int timer_ack;
 16.1378 +	int pin1, pin2;
 16.1379 +	int vector;
 16.1380 +
 16.1381 +	/*
 16.1382 +	 * get/set the timer IRQ vector:
 16.1383 +	 */
 16.1384 +	disable_8259A_irq(0);
 16.1385 +	vector = assign_irq_vector(0);
 16.1386 +	set_intr_gate(vector, interrupt[0]);
 16.1387 +
 16.1388 +	/*
 16.1389 +	 * Subtle, code in do_timer_interrupt() expects an AEOI
 16.1390 +	 * mode for the 8259A whenever interrupts are routed
 16.1391 +	 * through I/O APICs.  Also IRQ0 has to be enabled in
 16.1392 +	 * the 8259A which implies the virtual wire has to be
 16.1393 +	 * disabled in the local APIC.
 16.1394 +	 */
 16.1395 +	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 16.1396 +	init_8259A(1);
 16.1397 +	timer_ack = 1;
 16.1398 +	enable_8259A_irq(0);
 16.1399 +
 16.1400 +	pin1 = find_isa_irq_pin(0, mp_INT);
 16.1401 +	pin2 = find_isa_irq_pin(0, mp_ExtINT);
 16.1402 +
 16.1403 +	printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
 16.1404 +
 16.1405 +	if (pin1 != -1) {
 16.1406 +		/*
 16.1407 +		 * Ok, does IRQ0 through the IOAPIC work?
 16.1408 +		 */
 16.1409 +		unmask_IO_APIC_irq(0);
 16.1410 +		if (timer_irq_works()) {
 16.1411 +			return;
 16.1412 +		}
 16.1413 +		clear_IO_APIC_pin(0, pin1);
 16.1414 +		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
 16.1415 +	}
 16.1416 +
 16.1417 +	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
 16.1418 +	if (pin2 != -1) {
 16.1419 +		printk("\n..... (found pin %d) ...", pin2);
 16.1420 +		/*
 16.1421 +		 * legacy devices should be connected to IO APIC #0
 16.1422 +		 */
 16.1423 +		setup_ExtINT_IRQ0_pin(pin2, vector);
 16.1424 +		if (timer_irq_works()) {
 16.1425 +			printk("works.\n");
 16.1426 +			return;
 16.1427 +		}
 16.1428 +		/*
 16.1429 +		 * Cleanup, just in case ...
 16.1430 +		 */
 16.1431 +		clear_IO_APIC_pin(0, pin2);
 16.1432 +	}
 16.1433 +	printk(" failed.\n");
 16.1434 +
 16.1435 +	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 16.1436 +
 16.1437 +	disable_8259A_irq(0);
 16.1438 +	irq_desc[0].handler = &lapic_irq_type;
 16.1439 +	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
 16.1440 +	enable_8259A_irq(0);
 16.1441 +
 16.1442 +	if (timer_irq_works()) {
 16.1443 +		printk(" works.\n");
 16.1444 +		return;
 16.1445 +	}
 16.1446 +	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
 16.1447 +	printk(" failed.\n");
 16.1448 +
 16.1449 +	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
 16.1450 +
 16.1451 +	init_8259A(0);
 16.1452 +	make_8259A_irq(0);
 16.1453 +	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
 16.1454 +
 16.1455 +	unlock_ExtINT_logic();
 16.1456 +
 16.1457 +	if (timer_irq_works()) {
 16.1458 +		printk(" works.\n");
 16.1459 +		return;
 16.1460 +	}
 16.1461 +	printk(" failed :(.\n");
 16.1462 +	panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
 16.1463 +}
 16.1464 +
 16.1465 +/*
  16.1466 + * IRQs that are handled by the old PIC in all cases:
  16.1467 + * - IRQ2 is the cascade IRQ, and cannot be an IO-APIC IRQ.
 16.1468 + *   Linux doesn't really care, as it's not actually used
 16.1469 + *   for any interrupt handling anyway.
 16.1470 + */
 16.1471 +#define PIC_IRQS	(1<<2)
 16.1472 +
 16.1473 +void __init setup_IO_APIC(void)
 16.1474 +{
 16.1475 +	enable_IO_APIC();
 16.1476 +
 16.1477 +	io_apic_irqs = ~PIC_IRQS;
 16.1478 +	printk("ENABLING IO-APIC IRQs\n");
 16.1479 +
 16.1480 +	/*
 16.1481 +	 * Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
 16.1482 +	 * mptable:
 16.1483 +	 */
 16.1484 +	setup_ioapic_ids_from_mpc();
 16.1485 +	sync_Arb_IDs();
 16.1486 +	setup_IO_APIC_irqs();
 16.1487 +	init_IO_APIC_traps();
 16.1488 +	check_timer();
 16.1489 +	print_IO_APIC();
 16.1490 +}
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen-2.4.16/arch/i386/ioremap.c	Wed Nov 20 12:02:17 2002 +0000
    17.3 @@ -0,0 +1,106 @@
    17.4 +/*
    17.5 + * arch/i386/mm/ioremap.c
    17.6 + *
    17.7 + * Re-map IO memory to kernel address space so that we can access it.
    17.8 + * This is needed for high PCI addresses that aren't mapped in the
    17.9 + * 640k-1MB IO memory area on PC's
   17.10 + *
   17.11 + * (C) Copyright 1995 1996 Linus Torvalds
   17.12 + */
   17.13 +
   17.14 +//#include <linux/vmalloc.h>
   17.15 +#include <asm/io.h>
   17.16 +#include <asm/pgalloc.h>
   17.17 +#include <asm/page.h>
   17.18 +
   17.19 +static unsigned long remap_base = 0;
   17.20 +
   17.21 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
   17.22 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY)
   17.23 +
   17.24 +#define PAGE_ALIGN(addr)    (((addr)+PAGE_SIZE-1)&PAGE_MASK)
   17.25 +
   17.26 +static void new_l2e(l2_pgentry_t *pl2e)
   17.27 +{
   17.28 +    l1_pgentry_t *pl1e = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
   17.29 +    if ( !pl1e ) BUG();
   17.30 +    clear_page(pl1e);
   17.31 +    *pl2e = mk_l2_pgentry(__pa(pl1e)|L2_PROT);
   17.32 +}
   17.33 +
   17.34 +void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
   17.35 +{
   17.36 +    unsigned long vaddr;
   17.37 +    unsigned long offset, cur=0, last_addr;
   17.38 +    l2_pgentry_t *pl2e;
   17.39 +    l1_pgentry_t *pl1e;
   17.40 +
   17.41 +    /* First time through, start allocating from end of real memory. */
   17.42 +    if ( !remap_base ) 
   17.43 +        remap_base = (unsigned long)phys_to_virt(MAX_USABLE_ADDRESS);
   17.44 +
   17.45 +    /* Don't allow wraparound or zero size */
   17.46 +    last_addr = phys_addr + size - 1;
   17.47 +    if (!size || last_addr < phys_addr)
   17.48 +        return NULL;
   17.49 +
   17.50 +    /*
   17.51 +     * Don't remap the low PCI/ISA area, it's always mapped..
   17.52 +     */
   17.53 +    if (phys_addr >= 0xA0000 && last_addr < 0x100000)
   17.54 +        return phys_to_virt(phys_addr);
   17.55 +
   17.56 +#if 0
   17.57 +    /*
   17.58 +     * Don't allow anybody to remap normal RAM that we're using..
   17.59 +     */
   17.60 +    if (phys_addr < virt_to_phys(high_memory)) {
   17.61 +        char *t_addr, *t_end;
   17.62 +        struct pfn_info *page;
   17.63 +
   17.64 +        t_addr = __va(phys_addr);
   17.65 +        t_end = t_addr + (size - 1);
   17.66 +	   
   17.67 +        for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
   17.68 +            if(!PageReserved(page))
   17.69 +                return NULL;
   17.70 +    }
   17.71 +#endif
   17.72 +
   17.73 +    /*
   17.74 +     * Mappings have to be page-aligned
   17.75 +     */
   17.76 +    offset = phys_addr & ~PAGE_MASK;
   17.77 +    phys_addr &= PAGE_MASK;
   17.78 +    size = PAGE_ALIGN(last_addr) - phys_addr;
   17.79 +
   17.80 +    /*
   17.81 +     * Ok, go for it..
   17.82 +     */
   17.83 +    vaddr = remap_base;
   17.84 +    remap_base += size;
   17.85 +    pl2e = idle0_pg_table + l2_table_offset(vaddr);
   17.86 +    if ( l2_pgentry_empty(*pl2e) ) new_l2e(pl2e);
   17.87 +    pl1e = l2_pgentry_to_l1(*pl2e++) + l1_table_offset(vaddr);
   17.88 +    for ( ; ; ) 
   17.89 +    {
   17.90 +        if ( !l1_pgentry_empty(*pl1e) ) BUG();
   17.91 +        *pl1e++ = mk_l1_pgentry((phys_addr+cur)|L1_PROT|flags);
   17.92 +        cur += PAGE_SIZE;
   17.93 +        if ( cur == size ) break;
   17.94 +        if ( !((unsigned long)pl1e & (PAGE_SIZE-1)) )
   17.95 +        {
   17.96 +            if ( l2_pgentry_empty(*pl2e) ) new_l2e(pl2e);
   17.97 +            pl1e = l2_pgentry_to_l1(*pl2e++);        
   17.98 +        }
   17.99 +    }
  17.100 +
  17.101 +    flush_tlb_all();
  17.102 +
  17.103 +    return (void *) (offset + (char *)vaddr);
  17.104 +}
  17.105 +
  17.106 +void iounmap(void *addr)
  17.107 +{
  17.108 +    /* NOP for now. */
  17.109 +}
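The alignment arithmetic in __ioremap() is easy to get wrong, so here is a user-space sketch of just that part: the caller's offset within the first page is preserved while the mapping itself is rounded out to whole pages. The physical address and size are made-up sample values.

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long phys_addr = 0xfebc1010UL;   /* made-up device address */
    unsigned long size      = 0x2000UL;       /* made-up mapping size   */
    unsigned long last_addr = phys_addr + size - 1;

    unsigned long offset = phys_addr & ~PAGE_MASK;   /* 0x10 */
    phys_addr &= PAGE_MASK;                          /* 0xfebc1000 */
    size = PAGE_ALIGN(last_addr) - phys_addr;        /* 0x3000: three pages */

    printf("map %#lx + %#lx, return base + %#lx\n", phys_addr, size, offset);
    return 0;
}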
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xen-2.4.16/arch/i386/irq.c	Wed Nov 20 12:02:17 2002 +0000
    18.3 @@ -0,0 +1,895 @@
    18.4 +/*
    18.5 + *	linux/arch/i386/kernel/irq.c
    18.6 + *
    18.7 + *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
    18.8 + *
    18.9 + * This file contains the code used by various IRQ handling routines:
    18.10 + * asking for different IRQs should be done through these routines
    18.11 + * instead of just grabbing them. Thus setup_irqs with different IRQ numbers
   18.12 + * shouldn't result in any weird surprises, and installing new handlers
   18.13 + * should be easier.
   18.14 + */
   18.15 +
   18.16 +/*
   18.17 + * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
   18.18 + *
   18.19 + * IRQs are in fact implemented a bit like signal handlers for the kernel.
   18.20 + * Naturally it's not a 1:1 relation, but there are similarities.
   18.21 + */
   18.22 +
   18.23 +#include <xeno/config.h>
   18.24 +#include <xeno/init.h>
   18.25 +#include <xeno/errno.h>
   18.26 +#include <xeno/sched.h>
   18.27 +#include <xeno/interrupt.h>
   18.28 +#include <xeno/irq.h>
   18.29 +#include <xeno/slab.h>
   18.30 +
   18.31 +#include <asm/msr.h>
   18.32 +#include <asm/hardirq.h>
   18.33 +#include <asm/ptrace.h>
   18.34 +#include <asm/atomic.h>
   18.35 +#include <asm/io.h>
   18.36 +#include <asm/smp.h>
   18.37 +#include <asm/system.h>
   18.38 +#include <asm/bitops.h>
   18.39 +#include <asm/pgalloc.h>
   18.40 +#include <xeno/delay.h>
   18.41 +
   18.42 +
   18.43 +/*
   18.44 + * Linux has a controller-independent x86 interrupt architecture.
    18.45 + * Every controller has a 'controller-template' that is used
    18.46 + * by the main code to do the right thing. Each driver-visible
    18.47 + * interrupt source is transparently wired to the appropriate
   18.48 + * controller. Thus drivers need not be aware of the
   18.49 + * interrupt-controller.
   18.50 + *
   18.51 + * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
   18.52 + * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
   18.53 + * (IO-APICs assumed to be messaging to Pentium local-APICs)
   18.54 + *
   18.55 + * the code is designed to be easily extended with new/different
   18.56 + * interrupt controllers, without having to do assembly magic.
   18.57 + */
   18.58 +
   18.59 +/*
   18.60 + * Controller mappings for all interrupt sources:
   18.61 + */
   18.62 +irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
   18.63 +{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
   18.64 +
   18.65 +/*
   18.66 + * Special irq handlers.
   18.67 + */
   18.68 +
   18.69 +void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
   18.70 +
   18.71 +/*
   18.72 + * Generic no controller code
   18.73 + */
   18.74 +
   18.75 +static void enable_none(unsigned int irq) { }
   18.76 +static unsigned int startup_none(unsigned int irq) { return 0; }
   18.77 +static void disable_none(unsigned int irq) { }
   18.78 +static void ack_none(unsigned int irq)
   18.79 +{
   18.80 +/*
   18.81 + * 'what should we do if we get a hw irq event on an illegal vector'.
    18.82 + * Each architecture has to answer this itself; it doesn't deserve
    18.83 + * a generic callback, I think.
   18.84 + */
   18.85 +#if CONFIG_X86
   18.86 +    printk("unexpected IRQ trap at vector %02x\n", irq);
   18.87 +#ifdef CONFIG_X86_LOCAL_APIC
   18.88 +    /*
   18.89 +	 * Currently unexpected vectors happen only on SMP and APIC.
   18.90 +	 * We _must_ ack these because every local APIC has only N
   18.91 +	 * irq slots per priority level, and a 'hanging, unacked' IRQ
   18.92 +	 * holds up an irq slot - in excessive cases (when multiple
   18.93 +	 * unexpected vectors occur) that might lock up the APIC
   18.94 +	 * completely.
   18.95 +	 */
   18.96 +    ack_APIC_irq();
   18.97 +#endif
   18.98 +#endif
   18.99 +}
  18.100 +
  18.101 +/* startup is the same as "enable", shutdown is same as "disable" */
  18.102 +#define shutdown_none	disable_none
  18.103 +#define end_none	enable_none
  18.104 +
  18.105 +struct hw_interrupt_type no_irq_type = {
  18.106 +    "none",
  18.107 +    startup_none,
  18.108 +    shutdown_none,
  18.109 +    enable_none,
  18.110 +    disable_none,
  18.111 +    ack_none,
  18.112 +    end_none
  18.113 +};
  18.114 +
  18.115 +atomic_t irq_err_count;
  18.116 +#ifdef CONFIG_X86_IO_APIC
  18.117 +#ifdef APIC_MISMATCH_DEBUG
  18.118 +atomic_t irq_mis_count;
  18.119 +#endif
  18.120 +#endif
  18.121 +
  18.122 +/*
  18.123 + * Generic, controller-independent functions:
  18.124 + */
  18.125 +
  18.126 +/*
  18.127 + * Global interrupt locks for SMP. Allow interrupts to come in on any
  18.128 + * CPU, yet make cli/sti act globally to protect critical regions..
  18.129 + */
  18.130 +
  18.131 +#ifdef CONFIG_SMP
  18.132 +unsigned char global_irq_holder = 0xff;
   18.133 +unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
  18.134 +	
  18.135 +#define MAXCOUNT 100000000
  18.136 +
  18.137 +/*
  18.138 + * I had a lockup scenario where a tight loop doing
  18.139 + * spin_unlock()/spin_lock() on CPU#1 was racing with
  18.140 + * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
  18.141 + * apparently the spin_unlock() information did not make it
  18.142 + * through to CPU#0 ... nasty, is this by design, do we have to limit
  18.143 + * 'memory update oscillation frequency' artificially like here?
  18.144 + *
  18.145 + * Such 'high frequency update' races can be avoided by careful design, but
  18.146 + * some of our major constructs like spinlocks use similar techniques,
  18.147 + * it would be nice to clarify this issue. Set this define to 0 if you
  18.148 + * want to check whether your system freezes.  I suspect the delay done
  18.149 + * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
   18.150 + * I thought that such things are guaranteed by design, since we use
  18.151 + * the 'LOCK' prefix.
  18.152 + */
  18.153 +#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
  18.154 +
  18.155 +#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
  18.156 +# define SYNC_OTHER_CORES(x) udelay(x+1)
  18.157 +#else
  18.158 +/*
  18.159 + * We have to allow irqs to arrive between __sti and __cli
  18.160 + */
  18.161 +# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
  18.162 +#endif
  18.163 +
  18.164 +static inline void wait_on_irq(int cpu)
  18.165 +{
  18.166 +    for (;;) {
  18.167 +
  18.168 +        /*
  18.169 +         * Wait until all interrupts are gone. Wait
  18.170 +         * for bottom half handlers unless we're
  18.171 +         * already executing in one..
  18.172 +         */
  18.173 +        if (!irqs_running())
  18.174 +            if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
  18.175 +                break;
  18.176 +
  18.177 +        /* Duh, we have to loop. Release the lock to avoid deadlocks */
  18.178 +        clear_bit(0,&global_irq_lock);
  18.179 +
  18.180 +        for (;;) {
  18.181 +            __sti();
  18.182 +            SYNC_OTHER_CORES(cpu);
  18.183 +            __cli();
  18.184 +            if (irqs_running())
  18.185 +                continue;
  18.186 +            if (global_irq_lock)
  18.187 +                continue;
  18.188 +            if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
  18.189 +                continue;
  18.190 +            if (!test_and_set_bit(0,&global_irq_lock))
  18.191 +                break;
  18.192 +        }
  18.193 +    }
  18.194 +}
  18.195 +
  18.196 +/*
  18.197 + * This is called when we want to synchronize with
  18.198 + * interrupts. We may for example tell a device to
  18.199 + * stop sending interrupts: but to make sure there
  18.200 + * are no interrupts that are executing on another
  18.201 + * CPU we need to call this function.
  18.202 + */
  18.203 +void synchronize_irq(void)
  18.204 +{
  18.205 +    if (irqs_running()) {
  18.206 +        /* Stupid approach */
  18.207 +        cli();
  18.208 +        sti();
  18.209 +    }
  18.210 +}
  18.211 +
  18.212 +static inline void get_irqlock(int cpu)
  18.213 +{
  18.214 +    if (test_and_set_bit(0,&global_irq_lock)) {
  18.215 +        /* do we already hold the lock? */
  18.216 +        if ((unsigned char) cpu == global_irq_holder)
  18.217 +            return;
  18.218 +        /* Uhhuh.. Somebody else got it. Wait.. */
  18.219 +        do {
  18.220 +            do {
  18.221 +                rep_nop();
  18.222 +            } while (test_bit(0,&global_irq_lock));
  18.223 +        } while (test_and_set_bit(0,&global_irq_lock));		
  18.224 +    }
  18.225 +    /* 
   18.226 +     * We also need to make sure that nobody else is running
  18.227 +     * in an interrupt context. 
  18.228 +     */
  18.229 +    wait_on_irq(cpu);
  18.230 +
  18.231 +    /*
  18.232 +     * Ok, finally..
  18.233 +     */
  18.234 +    global_irq_holder = cpu;
  18.235 +}
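A sketch of the acquisition pattern in get_irqlock(): retry the atomic test-and-set, and between attempts spin on a plain read so the lock's cache line is not bounced. C11 atomics stand in for the kernel's test_and_set_bit()/test_bit(); rep_nop() and the wait_on_irq() step are omitted.

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint global_lock;

static void get_lock_sketch(void)
{
    while (atomic_exchange(&global_lock, 1)) {   /* test_and_set_bit */
        while (atomic_load(&global_lock))        /* inner test_bit spin */
            ;
    }
}

int main(void)
{
    get_lock_sketch();
    printf("lock acquired\n");
    atomic_store(&global_lock, 0);               /* analogue of releasing it */
    return 0;
}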
  18.236 +
  18.237 +#define EFLAGS_IF_SHIFT 9
  18.238 +
  18.239 +/*
  18.240 + * A global "cli()" while in an interrupt context
  18.241 + * turns into just a local cli(). Interrupts
  18.242 + * should use spinlocks for the (very unlikely)
  18.243 + * case that they ever want to protect against
  18.244 + * each other.
  18.245 + *
  18.246 + * If we already have local interrupts disabled,
  18.247 + * this will not turn a local disable into a
  18.248 + * global one (problems with spinlocks: this makes
  18.249 + * save_flags+cli+sti usable inside a spinlock).
  18.250 + */
  18.251 +void __global_cli(void)
  18.252 +{
  18.253 +    unsigned int flags;
  18.254 +
  18.255 +    __save_flags(flags);
  18.256 +    if (flags & (1 << EFLAGS_IF_SHIFT)) {
  18.257 +        int cpu = smp_processor_id();
  18.258 +        __cli();
  18.259 +        if (!local_irq_count(cpu))
  18.260 +            get_irqlock(cpu);
  18.261 +    }
  18.262 +}
  18.263 +
  18.264 +void __global_sti(void)
  18.265 +{
  18.266 +    int cpu = smp_processor_id();
  18.267 +
  18.268 +    if (!local_irq_count(cpu))
  18.269 +        release_irqlock(cpu);
  18.270 +    __sti();
  18.271 +}
  18.272 +
  18.273 +/*
  18.274 + * SMP flags value to restore to:
  18.275 + * 0 - global cli
  18.276 + * 1 - global sti
  18.277 + * 2 - local cli
  18.278 + * 3 - local sti
  18.279 + */
  18.280 +unsigned long __global_save_flags(void)
  18.281 +{
  18.282 +    int retval;
  18.283 +    int local_enabled;
  18.284 +    unsigned long flags;
  18.285 +    int cpu = smp_processor_id();
  18.286 +
  18.287 +    __save_flags(flags);
  18.288 +    local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
  18.289 +    /* default to local */
  18.290 +    retval = 2 + local_enabled;
  18.291 +
  18.292 +    /* check for global flags if we're not in an interrupt */
  18.293 +    if (!local_irq_count(cpu)) {
  18.294 +        if (local_enabled)
  18.295 +            retval = 1;
  18.296 +        if (global_irq_holder == cpu)
  18.297 +            retval = 0;
  18.298 +    }
  18.299 +    return retval;
  18.300 +}
  18.301 +
  18.302 +void __global_restore_flags(unsigned long flags)
  18.303 +{
  18.304 +    switch (flags) {
  18.305 +    case 0:
  18.306 +        __global_cli();
  18.307 +        break;
  18.308 +    case 1:
  18.309 +        __global_sti();
  18.310 +        break;
  18.311 +    case 2:
  18.312 +        __cli();
  18.313 +        break;
  18.314 +    case 3:
  18.315 +        __sti();
  18.316 +        break;
  18.317 +    default:
  18.318 +        printk("global_restore_flags: %08lx (%08lx)\n",
  18.319 +               flags, (&flags)[-1]);
  18.320 +    }
  18.321 +}
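The save/restore encoding above collapses the machine state to one of four values, 0/1 for global cli/sti and 2/3 for local cli/sti, with bit 9 of EFLAGS (the IF flag) supplying the "local enabled" bit. A sketch of the encoding; encode_flags() and its inputs are invented for illustration.

#include <stdio.h>

#define EFLAGS_IF_SHIFT 9

static int encode_flags(unsigned long eflags, int in_irq, int holds_global_lock)
{
    int local_enabled = (eflags >> EFLAGS_IF_SHIFT) & 1;
    int retval = 2 + local_enabled;          /* default: local cli/sti */

    if (!in_irq) {
        if (local_enabled)
            retval = 1;                      /* global sti */
        if (holds_global_lock)
            retval = 0;                      /* global cli */
    }
    return retval;
}

int main(void)
{
    printf("IF set, no lock  -> %d\n", encode_flags(1UL << 9, 0, 0)); /* 1 */
    printf("IF clear, lock   -> %d\n", encode_flags(0, 0, 1));        /* 0 */
    printf("in irq, IF clear -> %d\n", encode_flags(0, 1, 0));        /* 2 */
    return 0;
}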
  18.322 +
  18.323 +#endif
  18.324 +
  18.325 +/*
  18.326 + * This should really return information about whether
  18.327 + * we should do bottom half handling etc. Right now we
  18.328 + * end up _always_ checking the bottom half, which is a
  18.329 + * waste of time and is not what some drivers would
  18.330 + * prefer.
  18.331 + */
  18.332 +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
  18.333 +{
  18.334 +    int status;
  18.335 +    int cpu = smp_processor_id();
  18.336 +
  18.337 +    irq_enter(cpu, irq);
  18.338 +
  18.339 +    status = 1;	/* Force the "do bottom halves" bit */
  18.340 +
  18.341 +    if (!(action->flags & SA_INTERRUPT))
  18.342 +        __sti();
  18.343 +
  18.344 +    do {
  18.345 +        status |= action->flags;
  18.346 +        action->handler(irq, action->dev_id, regs);
  18.347 +        action = action->next;
  18.348 +    } while (action);
  18.349 +
  18.350 +    __cli();
  18.351 +
  18.352 +    irq_exit(cpu, irq);
  18.353 +
  18.354 +    return status;
  18.355 +}
  18.356 +
  18.357 +/*
  18.358 + * Generic enable/disable code: this just calls
  18.359 + * down into the PIC-specific version for the actual
  18.360 + * hardware disable after having gotten the irq
  18.361 + * controller lock. 
  18.362 + */
  18.363 + 
  18.364 +/**
  18.365 + *	disable_irq_nosync - disable an irq without waiting
  18.366 + *	@irq: Interrupt to disable
  18.367 + *
  18.368 + *	Disable the selected interrupt line.  Disables and Enables are
  18.369 + *	nested.
  18.370 + *	Unlike disable_irq(), this function does not ensure existing
  18.371 + *	instances of the IRQ handler have completed before returning.
  18.372 + *
  18.373 + *	This function may be called from IRQ context.
  18.374 + */
  18.375 + 
  18.376 +inline void disable_irq_nosync(unsigned int irq)
  18.377 +{
  18.378 +    irq_desc_t *desc = irq_desc + irq;
  18.379 +    unsigned long flags;
  18.380 +
  18.381 +    spin_lock_irqsave(&desc->lock, flags);
  18.382 +    if (!desc->depth++) {
  18.383 +        desc->status |= IRQ_DISABLED;
  18.384 +        desc->handler->disable(irq);
  18.385 +    }
  18.386 +    spin_unlock_irqrestore(&desc->lock, flags);
  18.387 +}
  18.388 +
  18.389 +/**
  18.390 + *	disable_irq - disable an irq and wait for completion
  18.391 + *	@irq: Interrupt to disable
  18.392 + *
  18.393 + *	Disable the selected interrupt line.  Enables and Disables are
  18.394 + *	nested.
  18.395 + *	This function waits for any pending IRQ handlers for this interrupt
  18.396 + *	to complete before returning. If you use this function while
  18.397 + *	holding a resource the IRQ handler may need you will deadlock.
  18.398 + *
  18.399 + *	This function may be called - with care - from IRQ context.
  18.400 + */
  18.401 + 
  18.402 +void disable_irq(unsigned int irq)
  18.403 +{
  18.404 +    disable_irq_nosync(irq);
  18.405 +
  18.406 +    if (!local_irq_count(smp_processor_id())) {
  18.407 +        do {
  18.408 +            barrier();
  18.409 +            cpu_relax();
  18.410 +        } while (irq_desc[irq].status & IRQ_INPROGRESS);
  18.411 +    }
  18.412 +}
  18.413 +
  18.414 +/**
  18.415 + *	enable_irq - enable handling of an irq
  18.416 + *	@irq: Interrupt to enable
  18.417 + *
  18.418 + *	Undoes the effect of one call to disable_irq().  If this
  18.419 + *	matches the last disable, processing of interrupts on this
  18.420 + *	IRQ line is re-enabled.
  18.421 + *
  18.422 + *	This function may be called from IRQ context.
  18.423 + */
  18.424 + 
  18.425 +void enable_irq(unsigned int irq)
  18.426 +{
  18.427 +    irq_desc_t *desc = irq_desc + irq;
  18.428 +    unsigned long flags;
  18.429 +
  18.430 +    spin_lock_irqsave(&desc->lock, flags);
  18.431 +    switch (desc->depth) {
  18.432 +    case 1: {
  18.433 +        unsigned int status = desc->status & ~IRQ_DISABLED;
  18.434 +        desc->status = status;
  18.435 +        if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
  18.436 +            desc->status = status | IRQ_REPLAY;
  18.437 +            hw_resend_irq(desc->handler,irq);
  18.438 +        }
  18.439 +        desc->handler->enable(irq);
  18.440 +        /* fall-through */
  18.441 +    }
  18.442 +    default:
  18.443 +        desc->depth--;
  18.444 +        break;
  18.445 +    case 0:
  18.446 +        printk("enable_irq(%u) unbalanced from %p\n", irq,
  18.447 +               __builtin_return_address(0));
  18.448 +    }
  18.449 +    spin_unlock_irqrestore(&desc->lock, flags);
  18.450 +}
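disable_irq_nosync() and enable_irq() nest via the depth counter: only the 0-to-1 transition masks the line in hardware, and only the matching 1-to-0 transition unmasks it. A stand-alone sketch of that behaviour with the hardware calls stubbed out by printf:

#include <stdio.h>

static int depth;

static void disable_irq_sketch(void)
{
    if (!depth++)
        printf("  -> hardware mask\n");
}

static void enable_irq_sketch(void)
{
    if (depth == 0) {
        printf("  unbalanced enable!\n");
        return;
    }
    if (--depth == 0)
        printf("  -> hardware unmask\n");
}

int main(void)
{
    disable_irq_sketch();   /* masks */
    disable_irq_sketch();   /* nested, no hardware access */
    enable_irq_sketch();    /* still disabled */
    enable_irq_sketch();    /* unmasks */
    enable_irq_sketch();    /* unbalanced, warned */
    return 0;
}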
  18.451 +
  18.452 +/*
   18.453 + * do_IRQ handles all normal device IRQs (the special
  18.454 + * SMP cross-CPU interrupts have their own specific
  18.455 + * handlers).
  18.456 + */
  18.457 +asmlinkage unsigned int do_IRQ(struct pt_regs regs)
  18.458 +{	
  18.459 +    /* 
  18.460 +     * We ack quickly, we don't want the irq controller
  18.461 +     * thinking we're snobs just because some other CPU has
  18.462 +     * disabled global interrupts (we have already done the
  18.463 +     * INT_ACK cycles, it's too late to try to pretend to the
  18.464 +     * controller that we aren't taking the interrupt).
  18.465 +     *
  18.466 +     * 0 return value means that this irq is already being
  18.467 +     * handled by some other CPU. (or is disabled)
  18.468 +     */
  18.469 +    int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code  */
  18.470 +    int cpu = smp_processor_id();
  18.471 +    irq_desc_t *desc = irq_desc + irq;
  18.472 +    struct irqaction * action;
  18.473 +    unsigned int status;
  18.474 +
  18.475 +    spin_lock(&desc->lock);
  18.476 +    desc->handler->ack(irq);
  18.477 +    /*
  18.478 +      REPLAY is when Linux resends an IRQ that was dropped earlier
  18.479 +      WAITING is used by probe to mark irqs that are being tested
  18.480 +    */
  18.481 +    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
  18.482 +    status |= IRQ_PENDING; /* we _want_ to handle it */
  18.483 +
   18.484 +    /*
   18.485 +     * If the IRQ is disabled for whatever reason, we cannot
   18.486 +     * use the action we have.
   18.487 +     */
  18.488 +    action = NULL;
  18.489 +    if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
  18.490 +        action = desc->action;
  18.491 +        status &= ~IRQ_PENDING; /* we commit to handling */
  18.492 +        status |= IRQ_INPROGRESS; /* we are handling it */
  18.493 +    }
  18.494 +    desc->status = status;
  18.495 +
   18.496 +    /*
   18.497 +     * If there is no IRQ handler or it was disabled, exit early.
   18.498 +     * Since we set PENDING, if another processor is handling
   18.499 +     * a different instance of this same irq, the other processor
   18.500 +     * will take care of it.
   18.501 +     */
  18.502 +    if (!action)
  18.503 +        goto out;
  18.504 +
   18.505 +    /*
   18.506 +     * Edge triggered interrupts need to remember
   18.507 +     * pending events.
   18.508 +     * This applies to any hw interrupts that allow a second
   18.509 +     * instance of the same irq to arrive while we are in do_IRQ
   18.510 +     * or in the handler. But the code here only handles the _second_
   18.511 +     * instance of the irq, not the third or fourth. So it is mostly
   18.512 +     * useful for irq hardware that does not mask cleanly in an
   18.513 +     * SMP environment.
   18.514 +     */
  18.515 +    for (;;) {
  18.516 +        spin_unlock(&desc->lock);
  18.517 +        handle_IRQ_event(irq, &regs, action);
  18.518 +        spin_lock(&desc->lock);
  18.519 +		
  18.520 +        if (!(desc->status & IRQ_PENDING))
  18.521 +            break;
  18.522 +        desc->status &= ~IRQ_PENDING;
  18.523 +    }
  18.524 +    desc->status &= ~IRQ_INPROGRESS;
  18.525 + out:
  18.526 +    /*
   18.527 +     * The ->end() handler has to deal with interrupts which got
   18.528 +     * disabled while the handler was running.
   18.529 +     */
  18.530 +    desc->handler->end(irq);
  18.531 +    spin_unlock(&desc->lock);
  18.532 +
  18.533 +    if (softirq_pending(cpu))
  18.534 +        do_softirq();
  18.535 +
  18.536 +    return 1;
  18.537 +}
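The IRQ_PENDING/IRQ_INPROGRESS handshake in do_IRQ() is the subtle part: the first CPU to take the interrupt loops and replays any instance that arrived while it was handling, while a second CPU merely marks it pending and leaves. A single-threaded sketch that models only the status bits, with a "second edge" injected from inside the handler to stand in for the concurrent arrival:

#include <stdio.h>

#define IRQ_PENDING     1
#define IRQ_INPROGRESS  2

static unsigned int status;
static int handled;

static void irq_arrives(void);

static void handle_event(void)
{
    handled++;                   /* stands in for handle_IRQ_event() */
    if (handled == 1)
        irq_arrives();           /* a second edge arrives mid-handling */
}

static void irq_arrives(void)
{
    status |= IRQ_PENDING;
    if (status & IRQ_INPROGRESS)
        return;                  /* recorded as pending, replayed by the owner */

    status &= ~IRQ_PENDING;
    status |= IRQ_INPROGRESS;
    for (;;) {
        handle_event();
        if (!(status & IRQ_PENDING))
            break;
        status &= ~IRQ_PENDING;  /* replay the instance that arrived meanwhile */
    }
    status &= ~IRQ_INPROGRESS;
}

int main(void)
{
    irq_arrives();
    printf("handled %d time(s), status %#x\n", handled, status);
    return 0;
}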
  18.538 +
  18.539 +/**
  18.540 + *	request_irq - allocate an interrupt line
  18.541 + *	@irq: Interrupt line to allocate
  18.542 + *	@handler: Function to be called when the IRQ occurs
  18.543 + *	@irqflags: Interrupt type flags
  18.544 + *	@devname: An ascii name for the claiming device
  18.545 + *	@dev_id: A cookie passed back to the handler function
  18.546 + *
  18.547 + *	This call allocates interrupt resources and enables the
  18.548 + *	interrupt line and IRQ handling. From the point this
  18.549 + *	call is made your handler function may be invoked. Since
  18.550 + *	your handler function must clear any interrupt the board 
  18.551 + *	raises, you must take care both to initialise your hardware
  18.552 + *	and to set up the interrupt handler in the right order.
  18.553 + *
  18.554 + *	Dev_id must be globally unique. Normally the address of the
  18.555 + *	device data structure is used as the cookie. Since the handler
  18.556 + *	receives this value it makes sense to use it.
  18.557 + *
  18.558 + *	If your interrupt is shared you must pass a non NULL dev_id
  18.559 + *	as this is required when freeing the interrupt.
  18.560 + *
  18.561 + *	Flags:
  18.562 + *
  18.563 + *	SA_SHIRQ		Interrupt is shared
  18.564 + *
  18.565 + *	SA_INTERRUPT		Disable local interrupts while processing
  18.566 + */
  18.567 + 
  18.568 +int request_irq(unsigned int irq, 
  18.569 +		void (*handler)(int, void *, struct pt_regs *),
  18.570 +		unsigned long irqflags, 
  18.571 +		const char * devname,
  18.572 +		void *dev_id)
  18.573 +{
  18.574 +    int retval;
  18.575 +    struct irqaction * action;
  18.576 +
  18.577 +    if (irq >= NR_IRQS)
  18.578 +        return -EINVAL;
  18.579 +    if (!handler)
  18.580 +        return -EINVAL;
  18.581 +
  18.582 +    action = (struct irqaction *)
  18.583 +        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  18.584 +    if (!action)
  18.585 +        return -ENOMEM;
  18.586 +
  18.587 +    action->handler = handler;
  18.588 +    action->flags = irqflags;
  18.589 +    action->mask = 0;
  18.590 +    action->name = devname;
  18.591 +    action->next = NULL;
  18.592 +    action->dev_id = dev_id;
  18.593 +
  18.594 +    retval = setup_irq(irq, action);
  18.595 +    if (retval)
  18.596 +        kfree(action);
  18.597 +
  18.598 +    return retval;
  18.599 +}
  18.600 +
  18.601 +/**
  18.602 + *	free_irq - free an interrupt
  18.603 + *	@irq: Interrupt line to free
  18.604 + *	@dev_id: Device identity to free
  18.605 + *
  18.606 + *	Remove an interrupt handler. The handler is removed and if the
  18.607 + *	interrupt line is no longer in use by any driver it is disabled.
  18.608 + *	On a shared IRQ the caller must ensure the interrupt is disabled
  18.609 + *	on the card it drives before calling this function. The function
  18.610 + *	does not return until any executing interrupts for this IRQ
  18.611 + *	have completed.
  18.612 + *
  18.613 + *	This function may be called from interrupt context. 
  18.614 + *
  18.615 + *	Bugs: Attempting to free an irq in a handler for the same irq hangs
  18.616 + *	      the machine.
  18.617 + */
  18.618 + 
  18.619 +void free_irq(unsigned int irq, void *dev_id)
  18.620 +{
  18.621 +    irq_desc_t *desc;
  18.622 +    struct irqaction **p;
  18.623 +    unsigned long flags;
  18.624 +
  18.625 +    if (irq >= NR_IRQS)
  18.626 +        return;
  18.627 +
  18.628 +    desc = irq_desc + irq;
  18.629 +    spin_lock_irqsave(&desc->lock,flags);
  18.630 +    p = &desc->action;
  18.631 +    for (;;) {
  18.632 +        struct irqaction * action = *p;
  18.633 +        if (action) {
  18.634 +            struct irqaction **pp = p;
  18.635 +            p = &action->next;
  18.636 +            if (action->dev_id != dev_id)
  18.637 +                continue;
  18.638 +
  18.639 +            /* Found it - now remove it from the list of entries */
  18.640 +            *pp = action->next;
  18.641 +            if (!desc->action) {
  18.642 +                desc->status |= IRQ_DISABLED;
  18.643 +                desc->handler->shutdown(irq);
  18.644 +            }
  18.645 +            spin_unlock_irqrestore(&desc->lock,flags);
  18.646 +
  18.647 +#ifdef CONFIG_SMP
  18.648 +            /* Wait to make sure it's not being used on another CPU */
  18.649 +            while (desc->status & IRQ_INPROGRESS) {
  18.650 +                barrier();
  18.651 +                cpu_relax();
  18.652 +            }
  18.653 +#endif
  18.654 +            kfree(action);
  18.655 +            return;
  18.656 +        }
  18.657 +        printk("Trying to free already-free IRQ%d\n",irq);
  18.658 +        spin_unlock_irqrestore(&desc->lock,flags);
  18.659 +        return;
  18.660 +    }
  18.661 +}
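
And the matching teardown, following the ordering rule in the free_irq() comment above; mydev_mask_board_irq() is a stand-in for whatever register write silences the hypothetical board.

    static void mydev_detach(struct mydev *dev)
    {
        /* On a shared line, quiesce the board first so it cannot raise the
         * interrupt while other handlers on the same IRQ keep running. */
        mydev_mask_board_irq(dev);      /* hypothetical helper */
        free_irq(dev->irq, dev);        /* same dev_id cookie as request_irq() */
    }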
  18.662 +
  18.663 +/*
  18.664 + * IRQ autodetection code..
  18.665 + *
  18.666 + * This depends on the fact that any interrupt that
  18.667 + * arrives on an unassigned IRQ line will get stuck
  18.668 + * with "IRQ_WAITING" cleared and the interrupt
  18.669 + * disabled.
  18.670 + */
  18.671 +
  18.672 +static spinlock_t probe_sem = SPIN_LOCK_UNLOCKED;
  18.673 +
  18.674 +/**
  18.675 + *	probe_irq_on	- begin an interrupt autodetect
  18.676 + *
  18.677 + *	Commence probing for an interrupt. The interrupts are scanned
  18.678 + *	and a mask of potential interrupt lines is returned.
  18.679 + *
  18.680 + */
  18.681 + 
  18.682 +unsigned long probe_irq_on(void)
  18.683 +{
  18.684 +    unsigned int i;
  18.685 +    irq_desc_t *desc;
  18.686 +    unsigned long val;
  18.687 +    unsigned long s=0, e=0;
  18.688 +
  18.689 +    spin_lock(&probe_sem);
  18.690 +    /* 
  18.691 +     * something may have generated an irq long ago and we want to
  18.692 +     * flush such a longstanding irq before considering it as spurious. 
  18.693 +     */
  18.694 +    for (i = NR_IRQS-1; i > 0; i--)  {
  18.695 +        desc = irq_desc + i;
  18.696 +
  18.697 +        spin_lock_irq(&desc->lock);
  18.698 +        if (!irq_desc[i].action) 
  18.699 +            irq_desc[i].handler->startup(i);
  18.700 +        spin_unlock_irq(&desc->lock);
  18.701 +    }
  18.702 +
  18.703 +    /* Wait for longstanding interrupts to trigger (20ms delay). */
  18.704 +    rdtscl(s);
  18.705 +    do {
  18.706 +        synchronize_irq();
  18.707 +        rdtscl(e);
  18.708 +    } while ( ((e-s)/ticks_per_usec) < 20000 );
  18.709 +
  18.710 +    /*
  18.711 +     * enable any unassigned irqs
  18.712 +     * (we must startup again here because if a longstanding irq
  18.713 +     * happened in the previous stage, it may have masked itself)
  18.714 +     */
  18.715 +    for (i = NR_IRQS-1; i > 0; i--) {
  18.716 +        desc = irq_desc + i;
  18.717 +
  18.718 +        spin_lock_irq(&desc->lock);
  18.719 +        if (!desc->action) {
  18.720 +            desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
  18.721 +            if (desc->handler->startup(i))
  18.722 +                desc->status |= IRQ_PENDING;
  18.723 +        }
  18.724 +        spin_unlock_irq(&desc->lock);
  18.725 +    }
  18.726 +
  18.727 +    /*
  18.728 +     * Wait for spurious interrupts to trigger (100ms delay). 
  18.729 +     */
  18.730 +    rdtscl(s);
  18.731 +    do {
  18.732 +        synchronize_irq();
  18.733 +        rdtscl(e);
  18.734 +    } while ( ((e-s)/ticks_per_usec) < 100000 );
  18.735 +
  18.736 +    /*
  18.737 +     * Now filter out any obviously spurious interrupts
  18.738 +     */
  18.739 +    val = 0;
  18.740 +    for (i = 0; i < NR_IRQS; i++) {
  18.741 +        irq_desc_t *desc = irq_desc + i;
  18.742 +        unsigned int status;
  18.743 +
  18.744 +        spin_lock_irq(&desc->lock);
  18.745 +        status = desc->status;
  18.746 +
  18.747 +        if (status & IRQ_AUTODETECT) {
  18.748 +            /* It triggered already - consider it spurious. */
  18.749 +            if (!(status & IRQ_WAITING)) {
  18.750 +                desc->status = status & ~IRQ_AUTODETECT;
  18.751 +                desc->handler->shutdown(i);
  18.752 +            } else
  18.753 +                if (i < 32)
  18.754 +                    val |= 1 << i;
  18.755 +        }
  18.756 +        spin_unlock_irq(&desc->lock);
  18.757 +    }
  18.758 +
  18.759 +    return val;
  18.760 +}
  18.761 +
  18.762 +/*
  18.763 + * Return a mask of triggered interrupts (this
  18.764 + * can handle only legacy ISA interrupts).
  18.765 + */
  18.766 + 
  18.767 +/**
  18.768 + *	probe_irq_mask - scan a bitmap of interrupt lines
  18.769 + *	@val:	mask of interrupts to consider
  18.770 + *
  18.771 + *	Scan the ISA bus interrupt lines and return a bitmap of
  18.772 + *	active interrupts. The interrupt probe logic state is then
  18.773 + *	returned to its previous value.
  18.774 + *
  18.775 + *	Note: we need to scan all the IRQs even though we will
  18.776 + *	only return ISA IRQ numbers - just so that we reset them
  18.777 + *	all to a known state.
  18.778 + */
  18.779 +unsigned int probe_irq_mask(unsigned long val)
  18.780 +{
  18.781 +    int i;
  18.782 +    unsigned int mask;
  18.783 +
  18.784 +    mask = 0;
  18.785 +    for (i = 0; i < NR_IRQS; i++) {
  18.786 +        irq_desc_t *desc = irq_desc + i;
  18.787 +        unsigned int status;
  18.788 +
  18.789 +        spin_lock_irq(&desc->lock);
  18.790 +        status = desc->status;
  18.791 +
  18.792 +        if (status & IRQ_AUTODETECT) {
  18.793 +            if (i < 16 && !(status & IRQ_WAITING))
  18.794 +                mask |= 1 << i;
  18.795 +
  18.796 +            desc->status = status & ~IRQ_AUTODETECT;
  18.797 +            desc->handler->shutdown(i);
  18.798 +        }
  18.799 +        spin_unlock_irq(&desc->lock);
  18.800 +    }
  18.801 +    spin_unlock(&probe_sem);
  18.802 +
  18.803 +    return mask & val;
  18.804 +}
  18.805 +
  18.806 +/*
  18.807 + * Return the one interrupt that triggered (this can
  18.808 + * handle any interrupt source).
  18.809 + */
  18.810 +
  18.811 +/**
  18.812 + *	probe_irq_off	- end an interrupt autodetect
  18.813 + *	@val: mask of potential interrupts (unused)
  18.814 + *
  18.815 + *	Scans the unused interrupt lines and returns the line which
  18.816 + *	appears to have triggered the interrupt. If no interrupt was
  18.817 + *	found then zero is returned. If more than one interrupt is
  18.818 + *	found then minus the first candidate is returned to indicate
  18.819 + *	there is doubt.
  18.820 + *
  18.821 + *	The interrupt probe logic state is returned to its previous
  18.822 + *	value.
  18.823 + *
  18.824 + *	BUGS: When used in a module (which arguably shouldn't happen)
  18.825 + *	nothing prevents two IRQ probe callers from overlapping. The
  18.826 + *	results of this are non-optimal.
  18.827 + */
  18.828 + 
  18.829 +int probe_irq_off(unsigned long val)
  18.830 +{
  18.831 +    int i, irq_found, nr_irqs;
  18.832 +
  18.833 +    nr_irqs = 0;
  18.834 +    irq_found = 0;
  18.835 +    for (i = 0; i < NR_IRQS; i++) {
  18.836 +        irq_desc_t *desc = irq_desc + i;
  18.837 +        unsigned int status;
  18.838 +
  18.839 +        spin_lock_irq(&desc->lock);
  18.840 +        status = desc->status;
  18.841 +
  18.842 +        if (status & IRQ_AUTODETECT) {
  18.843 +            if (!(status & IRQ_WAITING)) {
  18.844 +                if (!nr_irqs)
  18.845 +                    irq_found = i;
  18.846 +                nr_irqs++;
  18.847 +            }
  18.848 +            desc->status = status & ~IRQ_AUTODETECT;
  18.849 +            desc->handler->shutdown(i);
  18.850 +        }
  18.851 +        spin_unlock_irq(&desc->lock);
  18.852 +    }
  18.853 +    spin_unlock(&probe_sem);
  18.854 +
  18.855 +    if (nr_irqs > 1)
  18.856 +        irq_found = -irq_found;
  18.857 +    return irq_found;
  18.858 +}
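
Taken together, probe_irq_on()/probe_irq_off() give the classic autoprobe sequence. A sketch, again against the hypothetical device above; mydev_trigger_irq() is a made-up helper that makes the card raise one interrupt.

    static int mydev_probe_irq(struct mydev *dev)
    {
        unsigned long mask;
        int irq;

        mask = probe_irq_on();      /* enable unassigned lines, settle spurious ones */
        mydev_trigger_irq(dev);     /* hypothetical: make the board assert its IRQ */
        /* a short delay is usually inserted here so the interrupt can arrive */
        irq = probe_irq_off(mask);  /* 0: nothing fired, <0: more than one candidate */

        if (irq <= 0)
            return -ENODEV;         /* probe failed or was ambiguous */
        dev->irq = irq;
        return 0;
    }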
  18.859 +
  18.860 +/* this was setup_x86_irq but it seems pretty generic */
  18.861 +int setup_irq(unsigned int irq, struct irqaction * new)
  18.862 +{
  18.863 +    int shared = 0;
  18.864 +    unsigned long flags;
  18.865 +    struct irqaction *old, **p;
  18.866 +    irq_desc_t *desc = irq_desc + irq;
  18.867 +
  18.868 +    /*
  18.869 +     * The following block of code has to be executed atomically
  18.870 +     */
  18.871 +    spin_lock_irqsave(&desc->lock,flags);
  18.872 +    p = &desc->action;
  18.873 +    if ((old = *p) != NULL) {
  18.874 +        /* Can't share interrupts unless both agree to */
  18.875 +        if (!(old->flags & new->flags & SA_SHIRQ)) {
  18.876 +            spin_unlock_irqrestore(&desc->lock,flags);
  18.877 +            return -EBUSY;
  18.878 +        }
  18.879 +
  18.880 +        /* add new interrupt at end of irq queue */
  18.881 +        do {
  18.882 +            p = &old->next;
  18.883 +            old = *p;
  18.884 +        } while (old);
  18.885 +        shared = 1;
  18.886 +    }
  18.887 +
  18.888 +    *p = new;
  18.889 +
  18.890 +    if (!shared) {
  18.891 +        desc->depth = 0;
  18.892 +        desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
  18.893 +        desc->handler->startup(irq);
  18.894 +    }
  18.895 +    spin_unlock_irqrestore(&desc->lock,flags);
  18.896 +
  18.897 +    return 0;
  18.898 +}
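
After two drivers successfully register on the same line with SA_SHIRQ, the list built by setup_irq() is just a singly linked chain hanging off the descriptor; free_irq() above unlinks the entry whose dev_id matches.

    /* irq_desc[irq].action --> { handler_A, dev_id_A } --> { handler_B, dev_id_B } --> NULL */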
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen-2.4.16/arch/i386/mm.c	Wed Nov 20 12:02:17 2002 +0000
    19.3 @@ -0,0 +1,96 @@
    19.4 +#include <xeno/config.h>
    19.5 +#include <xeno/lib.h>
    19.6 +#include <xeno/init.h>
    19.7 +#include <xeno/mm.h>
    19.8 +#include <asm/page.h>
    19.9 +#include <asm/pgalloc.h>
   19.10 +#include <asm/fixmap.h>
   19.11 +
   19.12 +static inline void set_pte_phys (unsigned long vaddr,
   19.13 +                                 l1_pgentry_t entry)
   19.14 +{
   19.15 +    l2_pgentry_t *l2ent;
   19.16 +    l1_pgentry_t *l1ent;
   19.17 +
   19.18 +    l2ent = idle0_pg_table + l2_table_offset(vaddr);
   19.19 +    l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
   19.20 +    *l1ent = entry;
   19.21 +
   19.22 +    /* It's enough to flush this one mapping. */
   19.23 +    __flush_tlb_one(vaddr);
   19.24 +}
   19.25 +
   19.26 +void __set_fixmap (enum fixed_addresses idx, 
   19.27 +                   l1_pgentry_t entry)
   19.28 +{
   19.29 +    unsigned long address = __fix_to_virt(idx);
   19.30 +
   19.31 +    if (idx >= __end_of_fixed_addresses) {
   19.32 +        printk("Invalid __set_fixmap\n");
   19.33 +        return;
   19.34 +    }
   19.35 +    set_pte_phys(address, entry);
   19.36 +}
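
A sketch of how a fixmap slot would be populated through __set_fixmap(). FIX_APIC_BASE and mk_l1_pgentry() are assumed to exist elsewhere in the tree; only __set_fixmap(), l1_pgentry_t and PAGE_HYPERVISOR appear in this file.

    /* Map a physical page (e.g. the local APIC) at its fixed virtual slot. */
    static void map_local_apic(unsigned long apic_phys)
    {
        __set_fixmap(FIX_APIC_BASE,                             /* assumed fixmap index  */
                     mk_l1_pgentry(apic_phys | PAGE_HYPERVISOR)); /* assumed constructor */
    }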
   19.37 +
   19.38 +static void __init fixrange_init (unsigned long start, 
   19.39 +                                  unsigned long end, l2_pgentry_t *pg_base)
   19.40 +{
   19.41 +    l2_pgentry_t *l2e;
   19.42 +    int i;
   19.43 +    unsigned long vaddr, page;
   19.44 +
   19.45 +    vaddr = start;
   19.46 +    i = l2_table_offset(vaddr);
   19.47 +    l2e = pg_base + i;
   19.48 +
   19.49 +    for ( ; (i < ENTRIES_PER_L2_PAGETABLE) && (vaddr != end); l2e++, i++ ) 
   19.50 +    {
   19.51 +        if ( !l2_pgentry_empty(*l2e) ) continue;
   19.52 +        page = (unsigned long)get_free_page(GFP_KERNEL);
   19.53 +        clear_page(page);
   19.54 +        *l2e = mk_l2_pgentry(__pa(page) | PAGE_HYPERVISOR);
   19.55 +        vaddr += 1 << L2_PAGETABLE_SHIFT;
   19.56 +    }
   19.57 +}
   19.58 +
   19.59 +void __init paging_init(void)
   19.60 +{
   19.61 +    unsigned long addr;
   19.62 +
   19.63 +    /* XXX initialised in boot.S */
   19.64 +    /*if ( cpu_has_pge ) set_in_cr4(X86_CR4_PGE);*/
   19.65 +    /*if ( cpu_has_pse ) set_in_cr4(X86_CR4_PSE);*/
   19.66 +    /*if ( cpu_has_pae ) set_in_cr4(X86_CR4_PAE);*/
   19.67 +
   19.68 +    /*
   19.69 +     * Fixed mappings, only the page table structure has to be
   19.70 +     * created - mappings will be set by set_fixmap():
   19.71 +     */
   19.72 +    addr = FIXADDR_START & ~((1<<L2_PAGETABLE_SHIFT)-1);
   19.73 +    fixrange_init(addr, 0, idle0_pg_table); /* end == 0: run to the top of the address space */
   19.74 +}
   19.75 +
   19.76 +void __init zap_low_mappings (void)
   19.77 +{
   19.78 +    int i;
   19.79 +    for (i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   19.80 +        idle0_pg_table[i] = mk_l2_pgentry(0);
   19.81 +    flush_tlb_all();
   19.82 +}
   19.83 +
   19.84 +
   19.85 +long do_set_guest_stack(unsigned long ss, unsigned long esp)
   19.86 +{
   19.87 +    int nr = smp_processor_id();
   19.88 +    struct tss_struct *t = &init_tss[nr];
   19.89 +
   19.90 +    if ( (ss == __HYPERVISOR_CS) || (ss == __HYPERVISOR_DS) )
   19.91 +        return -1;
   19.92 +
   19.93 +    current->thread.ss1  = ss;
   19.94 +    current->thread.esp1 = esp;
   19.95 +    t->ss1  = ss;
   19.96 +    t->esp1 = esp;
   19.97 +
   19.98 +    return 0;
   19.99 +}
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/xen-2.4.16/arch/i386/mpparse.c	Wed Nov 20 12:02:17 2002 +0000
    20.3 @@ -0,0 +1,630 @@
    20.4 +/*
    20.5 + *	Intel Multiprocessor Specification 1.1 and 1.4
    20.6 + *	compliant MP-table parsing routines.
    20.7 + *
    20.8 + *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
    20.9 + *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
   20.10 + *
   20.11 + *	Fixes
   20.12 + *		Erich Boleyn	:	MP v1.4 and additional changes.
   20.13 + *		Alan Cox	:	Added EBDA scanning
   20.14 + *		Ingo Molnar	:	various cleanups and rewrites
   20.15 + *	Maciej W. Rozycki	:	Bits for default MP configurations
   20.16 + */
   20.17 +
   20.18 +#include <xeno/config.h>
   20.19 +#include <xeno/init.h>
   20.20 +#include <xeno/lib.h>
   20.21 +#include <asm/io.h>
   20.22 +#include <xeno/irq.h>
   20.23 +#include <xeno/smp.h>
   20.24 +#include <asm/mpspec.h>
   20.25 +#include <asm/pgalloc.h>
   20.26 +
   20.27 +/* Have we found an MP table */
   20.28 +int smp_found_config;
   20.29 +
   20.30 +/*
   20.31 + * Various Linux-internal data structures created from the
   20.32 + * MP-table.
   20.33 + */
   20.34 +int apic_version [MAX_APICS];
   20.35 +int mp_bus_id_to_type [MAX_MP_BUSSES];
   20.36 +int mp_bus_id_to_node [MAX_MP_BUSSES];
   20.37 +int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
   20.38 +int mp_current_pci_id;
   20.39 +
   20.40 +/* I/O APIC entries */
   20.41 +struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
   20.42 +
   20.43 +/* MP IRQ source entries */
   20.44 +struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
   20.45 +
   20.46 +/* # of MP IRQ source entries */
   20.47 +int mp_irq_entries;
   20.48 +
   20.49 +int nr_ioapics;
   20.50 +
   20.51 +int pic_mode;
   20.52 +unsigned long mp_lapic_addr;
   20.53 +
   20.54 +/* Processor that is doing the boot up */
   20.55 +unsigned int boot_cpu_physical_apicid = -1U;
   20.56 +unsigned int boot_cpu_logical_apicid = -1U;
   20.57 +/* Internal processor count */
   20.58 +static unsigned int num_processors;
   20.59 +
   20.60 +/* Bitmask of physically existing CPUs */
   20.61 +unsigned long phys_cpu_present_map;
   20.62 +
   20.63 +/*
   20.64 + * Intel MP BIOS table parsing routines:
   20.65 + */
   20.66 +
   20.67 +/*
   20.68 + * Checksum an MP configuration block.
   20.69 + */
   20.70 +
   20.71 +static int __init mpf_checksum(unsigned char *mp, int len)
   20.72 +{
   20.73 +    int sum = 0;
   20.74 +
   20.75 +    while (len--)
   20.76 +        sum += *mp++;
   20.77 +
   20.78 +    return sum & 0xFF;
   20.79 +}
   20.80 +
   20.81 +/*
   20.82 + * Processor encoding in an MP configuration block
   20.83 + */
   20.84 +
   20.85 +static char __init *mpc_family(int family,int model)
   20.86 +{
   20.87 +    static char n[32];
   20.88 +    static char *model_defs[]=
   20.89 +    {
   20.90 +        "80486DX","80486DX",
   20.91 +        "80486SX","80486DX/2 or 80487",
   20.92 +        "80486SL","80486SX/2",
   20.93 +        "Unknown","80486DX/2-WB",
   20.94 +        "80486DX/4","80486DX/4-WB"
   20.95 +    };
   20.96 +
   20.97 +    switch (family) {
   20.98 +    case 0x04:
   20.99 +        if (model < 10)
  20.100 +            return model_defs[model];
  20.101 +        break;
  20.102 +
  20.103 +    case 0x05:
  20.104 +        return("Pentium(tm)");
  20.105 +
  20.106 +    case 0x06:
  20.107 +        return("Pentium(tm) Pro");
  20.108 +
  20.109 +    case 0x0F:
  20.110 +        if (model == 0x00)
  20.111 +            return("Pentium 4(tm)");
  20.112 +        if (model == 0x0F)
  20.113 +            return("Special controller");
  20.114 +    }
  20.115 +    sprintf(n,"Unknown CPU [%d:%d]",family, model);
  20.116 +    return n;
  20.117 +}
  20.118 +
  20.119 +/* 
  20.120 + * Have to match translation table entries to main table entries by counter
  20.121 + * hence the mpc_record variable .... can't see a less disgusting way of
  20.122 + * doing this ....
  20.123 + */
  20.124 +
  20.125 +static int mpc_record; 
  20.126 +
  20.127 +void __init MP_processor_info (struct mpc_config_processor *m)
  20.128 +{
  20.129 +    int ver, logical_apicid;
  20.130 + 	
  20.131 +    if (!(m->mpc_cpuflag & CPU_ENABLED))
  20.132 +        return;
  20.133 +
  20.134 +    logical_apicid = m->mpc_apicid;
  20.135 +    printk("Processor #%d %s APIC version %d\n",
  20.136 +           m->mpc_apicid,
  20.137 +           mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 ,
  20.138 +                      (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
  20.139 +           m->mpc_apicver);
  20.140 +
  20.141 +    if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
  20.142 +        Dprintk("    Bootup CPU\n");
  20.143 +        boot_cpu_physical_apicid = m->mpc_apicid;
  20.144 +        boot_cpu_logical_apicid = logical_apicid;
  20.145 +    }
  20.146 +
  20.147 +    num_processors++;
  20.148 +
  20.149 +    if (m->mpc_apicid >= MAX_APICS) {
  20.150 +        printk("Processor #%d INVALID. (Max ID: %d).\n",
  20.151 +               m->mpc_apicid, MAX_APICS - 1);
  20.152 +        return;
  20.153 +    }
  20.154 +    ver = m->mpc_apicver;
  20.155 +
  20.156 +    phys_cpu_present_map |= 1 << m->mpc_apicid;
  20.157 +
  20.158 +    /*
  20.159 +     * Validate version
  20.160 +     */
  20.161 +    if (ver == 0x0) {
  20.162 +        printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
  20.163 +        ver = 0x10;
  20.164 +    }
  20.165 +    apic_version[m->mpc_apicid] = ver;
  20.166 +}
  20.167 +
  20.168 +static void __init MP_bus_info (struct mpc_config_bus *m)
  20.169 +{
  20.170 +    char str[7];
  20.171 +
  20.172 +    memcpy(str, m->mpc_bustype, 6);
  20.173 +    str[6] = 0;
  20.174 +	
  20.175 +    Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
  20.176 +
  20.177 +    if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
  20.178 +        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
  20.179 +    } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
  20.180 +        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
  20.181 +    } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
  20.182 +        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
  20.183 +        mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
  20.184 +        mp_current_pci_id++;
  20.185 +    } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
  20.186 +        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
  20.187 +    } else {
  20.188 +        printk("Unknown bustype %s - ignoring\n", str);
  20.189 +    }
  20.190 +}
  20.191 +
  20.192 +static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
  20.193 +{
  20.194 +    if (!(m->mpc_flags & MPC_APIC_USABLE))
  20.195 +        return;
  20.196 +
  20.197 +    printk("I/O APIC #%d Version %d at 0x%lX.\n",
  20.198 +           m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
  20.199 +    if (nr_ioapics >= MAX_IO_APICS) {
  20.200 +        printk("Max # of I/O APICs (%d) exceeded (found %d).\n",
  20.201 +               MAX_IO_APICS, nr_ioapics);
  20.202 +        panic("Recompile kernel with bigger MAX_IO_APICS!\n");
  20.203 +    }
  20.204 +    if (!m->mpc_apicaddr) {
  20.205 +        printk("WARNING: bogus zero I/O APIC address"
  20.206 +               " found in MP table, skipping!\n");
  20.207 +        return;
  20.208 +    }
  20.209 +    mp_ioapics[nr_ioapics] = *m;
  20.210 +    nr_ioapics++;
  20.211 +}
  20.212 +
  20.213 +static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
  20.214 +{
  20.215 +    mp_irqs [mp_irq_entries] = *m;
  20.216 +    Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
  20.217 +            " IRQ %02x, APIC ID %x, APIC INT %02x\n",
  20.218 +            m->mpc_irqtype, m->mpc_irqflag & 3,
  20.219 +            (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
  20.220 +            m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
  20.221 +    if (++mp_irq_entries == MAX_IRQ_SOURCES)
  20.222 +        panic("Max # of irq sources exceeded!!\n");
  20.223 +}
  20.224 +
  20.225 +static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
  20.226 +{
  20.227 +    Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
  20.228 +            " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
  20.229 +            m->mpc_irqtype, m->mpc_irqflag & 3,
  20.230 +            (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
  20.231 +            m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
  20.232 +    /*
  20.233 +     * Well it seems all SMP boards in existence
  20.234 +     * use ExtINT/LVT1 == LINT0 and
  20.235 +     * NMI/LVT2 == LINT1 - the following check
  20.236 +     * will show us if this assumption is false.
  20.237 +     * Until then we do not have to add baggage.
  20.238 +     */
  20.239 +    if ((m->mpc_irqtype == mp_ExtINT) &&
  20.240 +        (m->mpc_destapiclint != 0))
  20.241 +        BUG();
  20.242 +    if ((m->mpc_irqtype == mp_NMI) &&
  20.243 +        (m->mpc_destapiclint != 1))
  20.244 +        BUG();
  20.245 +}
  20.246 +
  20.247 +
  20.248 +/*
  20.249 + * Read/parse the MPC
  20.250 + */
  20.251 +
  20.252 +static int __init smp_read_mpc(struct mp_config_table *mpc)
  20.253 +{
  20.254 +    char str[16];
  20.255 +    int count=sizeof(*mpc);
  20.256 +    unsigned char *mpt=((unsigned char *)mpc)+count;
  20.257 +
  20.258 +    if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
  20.259 +        panic("SMP mptable: bad signature [%c%c%c%c]!\n",
  20.260 +              mpc->mpc_signature[0],
  20.261 +              mpc->mpc_signature[1],
  20.262 +              mpc->mpc_signature[2],
  20.263 +              mpc->mpc_signature[3]);
  20.264 +        return 0;
  20.265 +    }
  20.266 +    if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
  20.267 +        panic("SMP mptable: checksum error!\n");
  20.268 +        return 0;
  20.269 +    }
  20.270 +    if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
  20.271 +        printk("SMP mptable: bad table version (%d)!!\n",
  20.272 +               mpc->mpc_spec);
  20.273 +        return 0;
  20.274 +    }
  20.275 +    if (!mpc->mpc_lapic) {
  20.276 +        printk("SMP mptable: null local APIC address!\n");
  20.277 +        return 0;
  20.278 +    }
  20.279 +    memcpy(str,mpc->mpc_oem,8);
  20.280 +    str[8]=0;
  20.281 +    printk("OEM ID: %s ",str);
  20.282 +
  20.283 +    memcpy(str,mpc->mpc_productid,12);
  20.284 +    str[12]=0;
  20.285 +    printk("Product ID: %s ",str);
  20.286 +
  20.287 +    printk("APIC at: 0x%lX\n", mpc->mpc_lapic);
  20.288 +
  20.289 +    /* save the local APIC address, it might be non-default. */
  20.290 +    mp_lapic_addr = mpc->mpc_lapic;
  20.291 +
  20.292 +    /*
  20.293 +     *	Now process the configuration blocks.
  20.294 +     */
  20.295 +    while (count < mpc->mpc_length) {
  20.296 +        switch(*mpt) {
  20.297 +        case MP_PROCESSOR:
  20.298 +        {
  20.299 +            struct mpc_config_processor *m=
  20.300 +                (struct mpc_config_processor *)mpt;
  20.301 +
  20.302 +            MP_processor_info(m);
  20.303 +            mpt += sizeof(*m);
  20.304 +            count += sizeof(*m);
  20.305 +            break;
  20.306 +        }
  20.307 +        case MP_BUS:
  20.308 +        {
  20.309 +            struct mpc_config_bus *m=
  20.310 +                (struct mpc_config_bus *)mpt;
  20.311 +            MP_bus_info(m);
  20.312 +            mpt += sizeof(*m);
  20.313 +            count += sizeof(*m);
  20.314 +            break;
  20.315 +        }
  20.316 +        case MP_IOAPIC:
  20.317 +        {
  20.318 +            struct mpc_config_ioapic *m=
  20.319 +                (struct mpc_config_ioapic *)mpt;
  20.320 +            MP_ioapic_info(m);
  20.321 +            mpt+=sizeof(*m);
  20.322 +            count+=sizeof(*m);
  20.323 +            break;
  20.324 +        }
  20.325 +        case MP_INTSRC:
  20.326 +        {
  20.327 +            struct mpc_config_intsrc *m=
  20.328 +                (struct mpc_config_intsrc *)mpt;
  20.329 +
  20.330 +            MP_intsrc_info(m);
  20.331 +            mpt+=sizeof(*m);
  20.332 +            count+=sizeof(*m);
  20.333 +            break;
  20.334 +        }
  20.335 +        case MP_LINTSRC:
  20.336 +        {
  20.337 +            struct mpc_config_lintsrc *m=
  20.338 +                (struct mpc_config_lintsrc *)mpt;
  20.339 +            MP_lintsrc_info(m);
  20.340 +            mpt+=sizeof(*m);
  20.341 +            count+=sizeof(*m);
  20.342 +            break;
  20.343 +        }
  20.344 +        default:
  20.345 +        {
  20.346 +            count = mpc->mpc_length;
  20.347 +            break;
  20.348 +        }
  20.349 +        }
  20.350 +        ++mpc_record;
  20.351 +    }
  20.352 +
  20.353 +    if (!num_processors)
  20.354 +        printk("SMP mptable: no processors registered!\n");
  20.355 +    return num_processors;
  20.356 +}
  20.357 +
  20.358 +static int __init ELCR_trigger(unsigned int irq)
  20.359 +{
  20.360 +    unsigned int port;
  20.361 +
  20.362 +    port = 0x4d0 + (irq >> 3);
  20.363 +    return (inb(port) >> (irq & 7)) & 1;
  20.364 +}
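
The port/bit arithmetic above maps each ISA IRQ onto the two ELCR registers (0x4d0 for IRQ0-7, 0x4d1 for IRQ8-15, one bit per line, set = level triggered); worked through for two sample IRQs:

    /* ELCR_trigger(5):  port = 0x4d0 + (5  >> 3) = 0x4d0, bit = 5  & 7 = 5
     * ELCR_trigger(10): port = 0x4d0 + (10 >> 3) = 0x4d1, bit = 10 & 7 = 2 */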
  20.365 +
  20.366 +static void __init construct_default_ioirq_mptable(int mpc_default_type)
  20.367 +{
  20.368 +    struct mpc_config_intsrc intsrc;
  20.369 +    int i;
  20.370 +    int ELCR_fallback = 0;
  20.371 +
  20.372 +    intsrc.mpc_type = MP_INTSRC;
  20.373 +    intsrc.mpc_irqflag = 0;			/* conforming */
  20.374 +    intsrc.mpc_srcbus = 0;
  20.375 +    intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
  20.376 +
  20.377 +    intsrc.mpc_irqtype = mp_INT;
  20.378 +
  20.379 +    /*
  20.380 +     *  If true, we have an ISA/PCI system with no IRQ entries
  20.381 +     *  in the MP table. To prevent the PCI interrupts from being set up
  20.382 +     *  incorrectly, we try to use the ELCR. The sanity check to see if
  20.383 +     *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
  20.384 +     *  never be level sensitive, so we simply see if the ELCR agrees.
  20.385 +     *  If it does, we assume it's valid.
  20.386 +     */
  20.387 +    if (mpc_default_type == 5) {
  20.388 +        printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
  20.389 +
  20.390 +        if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
  20.391 +            printk("ELCR contains invalid data... not using ELCR\n");
  20.392 +        else {
  20.393 +            printk("Using ELCR to identify PCI interrupts\n");
  20.394 +            ELCR_fallback = 1;
  20.395 +        }
  20.396 +    }
  20.397 +
  20.398 +    for (i = 0; i < 16; i++) {
  20.399 +        switch (mpc_default_type) {
  20.400 +        case 2:
  20.401 +            if (i == 0 || i == 13)
  20.402 +                continue;	/* IRQ0 & IRQ13 not connected */
  20.403 +            /* fall through */
  20.404 +        default:
  20.405 +            if (i == 2)
  20.406 +                continue;	/* IRQ2 is never connected */
  20.407 +        }
  20.408 +
  20.409 +        if (ELCR_fallback) {
  20.410 +            /*
  20.411 +             *  If the ELCR indicates a level-sensitive interrupt, we
  20.412 +             *  copy that information over to the MP table in the
  20.413 +             *  irqflag field (level sensitive, active high polarity).
  20.414 +             */
  20.415 +            if (ELCR_trigger(i))
  20.416 +                intsrc.mpc_irqflag = 13;
  20.417 +            else
  20.418 +                intsrc.mpc_irqflag = 0;
  20.419 +        }
  20.420 +
  20.421 +        intsrc.mpc_srcbusirq = i;
  20.422 +        intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
  20.423 +        MP_intsrc_info(&intsrc);
  20.424 +    }
  20.425 +
  20.426 +    intsrc.mpc_irqtype = mp_ExtINT;
  20.427 +    intsrc.mpc_srcbusirq = 0;
  20.428 +    intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
  20.429 +    MP_intsrc_info(&intsrc);
  20.430 +}
  20.431 +
  20.432 +static inline void __init construct_default_ISA_mptable(int mpc_default_type)
  20.433 +{
  20.434 +    struct mpc_config_processor processor;
  20.435 +    struct mpc_config_bus bus;
  20.436 +    struct mpc_config_ioapic ioapic;
  20.437 +    struct mpc_config_lintsrc lintsrc;
  20.438 +    int linttypes[2] = { mp_ExtINT, mp_NMI };
  20.439 +    int i;
  20.440 +
  20.441 +    /*
  20.442 +     * local APIC has default address
  20.443 +     */
  20.444 +    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
  20.445 +
  20.446 +    /*
  20.447 +     * 2 CPUs, numbered 0 & 1.
  20.448 +     */
  20.449 +    processor.mpc_type = MP_PROCESSOR;
  20.450 +    /* Either an integrated APIC or a discrete 82489DX. */
  20.451 +    processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
  20.452 +    processor.mpc_cpuflag = CPU_ENABLED;
  20.453 +    processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
  20.454 +        (boot_cpu_data.x86_model << 4) |
  20.455 +        boot_cpu_data.x86_mask;
  20.456 +    processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
  20.457 +    processor.mpc_reserved[0] = 0;
  20.458 +    processor.mpc_reserved[1] = 0;
  20.459 +    for (i = 0; i < 2; i++) {
  20.460 +        processor.mpc_apicid = i;
  20.461 +        MP_processor_info(&processor);
  20.462 +    }
  20.463 +
  20.464 +    bus.mpc_type = MP_BUS;
  20.465 +    bus.mpc_busid = 0;
  20.466 +    switch (mpc_default_type) {
  20.467 +    default:
  20.468 +        printk("???\nUnknown standard configuration %d\n",
  20.469 +               mpc_default_type);
  20.470 +        /* fall through */
  20.471 +    case 1:
  20.472 +    case 5:
  20.473 +        memcpy(bus.mpc_bustype, "ISA   ", 6);
  20.474 +        break;
  20.475 +    case 2:
  20.476 +    case 6:
  20.477 +    case 3:
  20.478 +        memcpy(bus.mpc_bustype, "EISA  ", 6);
  20.479 +        break;
  20.480 +    case 4:
  20.481 +    case 7:
  20.482 +        memcpy(bus.mpc_bustype, "MCA   ", 6);
  20.483 +    }
  20.484 +    MP_bus_info(&bus);
  20.485 +    if (mpc_default_type > 4) {
  20.486 +        bus.mpc_busid = 1;
  20.487 +        memcpy(bus.mpc_bustype, "PCI   ", 6);
  20.488 +        MP_bus_info(&bus);
  20.489 +    }
  20.490 +
  20.491 +    ioapic.mpc_type = MP_IOAPIC;
  20.492 +    ioapic.mpc_apicid = 2;
  20.493 +    ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
  20.494 +    ioapic.mpc_flags = MPC_APIC_USABLE;
  20.495 +    ioapic.mpc_apicaddr = 0xFEC00000;
  20.496 +    MP_ioapic_info(&ioapic);
  20.497 +
  20.498 +    /*
  20.499 +     * We set up most of the low 16 IO-APIC pins according to MPS rules.
  20.500 +     */
  20.501 +    construct_default_ioirq_mptable(mpc_default_type);
  20.502 +
  20.503 +    lintsrc.mpc_type = MP_LINTSRC;
  20.504 +    lintsrc.mpc_irqflag = 0;		/* conforming */
  20.505 +    lintsrc.mpc_srcbusid = 0;
  20.506 +    lintsrc.mpc_srcbusirq = 0;
  20.507 +    lintsrc.mpc_destapic = MP_APIC_ALL;
  20.508 +    for (i = 0; i < 2; i++) {
  20.509 +        lintsrc.mpc_irqtype = linttypes[i];
  20.510 +        lintsrc.mpc_destapiclint = i;
  20.511 +        MP_lintsrc_info(&lintsrc);
  20.512 +    }
  20.513 +}
  20.514 +
  20.515 +static struct intel_mp_floating *mpf_found;
  20.516 +
  20.517 +/*
  20.518 + * Scan the memory blocks for an SMP configuration block.
  20.519 + */
  20.520 +void __init get_smp_config (void)
  20.521 +{
  20.522 +    struct intel_mp_floating *mpf = mpf_found;
  20.523 +	
  20.524 +    printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
  20.525 +    if (mpf->mpf_feature2 & (1<<7)) {
  20.526 +        printk("    IMCR and PIC compatibility mode.\n");
  20.527 +        pic_mode = 1;
  20.528 +    } else {
  20.529 +        printk("    Virtual Wire compatibility mode.\n");
  20.530 +        pic_mode = 0;
  20.531 +    }
  20.532 +
  20.533 +    /*
  20.534 +     * Now see if we need to read further.
  20.535 +     */
  20.536 +    if (mpf->mpf_feature1 != 0) {
  20.537 +
  20.538 +        printk("Default MP configuration #%d\n", mpf->mpf_feature1);
  20.539 +        construct_default_ISA_mptable(mpf->mpf_feature1);
  20.540 +
  20.541 +    } else if (mpf->mpf_physptr) {
  20.542 +
  20.543 +        /*
  20.544 +         * Read the physical hardware table.  Anything here will
  20.545 +         * override the defaults.
  20.546 +         */
  20.547 +        if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
  20.548 +            smp_found_config = 0;
  20.549 +            printk("BIOS bug, MP table errors detected!...\n");
  20.550 +            printk("... disabling SMP support. (tell your hw vendor)\n");
  20.551 +            return;
  20.552 +        }
  20.553 +        /*
  20.554 +         * If there are no explicit MP IRQ entries, then we are
  20.555 +         * broken.  We set up most of the low 16 IO-APIC pins to
  20.556 +         * ISA defaults and hope it will work.
  20.557 +         */
  20.558 +        if (!mp_irq_entries) {
  20.559 +            struct mpc_config_bus bus;
  20.560 +
  20.561 +            printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
  20.562 +
  20.563 +            bus.mpc_type = MP_BUS;
  20.564 +            bus.mpc_busid = 0;
  20.565 +            memcpy(bus.mpc_bustype, "ISA   ", 6);
  20.566 +            MP_bus_info(&bus);
  20.567 +
  20.568 +            construct_default_ioirq_mptable(0);
  20.569 +        }
  20.570 +
  20.571 +    } else
  20.572 +        BUG();
  20.573 +
  20.574 +    printk("Processors: %d\n", num_processors);
  20.575 +    /*
  20.576 +     * Only use the first configuration found.
  20.577 +     */
  20.578 +}
  20.579 +
  20.580 +static int __init smp_scan_config (unsigned long base, unsigned long length)
  20.581 +{
  20.582 +    unsigned long *bp = phys_to_virt(base);
  20.583 +    struct intel_mp_floating *mpf;
  20.584 +
  20.585 +    Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
  20.586 +    if (sizeof(*mpf) != 16)
  20.587 +        printk("Error: MPF size\n");
  20.588 +
  20.589 +    while (length > 0) {
  20.590 +        mpf = (struct intel_mp_floating *)bp;
  20.591 +        if ((*bp == SMP_MAGIC_IDENT) &&
  20.592 +            (mpf->mpf_length == 1) &&
  20.593 +            !mpf_checksum((unsigned char *)bp, 16) &&
  20.594 +            ((mpf->mpf_specification == 1)
  20.595 +             || (mpf->mpf_specification == 4)) ) {
  20.596 +
  20.597 +            smp_found_config = 1;
  20.598 +            printk("found SMP MP-table at %08lx\n",
  20.599 +                   virt_to_phys(mpf));
  20.600 +            reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
  20.601 +            if (mpf->mpf_physptr)
  20.602 +                reserve_bootmem(mpf->mpf_physptr, PAGE_SIZE);
  20.603 +            mpf_found = mpf;
  20.604 +            return 1;
  20.605 +        }
  20.606 +        bp += 4;       /* bp is an unsigned long *, so this advances 16 bytes */
  20.607 +        length -= 16;
  20.608 +    }
  20.609 +    return 0;
  20.610 +}
  20.611 +
  20.612 +void __init find_intel_smp (void)
  20.613 +{
  20.614 +    /*
  20.615 +     * 1) Scan the bottom 1K for a signature
  20.616 +     * 2) Scan the top 1K of base RAM
  20.617 +     * 3) Scan the 64K of BIOS
  20.618 +     */
  20.619 +    if (smp_scan_config(0x0,0x400) ||
  20.620 +        smp_scan_config(639*0x400,0x400) ||
  20.621 +        smp_scan_config(0xF0000,0x10000))
  20.622 +        return;
  20.623 +}
  20.624 +
  20.625 +/*
  20.626 + * - Intel MP Configuration Table
  20.627 + * - or SGI Visual Workstation configuration
  20.628 + */
  20.629 +void __init find_smp_config (void)
  20.630 +{
  20.631 +    find_intel_smp();
  20.632 +}
  20.633 +
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/xen-2.4.16/arch/i386/pci-dma.c	Wed Nov 20 12:02:17 2002 +0000
    21.3 @@ -0,0 +1,37 @@
    21.4 +/*
    21.5 + * Dynamic DMA mapping support.
    21.6 + *
    21.7 + * On i386 there is no hardware dynamic DMA address translation,
    21.8 + * so consistent alloc/free are merely page allocation/freeing.
    21.9 + * The rest of the dynamic DMA mapping interface is implemented
   21.10 + * in asm/pci.h.
   21.11 + */
   21.12 +
   21.13 +#include <linux/types.h>
   21.14 +#include <linux/mm.h>
   21.15 +#include <linux/lib.h>
   21.16 +#include <linux/pci.h>
   21.17 +#include <asm/io.h>
   21.18 +
   21.19 +void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   21.20 +			   dma_addr_t *dma_handle)
   21.21 +{
   21.22 +	void *ret;
   21.23 +	int gfp = GFP_ATOMIC;
   21.24 +
   21.25 +	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
   21.26 +		gfp |= GFP_DMA;
   21.27 +	ret = (void *)__get_free_pages(gfp, get_order(size));
   21.28 +
   21.29 +	if (ret != NULL) {
   21.30 +		memset(ret, 0, size);
   21.31 +		*dma_handle = virt_to_bus(ret);
   21.32 +	}
   21.33 +	return ret;
   21.34 +}
   21.35 +
   21.36 +void pci_free_consistent(struct pci_dev *hwdev, size_t size,
   21.37 +			 void *vaddr, dma_addr_t dma_handle)
   21.38 +{
   21.39 +	free_pages((unsigned long)vaddr, get_order(size));
   21.40 +}
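
A typical call pattern for the two routines above, with a hypothetical card that needs a small DMA-coherent descriptor area; only pci_alloc_consistent(), pci_free_consistent() and dma_addr_t come from this file, the rest is illustrative.

	/* Hypothetical driver-private state. */
	struct mycard {
		struct pci_dev *pdev;
		void *desc;             /* CPU virtual address of the descriptor area */
		dma_addr_t desc_dma;    /* bus address to program into the device     */
	};

	static int mycard_alloc_ring(struct mycard *card)
	{
		card->desc = pci_alloc_consistent(card->pdev, 4096, &card->desc_dma);
		if (card->desc == NULL)
			return -ENOMEM;
		/* write card->desc_dma into the device's descriptor base register here */
		return 0;
	}

	static void mycard_free_ring(struct mycard *card)
	{
		pci_free_consistent(card->pdev, 4096, card->desc, card->desc_dma);
	}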
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xen-2.4.16/arch/i386/pci-i386.c	Wed Nov 20 12:02:17 2002 +0000
    22.3 @@ -0,0 +1,354 @@
    22.4 +/*
    22.5 + *	Low-Level PCI Access for i386 machines
    22.6 + *
    22.7 + * Copyright 1993, 1994 Drew Eckhardt
    22.8 + *      Visionary Computing
    22.9 + *      (Unix and Linux consulting and custom programming)
   22.10 + *      Drew@Colorado.EDU
   22.11 + *      +1 (303) 786-7975
   22.12 + *
   22.13 + * Drew's work was sponsored by:
   22.14 + *	iX Multiuser Multitasking Magazine
   22.15 + *	Hannover, Germany
   22.16 + *	hm@ix.de
   22.17 + *
   22.18 + * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
   22.19 + *
   22.20 + * For more information, please consult the following manuals (look at
   22.21 + * http://www.pcisig.com/ for how to get them):
   22.22 + *
   22.23 + * PCI BIOS Specification
   22.24 + * PCI Local Bus Specification
   22.25 + * PCI to PCI Bridge Specification
   22.26 + * PCI System Design Guide
   22.27 + *
   22.28 + *
   22.29 + * CHANGELOG :
   22.30 + * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
   22.31 + *	Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
   22.32 + *
   22.33 + * Jan 5,  1995 : Modified to probe PCI hardware at boot time by Frederic
   22.34 + *     Potter, potter@cao-vlsi.ibp.fr
   22.35 + *
   22.36 + * Jan 10, 1995 : Modified to store the information about configured pci
   22.37 + *      devices into a list, which can be accessed via /proc/pci by
   22.38 + *      Curtis Varner, cvarner@cs.ucr.edu
   22.39 + *
   22.40 + * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
   22.41 + *	Alpha version. Intel & UMC chipset support only.
   22.42 + *
   22.43 + * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
   22.44 + *	moved to drivers/pci/pci.c.
   22.45 + *
   22.46 + * Dec 7, 1996  : Added support for direct configuration access of boards
   22.47 + *      with Intel compatible access schemes (tsbogend@alpha.franken.de)
   22.48 + *
   22.49 + * Feb 3, 1997  : Set internal functions to static, save/restore flags
   22.50 + *	avoid dead locks reading broken PCI BIOS, werner@suse.de 
   22.51 + *
   22.52 + * Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
   22.53 + *	(mj@atrey.karlin.mff.cuni.cz)
   22.54 + *
   22.55 + * May 7,  1997 : Added some missing cli()'s. [mj]
   22.56 + * 
   22.57 + * Jun 20, 1997 : Corrected problems in "conf1" type accesses.
   22.58 + *      (paubert@iram.es)
   22.59 + *
   22.60 + * Aug 2,  1997 : Split to PCI BIOS handling and direct PCI access parts
   22.61 + *	and cleaned it up...     Martin Mares <mj@atrey.karlin.mff.cuni.cz>
   22.62 + *
   22.63 + * Feb 6,  1998 : No longer using BIOS to find devices and device classes. [mj]
   22.64 + *
   22.65 + * May 1,  1998 : Support for peer host bridges. [mj]
   22.66 + *
   22.67 + * Jun 19, 1998 : Changed to use spinlocks, so that PCI configuration space
   22.68 + *	can be accessed from interrupts even on SMP systems. [mj]
   22.69 + *
   22.70 + * August  1998 : Better support for peer host bridges and more paranoid
   22.71 + *	checks for direct hardware access. Ugh, this file starts to look as
   22.72 + *	a large gallery of common hardware bug workarounds (watch the comments)
   22.73 + *	-- the PCI specs themselves are sane, but most implementors should be
   22.74 + *	hit hard with \hammer scaled \magstep5. [mj]
   22.75 + *
   22.76 + * Jan 23, 1999 : More improvements to peer host bridge logic. i450NX fixup. [mj]
   22.77 + *
   22.78 + * Feb 8,  1999 : Added UM8886BF I/O address fixup. [mj]
   22.79 + *
   22.80 + * August  1999 : New resource management and configuration access stuff. [mj]
   22.81 + *
   22.82 + * Sep 19, 1999 : Use PCI IRQ routing tables for detection of peer host bridges.
   22.83 + *		  Based on ideas by Chris Frantz and David Hinds. [mj]
   22.84 + *
   22.85 + * Sep 28, 1999 : Handle unreported/unassigned IRQs. Thanks to Shuu Yamaguchi
   22.86 + *		  for a lot of patience during testing. [mj]
   22.87 + *
   22.88 + * Oct  8, 1999 : Split to pci-i386.c, pci-pc.c and pci-visws.c. [mj]
   22.89 + */
   22.90 +
   22.91 +#include <linux/types.h>
   22.92 +//#include <linux/kernel.h>
   22.93 +#include <linux/pci.h>
   22.94 +#include <linux/init.h>
   22.95 +#include <linux/ioport.h>
   22.96 +#include <linux/errno.h>
   22.97 +
   22.98 +#include "pci-i386.h"
   22.99 +
  22.100 +void
  22.101 +pcibios_update_resource(struct pci_dev *dev, struct resource *root,
  22.102 +			struct resource *res, int resource)
  22.103 +{
  22.104 +	u32 new, check;
  22.105 +	int reg;
  22.106 +
  22.107 +	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
  22.108 +	if (resource < 6) {
  22.109 +		reg = PCI_BASE_ADDRESS_0 + 4*resource;
  22.110 +	} else if (resource == PCI_ROM_RESOURCE) {
  22.111 +		res->flags |= PCI_ROM_ADDRESS_ENABLE;
  22.112 +		new |= PCI_ROM_ADDRESS_ENABLE;
  22.113 +		reg = dev->rom_base_reg;
  22.114 +	} else {
  22.115 +		/* Somebody might have asked allocation of a non-standard resource */
  22.116 +		return;
  22.117 +	}
  22.118 +	
  22.119 +	pci_write_config_dword(dev, reg, new);
  22.120 +	pci_read_config_dword(dev, reg, &check);
  22.121 +	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
  22.122 +		printk(KERN_ERR "PCI: Error while updating region "
  22.123 +		       "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
  22.124 +		       new, check);
  22.125 +	}
  22.126 +}
  22.127 +
  22.128 +/*
  22.129 + * We need to avoid collisions with `mirrored' VGA ports
  22.130 + * and other strange ISA hardware, so we always want the
  22.131 + * addresses to be allocated in the 0x000-0x0ff region
  22.132 + * modulo 0x400.
  22.133 + *
  22.134 + * Why? Because some silly external IO cards only decode
  22.135 + * the low 10 bits of the IO address. The 0x00-0xff region
  22.136 + * is reserved for motherboard devices that decode all 16
  22.137 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
  22.138 + * but we want to try to avoid allocating at 0x2900-0x2bff
  22.139 + * which might be mirrored at 0x0100-0x03ff.
  22.140 + */
  22.141 +void
  22.142 +pcibios_align_resource(void *data, struct resource *res, unsigned long size)
  22.143 +{
  22.144 +	if (res->flags & IORESOURCE_IO) {
  22.145 +		unsigned long start = res->start;
  22.146 +
  22.147 +		if (start & 0x300) {
  22.148 +			start = (start + 0x3ff) & ~0x3ff;
  22.149 +			res->start = start;
  22.150 +		}
  22.151 +	}
  22.152 +}
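
To make the rounding rule concrete (start values chosen purely for illustration):

	/* pcibios_align_resource() worked through:
	 *   start = 0x2920: 0x2920 & 0x300 != 0  ->  (0x2920 + 0x3ff) & ~0x3ff = 0x2c00
	 *   start = 0x2c80: low ten bits are 0x080 (inside 0x000-0x0ff)  ->  left unchanged */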
  22.153 +
  22.154 +
  22.155 +/*
  22.156 + *  Handle resources of PCI devices.  If the world were perfect, we could
  22.157 + *  just allocate all the resource regions and do nothing more.  It isn't.
  22.158 + *  On the other hand, we cannot just re-allocate all devices, as it would
  22.159 + *  require us to know lots of host bridge internals.  So we attempt to
  22.160 + *  keep as much of the original configuration as possible, but tweak it
  22.161 + *  when it's found to be wrong.
  22.162 + *
  22.163 + *  Known BIOS problems we have to work around:
  22.164 + *	- I/O or memory regions not configured
  22.165 + *	- regions configured, but not enabled in the command register
  22.166 + *	- bogus I/O addresses above 64K used
  22.167 + *	- expansion ROMs left enabled (this may sound harmless, but given
  22.168 + *	  the fact the PCI specs explicitly allow address decoders to be
  22.169 + *	  shared between expansion ROMs and other resource regions, it's
  22.170 + *	  at least dangerous)
  22.171 + *
  22.172 + *  Our solution:
  22.173 + *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
  22.174 + *	    This gives us fixed barriers on where we can allocate.
  22.175 + *	(2) Allocate resources for all enabled devices.  If there is
  22.176 + *	    a collision, just mark the resource as unallocated. Also
  22.177 + *	    disable expansion ROMs during this step.
  22.178 + *	(3) Try to allocate resources for disabled devices.  If the
  22.179 + *	    resources were assigned correctly, everything goes well,
  22.180 + *	    if they weren't, they won't disturb allocation of other
  22.181 + *	    resources.
  22.182 + *	(4) Assign new addresses to resources which were either
  22.183 + *	    not configured at all or misconfigured.  If explicitly
  22.184 + *	    requested by the user, configure expansion ROM address
  22.185 + *	    as well.
  22.186 + */
  22.187 +
  22.188 +static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
  22.189 +{
  22.190 +	struct list_head *ln;
  22.191 +	struct pci_bus *bus;
  22.192 +	struct pci_dev *dev;
  22.193 +	int idx;
  22.194 +	struct resource *r, *pr;
  22.195 +
  22.196 +	/* Depth-First Search on bus tree */
  22.197 +	for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
  22.198 +		bus = pci_bus_b(ln);
  22.199 +		if ((dev = bus->self)) {
  22.200 +			for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
  22.201 +				r = &dev->resource[idx];
  22.202 +				if (!r->start)
  22.203 +					continue;
  22.204 +				pr = pci_find_parent_resource(dev, r);
  22.205 +				if (!pr || request_resource(pr, r) < 0)
  22.206 +					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, dev->slot_name);
  22.207 +			}
  22.208 +		}
  22.209 +		pcibios_allocate_bus_resources(&bus->children);
  22.210 +	}
  22.211 +}
  22.212 +
  22.213 +static void __init pcibios_allocate_resources(int pass)
  22.214 +{
  22.215 +	struct pci_dev *dev;
  22.216 +	int idx, disabled;
  22.217 +	u16 command;
  22.218 +	struct resource *r, *pr;
  22.219 +
  22.220 +	pci_for_each_dev(dev) {
  22.221 +		pci_read_config_word(dev, PCI_COMMAND, &command);
  22.222 +		for(idx = 0; idx < 6; idx++) {
  22.223 +			r = &dev->resource[idx];
  22.224 +			if (r->parent)		/* Already allocated */
  22.225 +				continue;
  22.226 +			if (!r->start)		/* Address not assigned at all */
  22.227 +				continue;
  22.228 +			if (r->flags & IORESOURCE_IO)
  22.229 +				disabled = !(command & PCI_COMMAND_IO);
  22.230 +			else
  22.231 +				disabled = !(command & PCI_COMMAND_MEMORY);
  22.232 +			if (pass == disabled) {
  22.233 +				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
  22.234 +				    r->start, r->end, r->flags, disabled, pass);
  22.235 +				pr = pci_find_parent_resource(dev, r);
  22.236 +				if (!pr || request_resource(pr, r) < 0) {
  22.237 +					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, dev->slot_name);
  22.238 +					/* We'll assign a new address later */
  22.239 +					r->end -= r->start;
  22.240 +					r->start = 0;
  22.241 +				}
  22.242 +			}
  22.243 +		}
  22.244 +		if (!pass) {
  22.245 +			r = &dev->resource[PCI_ROM_RESOURCE];
  22.246 +			if (r->flags & PCI_ROM_ADDRESS_ENABLE) {
  22.247 +				/* Turn the ROM off, leave the resource region, but keep it unregistered. */
  22.248 +				u32 reg;
  22.249 +				DBG("PCI: Switching off ROM of %s\n", dev->slot_name);
  22.250 +				r->flags &= ~PCI_ROM_ADDRESS_ENABLE;
  22.251 +				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
  22.252 +				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
  22.253 +			}
  22.254 +		}
  22.255 +	}
  22.256 +}
  22.257 +
  22.258 +static void __init pcibios_assign_resources(void)
  22.259 +{
  22.260 +	struct pci_dev *dev;
  22.261 +	int idx;
  22.262 +	struct resource *r;
  22.263 +
  22.264 +	pci_for_each_dev(dev) {
  22.265 +		int class = dev->class >> 8;
  22.266 +
  22.267 +		/* Don't touch classless devices and host bridges */
  22.268 +		if (!class || class == PCI_CLASS_BRIDGE_HOST)
  22.269 +			continue;
  22.270 +
  22.271 +		for(idx=0; idx<6; idx++) {
  22.272 +			r = &dev->resource[idx];
  22.273 +
  22.274 +			/*
  22.275 +			 *  Don't touch IDE controllers and I/O ports of video cards!
  22.276 +			 */
  22.277 +			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
  22.278 +			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
  22.279 +				continue;
  22.280 +
  22.281 +			/*
  22.282 +			 *  We shall assign a new address to this resource, either because
  22.283 +			 *  the BIOS forgot to do so or because we have decided the old
  22.284 +			 *  address was unusable for some reason.
  22.285 +			 */
  22.286 +			if (!r->start && r->end)
  22.287 +				pci_assign_resource(dev, idx);
  22.288 +		}
  22.289 +
  22.290 +		if (pci_probe & PCI_ASSIGN_ROMS) {
  22.291 +			r = &dev->resource[PCI_ROM_RESOURCE];
  22.292 +			r->end -= r->start;
  22.293 +			r->start = 0;
  22.294 +			if (r->end)
  22.295 +				pci_assign_resource(dev, PCI_ROM_RESOURCE);
  22.296 +		}
  22.297 +	}
  22.298 +}
  22.299 +
  22.300 +void __init pcibios_resource_survey(void)
  22.301 +{
  22.302 +	DBG("PCI: Allocating resources\n");
  22.303 +	pcibios_allocate_bus_resources(&pci_root_buses);
  22.304 +	pcibios_allocate_resources(0);
  22.305 +	pcibios_allocate_resources(1);
  22.306 +	pcibios_assign_resources();
  22.307 +}
  22.308 +
  22.309 +int pcibios_enable_resources(struct pci_dev *dev)
  22.310 +{
  22.311 +	u16 cmd, old_cmd;
  22.312 +	int idx;
  22.313 +	struct resource *r;
  22.314 +
  22.315 +	pci_read_config_word(dev, PCI_COMMAND, &cmd);
  22.316 +	old_cmd = cmd;
  22.317 +	for(idx=0; idx<6; idx++) {
  22.318 +		r = &dev->resource[idx];
  22.319 +		if (!r->start && r->end) {
  22.320 +			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
  22.321 +			return -EINVAL;
  22.322 +		}
  22.323 +		if (r->flags & IORESOURCE_IO)
  22.324 +			cmd |= PCI_COMMAND_IO;
  22.325 +		if (r->flags & IORESOURCE_MEM)
  22.326 +			cmd |= PCI_COMMAND_MEMORY;
  22.327 +	}
  22.328 +	if (dev->resource[PCI_ROM_RESOURCE].start)
  22.329 +		cmd |= PCI_COMMAND_MEMORY;
  22.330 +	if (cmd != old_cmd) {
  22.331 +		printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
  22.332 +		pci_write_config_word(dev, PCI_COMMAND, cmd);
  22.333 +	}
  22.334 +
  22.335 +	return 0;
  22.336 +}
  22.337 +
  22.338 +/*
  22.339 + *  If we set up a device for bus mastering, we need to check the latency
  22.340 + *  timer as certain crappy BIOSes forget to set it properly.
  22.341 + */
  22.342 +unsigned int pcibios_max_latency = 255;
  22.343 +
  22.344 +void pcibios_set_master(struct pci_dev *dev)
  22.345 +{
  22.346 +	u8 lat;
  22.347 +	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
  22.348 +	if (lat < 16)
  22.349 +		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
  22.350 +	else if (lat > pcibios_max_latency)
  22.351 +		lat = pcibios_max_latency;
  22.352 +	else
  22.353 +		return;
  22.354 +	printk("PCI: Setting latency timer of device %s to %d\n", dev->slot_name, lat);
  22.355 +	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
  22.356 +}
  22.357 +
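
For reference, the latency clamping above works out as follows; the values are illustrative:

	/* pcibios_set_master() examples (default pcibios_max_latency == 255):
	 *   lat ==   0  ->  written back as 64
	 *   lat ==  32  ->  in range, left alone (no config write, no message)
	 *   lat == 248 with pcibios_max_latency lowered to 128  ->  clamped to 128 */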
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xen-2.4.16/arch/i386/pci-i386.h	Wed Nov 20 12:02:17 2002 +0000
    23.3 @@ -0,0 +1,69 @@
    23.4 +/*
    23.5 + *	Low-Level PCI Access for i386 machines.
    23.6 + *
    23.7 + *	(c) 1999 Martin Mares <mj@ucw.cz>
    23.8 + */
    23.9 +
   23.10 +#undef DEBUG
   23.11 +
   23.12 +#ifdef DEBUG
   23.13 +#define DBG(x...) printk(x)
   23.14 +#else
   23.15 +#define DBG(x...)
   23.16 +#endif
   23.17 +
   23.18 +#define PCI_PROBE_BIOS		0x0001
   23.19 +#define PCI_PROBE_CONF1		0x0002
   23.20 +#define PCI_PROBE_CONF2		0x0004
   23.21 +#define PCI_NO_SORT		0x0100
   23.22 +#define PCI_BIOS_SORT		0x0200
   23.23 +#define PCI_NO_CHECKS		0x0400
   23.24 +#define PCI_ASSIGN_ROMS		0x1000
   23.25 +#define PCI_BIOS_IRQ_SCAN	0x2000
   23.26 +#define PCI_ASSIGN_ALL_BUSSES	0x4000
   23.27 +
   23.28 +extern unsigned int pci_probe;
   23.29 +
   23.30 +/* pci-i386.c */
   23.31 +
   23.32 +extern unsigned int pcibios_max_latency;
   23.33 +
   23.34 +void pcibios_resource_survey(void);
   23.35 +int pcibios_enable_resources(struct pci_dev *);
   23.36 +
   23.37 +/* pci-pc.c */
   23.38 +
   23.39 +extern int pcibios_last_bus;
   23.40 +extern struct pci_bus *pci_root_bus;
   23.41 +extern struct pci_ops *pci_root_ops;
   23.42 +
   23.43 +/* pci-irq.c */
   23.44 +
   23.45 +struct irq_info {
   23.46 +	u8 bus, devfn;			/* Bus, device and function */
   23.47 +	struct {
   23.48 +		u8 link;		/* IRQ line ID, chipset dependent, 0=not routed */
   23.49 +		u16 bitmap;		/* Available IRQs */
   23.50 +	} __attribute__((packed)) irq[4];
   23.51 +	u8 slot;			/* Slot number, 0=onboard */
   23.52 +	u8 rfu;
   23.53 +} __attribute__((packed));
   23.54 +
   23.55 +struct irq_routing_table {
   23.56 +	u32 signature;			/* PIRQ_SIGNATURE should be here */
   23.57 +	u16 version;			/* PIRQ_VERSION */
   23.58 +	u16 size;			/* Table size in bytes */
   23.59 +	u8 rtr_bus, rtr_devfn;		/* Where the interrupt router lies */
   23.60 +	u16 exclusive_irqs;		/* IRQs devoted exclusively to PCI usage */
   23.61 +	u16 rtr_vendor, rtr_device;	/* Vendor and device ID of interrupt router */
   23.62 +	u32 miniport_data;		/* Crap */
   23.63 +	u8 rfu[11];
   23.64 +	u8 checksum;			/* Modulo 256 checksum must give zero */
   23.65 +	struct irq_info slots[0];
   23.66 +} __attribute__((packed));
   23.67 +
   23.68 +extern unsigned int pcibios_irq_mask;
   23.69 +
   23.70 +void pcibios_irq_init(void);
   23.71 +void pcibios_fixup_irqs(void);
   23.72 +void pcibios_enable_irq(struct pci_dev *dev);
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen-2.4.16/arch/i386/pci-irq.c	Wed Nov 20 12:02:17 2002 +0000
    24.3 @@ -0,0 +1,753 @@
    24.4 +/*
    24.5 + *	Low-Level PCI Support for PC -- Routing of Interrupts
    24.6 + *
    24.7 + *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
    24.8 + */
    24.9 +
   24.10 +#include <linux/config.h>
   24.11 +#include <linux/types.h>
   24.12 +//#include <linux/kernel.h>
   24.13 +#include <linux/pci.h>
   24.14 +#include <linux/init.h>
   24.15 +#include <linux/slab.h>
   24.16 +#include <linux/interrupt.h>
   24.17 +#include <linux/irq.h>
   24.18 +#include <linux/sched.h>
   24.19 +
   24.20 +#include <asm/io.h>
   24.21 +#include <asm/smp.h>
   24.22 +#include <asm/io_apic.h>
   24.23 +
   24.24 +#include "pci-i386.h"
   24.25 +
   24.26 +#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
   24.27 +#define PIRQ_VERSION 0x0100
   24.28 +
   24.29 +static struct irq_routing_table *pirq_table;
   24.30 +
   24.31 +/*
   24.32 + * Never use: 0, 1, 2 (timer, keyboard, and cascade)
   24.33 + * Avoid using: 13, 14 and 15 (FP error and IDE).
   24.34 + * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
   24.35 + */
   24.36 +unsigned int pcibios_irq_mask = 0xfff8;
   24.37 +
   24.38 +static int pirq_penalty[16] = {
   24.39 +	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
   24.40 +	0, 0, 0, 0, 1000, 100000, 100000, 100000
   24.41 +};
   24.42 +
   24.43 +struct irq_router {
   24.44 +	char *name;
   24.45 +	u16 vendor, device;
   24.46 +	int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
   24.47 +	int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
   24.48 +};
   24.49 +
   24.50 +/*
   24.51 + *  Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
   24.52 + */
   24.53 +
   24.54 +static struct irq_routing_table * __init pirq_find_routing_table(void)
   24.55 +{
   24.56 +	u8 *addr;
   24.57 +	struct irq_routing_table *rt;
   24.58 +	int i;
   24.59 +	u8 sum;
   24.60 +
   24.61 +	for(addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
   24.62 +		rt = (struct irq_routing_table *) addr;
   24.63 +		if (rt->signature != PIRQ_SIGNATURE ||
   24.64 +		    rt->version != PIRQ_VERSION ||
   24.65 +		    rt->size % 16 ||
   24.66 +		    rt->size < sizeof(struct irq_routing_table))
   24.67 +			continue;
   24.68 +		sum = 0;
   24.69 +		for(i=0; i<rt->size; i++)
   24.70 +			sum += addr[i];
   24.71 +		if (!sum) {
   24.72 +			DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
   24.73 +			return rt;
   24.74 +		}
   24.75 +	}
   24.76 +	return NULL;
   24.77 +}
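
The same signature/size/checksum test can be exercised outside the kernel against an in-memory buffer instead of the real 0xf0000--0xfffff BIOS area. A stand-alone sketch, assuming a little-endian host (as the i386 code does) and a fabricated 32-byte table:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))

/* Validate one candidate the same way the scan above does: signature,
 * a size that is a non-zero multiple of 16 and at least the 32-byte
 * header, and a byte sum of zero modulo 256 over the whole table. */
static int pirq_candidate_ok(const uint8_t *p)
{
	uint32_t sig;
	uint16_t size;
	uint8_t sum = 0;
	int i;

	memcpy(&sig, p, 4);	/* signature at offset 0 */
	memcpy(&size, p + 6, 2);	/* size at offset 6 */
	if (sig != PIRQ_SIGNATURE || size < 32 || size % 16)
		return 0;
	for (i = 0; i < size; i++)
		sum += p[i];
	return sum == 0;
}

int main(void)
{
	/* Fabricated 32-byte table: "$PIR", version 0x0100, size 32. */
	uint8_t buf[32] = { '$', 'P', 'I', 'R', 0x00, 0x01, 32, 0 };
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 31; i++)
		sum += buf[i];
	buf[31] = (uint8_t)(0x100 - sum);	/* make the bytes sum to zero */

	printf("candidate valid: %d\n", pirq_candidate_ok(buf));
	return 0;
}
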
   24.78 +
   24.79 +/*
    24.80 + *  If we have an IRQ routing table, use it to search for peer host
    24.81 + *  bridges.  It's a gross hack, but since there is no other known
    24.82 + *  way to get a list of buses, we have to go this way.
   24.83 + */
   24.84 +
   24.85 +static void __init pirq_peer_trick(void)
   24.86 +{
   24.87 +	struct irq_routing_table *rt = pirq_table;
   24.88 +	u8 busmap[256];
   24.89 +	int i;
   24.90 +	struct irq_info *e;
   24.91 +
   24.92 +	memset(busmap, 0, sizeof(busmap));
   24.93 +	for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
   24.94 +		e = &rt->slots[i];
   24.95 +#ifdef DEBUG
   24.96 +		{
   24.97 +			int j;
   24.98 +			DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
   24.99 +			for(j=0; j<4; j++)
  24.100 +				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
  24.101 +			DBG("\n");
  24.102 +		}
  24.103 +#endif
  24.104 +		busmap[e->bus] = 1;
  24.105 +	}
  24.106 +	for(i=1; i<256; i++)
  24.107 +		/*
  24.108 +		 *  It might be a secondary bus, but in this case its parent is already
  24.109 +		 *  known (ascending bus order) and therefore pci_scan_bus returns immediately.
  24.110 +		 */
  24.111 +		if (busmap[i] && pci_scan_bus(i, pci_root_bus->ops, NULL))
  24.112 +			printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
  24.113 +	pcibios_last_bus = -1;
  24.114 +}
  24.115 +
  24.116 +/*
  24.117 + *  Code for querying and setting of IRQ routes on various interrupt routers.
  24.118 + */
  24.119 +
  24.120 +static void eisa_set_level_irq(unsigned int irq)
  24.121 +{
  24.122 +	unsigned char mask = 1 << (irq & 7);
  24.123 +	unsigned int port = 0x4d0 + (irq >> 3);
  24.124 +	unsigned char val = inb(port);
  24.125 +
  24.126 +	if (!(val & mask)) {
   24.127 +		DBG(" -> level");	/* setting the ELCR bit makes it level triggered */
  24.128 +		outb(val | mask, port);
  24.129 +	}
  24.130 +}
  24.131 +
  24.132 +/*
  24.133 + * Common IRQ routing practice: nybbles in config space,
  24.134 + * offset by some magic constant.
  24.135 + */
  24.136 +static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
  24.137 +{
  24.138 +	u8 x;
  24.139 +	unsigned reg = offset + (nr >> 1);
  24.140 +
  24.141 +	pci_read_config_byte(router, reg, &x);
  24.142 +	return (nr & 1) ? (x >> 4) : (x & 0xf);
  24.143 +}
  24.144 +
  24.145 +static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
  24.146 +{
  24.147 +	u8 x;
  24.148 +	unsigned reg = offset + (nr >> 1);
  24.149 +
  24.150 +	pci_read_config_byte(router, reg, &x);
  24.151 +	x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
  24.152 +	pci_write_config_byte(router, reg, x);
  24.153 +}
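
These two helpers pack two routing entries per configuration byte: entry nr lives at offset + nr/2, in the low nibble for even nr and the high nibble for odd nr. A stand-alone sketch of the same packing over a plain byte array standing in for the router's config space (the 0x55 offset and IRQ values below are only illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a router's config space: just a byte array here. */
static uint8_t cfg[256];

/* Same packing rule as the helpers above. */
static unsigned int get_nybble(unsigned offset, unsigned nr)
{
	uint8_t x = cfg[offset + (nr >> 1)];
	return (nr & 1) ? (x >> 4) : (x & 0xf);
}

static void set_nybble(unsigned offset, unsigned nr, unsigned val)
{
	uint8_t *p = &cfg[offset + (nr >> 1)];
	*p = (nr & 1) ? ((*p & 0x0f) | (val << 4)) : ((*p & 0xf0) | val);
}

int main(void)
{
	/* Route hypothetical PIRQ entries 0..3 at a made-up offset 0x55. */
	unsigned irqs[4] = { 11, 5, 10, 9 };
	unsigned i;

	for (i = 0; i < 4; i++)
		set_nybble(0x55, i, irqs[i]);
	for (i = 0; i < 4; i++)
		printf("entry %u -> IRQ %u (byte 0x%02x = 0x%02x)\n",
		       i, get_nybble(0x55, i), 0x55 + i / 2, cfg[0x55 + i / 2]);
	return 0;
}
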
  24.154 +
  24.155 +/*
  24.156 + * ALI pirq entries are damn ugly, and completely undocumented.
  24.157 + * This has been figured out from pirq tables, and it's not a pretty
  24.158 + * picture.
  24.159 + */
  24.160 +static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.161 +{
  24.162 +	static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
  24.163 +
  24.164 +	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
  24.165 +}
  24.166 +
  24.167 +static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.168 +{
  24.169 +	static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
  24.170 +	unsigned int val = irqmap[irq];
  24.171 +		
  24.172 +	if (val) {
  24.173 +		write_config_nybble(router, 0x48, pirq-1, val);
  24.174 +		return 1;
  24.175 +	}
  24.176 +	return 0;
  24.177 +}
  24.178 +
  24.179 +/*
  24.180 + * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
  24.181 + * just a pointer to the config space.
  24.182 + */
  24.183 +static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.184 +{
  24.185 +	u8 x;
  24.186 +
  24.187 +	pci_read_config_byte(router, pirq, &x);
  24.188 +	return (x < 16) ? x : 0;
  24.189 +}
  24.190 +
  24.191 +static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.192 +{
  24.193 +	pci_write_config_byte(router, pirq, irq);
  24.194 +	return 1;
  24.195 +}
  24.196 +
  24.197 +/*
  24.198 + * The VIA pirq rules are nibble-based, like ALI,
  24.199 + * but without the ugly irq number munging.
  24.200 + */
  24.201 +static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.202 +{
  24.203 +	return read_config_nybble(router, 0x55, pirq);
  24.204 +}
  24.205 +
  24.206 +static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.207 +{
  24.208 +	write_config_nybble(router, 0x55, pirq, irq);
  24.209 +	return 1;
  24.210 +}
  24.211 +
  24.212 +/*
   24.213 + * OPTI: the high four bits are the nibble pointer.
   24.214 + * What the low bits do is unknown.
  24.215 + */
  24.216 +static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.217 +{
  24.218 +	return read_config_nybble(router, 0xb8, pirq >> 4);
  24.219 +}
  24.220 +
  24.221 +static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.222 +{
  24.223 +	write_config_nybble(router, 0xb8, pirq >> 4, irq);
  24.224 +	return 1;
  24.225 +}
  24.226 +
  24.227 +/*
  24.228 + * Cyrix: nibble offset 0x5C
  24.229 + */
  24.230 +static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.231 +{
  24.232 +	return read_config_nybble(router, 0x5C, pirq-1);
  24.233 +}
  24.234 +
  24.235 +static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.236 +{
  24.237 +	write_config_nybble(router, 0x5C, pirq-1, irq);
  24.238 +	return 1;
  24.239 +}
  24.240 +
  24.241 +/*
  24.242 + *	PIRQ routing for SiS 85C503 router used in several SiS chipsets
  24.243 + *	According to the SiS 5595 datasheet (preliminary V1.0, 12/24/1997)
  24.244 + *	the related registers work as follows:
  24.245 + *	
  24.246 + *	general: one byte per re-routable IRQ,
  24.247 + *		 bit 7      IRQ mapping enabled (0) or disabled (1)
  24.248 + *		 bits [6:4] reserved
  24.249 + *		 bits [3:0] IRQ to map to
  24.250 + *		     allowed: 3-7, 9-12, 14-15
  24.251 + *		     reserved: 0, 1, 2, 8, 13
  24.252 + *
  24.253 + *	individual registers in device config space:
  24.254 + *
  24.255 + *	0x41/0x42/0x43/0x44:	PCI INT A/B/C/D - bits as in general case
  24.256 + *
  24.257 + *	0x61:			IDEIRQ: bits as in general case - but:
  24.258 + *				bits [6:5] must be written 01
  24.259 + *				bit 4 channel-select primary (0), secondary (1)
  24.260 + *
  24.261 + *	0x62:			USBIRQ: bits as in general case - but:
  24.262 + *				bit 4 OHCI function disabled (0), enabled (1)
  24.263 + *	
  24.264 + *	0x6a:			ACPI/SCI IRQ - bits as in general case
  24.265 + *
  24.266 + *	0x7e:			Data Acq. Module IRQ - bits as in general case
  24.267 + *
   24.268 + *	Apparently there are systems that implement the PCI routing table using
   24.269 + *	both link values 0x01-0x04 and 0x41-0x44 for PCI INTA..D, and register
   24.270 + *	offsets such as 0x62 as link values for e.g. USBIRQ, so there is no
   24.271 + *	simple "register = offset + pirq" relation.
  24.272 + *	Currently we support PCI INTA..D and USBIRQ and try our best to handle
  24.273 + *	both link mappings.
  24.274 + *	IDE/ACPI/DAQ mapping is currently unsupported (left untouched as set by BIOS).
  24.275 + */
  24.276 +
  24.277 +static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.278 +{
  24.279 +	u8 x;
  24.280 +	int reg = pirq;
  24.281 +
  24.282 +	switch(pirq) {
  24.283 +		case 0x01:
  24.284 +		case 0x02:
  24.285 +		case 0x03:
  24.286 +		case 0x04:
  24.287 +			reg += 0x40;
  24.288 +		case 0x41:
  24.289 +		case 0x42:
  24.290 +		case 0x43:
  24.291 +		case 0x44:
  24.292 +		case 0x62:
  24.293 +			pci_read_config_byte(router, reg, &x);
  24.294 +			if (reg != 0x62)
  24.295 +				break;
  24.296 +			if (!(x & 0x40))
  24.297 +				return 0;
  24.298 +			break;
  24.299 +		case 0x61:
  24.300 +		case 0x6a:
  24.301 +		case 0x7e:
  24.302 +			printk(KERN_INFO "SiS pirq: advanced IDE/ACPI/DAQ mapping not yet implemented\n");
  24.303 +			return 0;
  24.304 +		default:			
  24.305 +			printk(KERN_INFO "SiS router pirq escape (%d)\n", pirq);
  24.306 +			return 0;
  24.307 +	}
  24.308 +	return (x & 0x80) ? 0 : (x & 0x0f);
  24.309 +}
  24.310 +
  24.311 +static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.312 +{
  24.313 +	u8 x;
  24.314 +	int reg = pirq;
  24.315 +
  24.316 +	switch(pirq) {
  24.317 +		case 0x01:
  24.318 +		case 0x02:
  24.319 +		case 0x03:
  24.320 +		case 0x04:
  24.321 +			reg += 0x40;
  24.322 +		case 0x41:
  24.323 +		case 0x42:
  24.324 +		case 0x43:
  24.325 +		case 0x44:
  24.326 +		case 0x62:
  24.327 +			x = (irq&0x0f) ? (irq&0x0f) : 0x80;
  24.328 +			if (reg != 0x62)
  24.329 +				break;
  24.330 +			/* always mark OHCI enabled, as nothing else knows about this */
  24.331 +			x |= 0x40;
  24.332 +			break;
  24.333 +		case 0x61:
  24.334 +		case 0x6a:
  24.335 +		case 0x7e:
  24.336 +			printk(KERN_INFO "advanced SiS pirq mapping not yet implemented\n");
  24.337 +			return 0;
  24.338 +		default:			
  24.339 +			printk(KERN_INFO "SiS router pirq escape (%d)\n", pirq);
  24.340 +			return 0;
  24.341 +	}
  24.342 +	pci_write_config_byte(router, reg, x);
  24.343 +
  24.344 +	return 1;
  24.345 +}
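
Per the register description above, a single SiS-style routing register encodes "disabled" in bit 7 and the target IRQ in bits 3:0. A stand-alone sketch of that encode/decode step, with no config-space access:

#include <stdint.h>
#include <stdio.h>

/* Decode one routing register as described in the comment above:
 * bit 7 set means the mapping is disabled, bits 3:0 carry the IRQ.
 * Returns 0 for "not routed", as the get routine does. */
static int sis_reg_to_irq(uint8_t reg)
{
	return (reg & 0x80) ? 0 : (reg & 0x0f);
}

/* Build the value the set routine would write: disabled if irq is 0,
 * otherwise the IRQ number in the low nibble. */
static uint8_t irq_to_sis_reg(int irq)
{
	return (irq & 0x0f) ? (uint8_t)(irq & 0x0f) : 0x80;
}

int main(void)
{
	int irq;

	for (irq = 0; irq <= 15; irq++) {
		uint8_t reg = irq_to_sis_reg(irq);
		printf("irq %2d -> reg 0x%02x -> decoded %2d\n",
		       irq, reg, sis_reg_to_irq(reg));
	}
	return 0;
}
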
  24.346 +
  24.347 +/*
  24.348 + * VLSI: nibble offset 0x74 - educated guess due to routing table and
  24.349 + *       config space of VLSI 82C534 PCI-bridge/router (1004:0102)
  24.350 + *       Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
  24.351 + *       devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
  24.352 + *       for the busbridge to the docking station.
  24.353 + */
  24.354 +
  24.355 +static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.356 +{
  24.357 +	if (pirq > 8) {
  24.358 +		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
  24.359 +		return 0;
  24.360 +	}
  24.361 +	return read_config_nybble(router, 0x74, pirq-1);
  24.362 +}
  24.363 +
  24.364 +static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.365 +{
  24.366 +	if (pirq > 8) {
  24.367 +		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
  24.368 +		return 0;
  24.369 +	}
  24.370 +	write_config_nybble(router, 0x74, pirq-1, irq);
  24.371 +	return 1;
  24.372 +}
  24.373 +
  24.374 +/*
  24.375 + * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
  24.376 + * and Redirect I/O registers (0x0c00 and 0x0c01).  The Index register
  24.377 + * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a.  The Redirect
  24.378 + * register is a straight binary coding of desired PIC IRQ (low nibble).
  24.379 + *
  24.380 + * The 'link' value in the PIRQ table is already in the correct format
  24.381 + * for the Index register.  There are some special index values:
  24.382 + * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
  24.383 + * and 0x03 for SMBus.
  24.384 + */
  24.385 +static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.386 +{
  24.387 +	outb_p(pirq, 0xc00);
  24.388 +	return inb(0xc01) & 0xf;
  24.389 +}
  24.390 +
  24.391 +static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.392 +{
  24.393 +	outb_p(pirq, 0xc00);
  24.394 +	outb_p(irq, 0xc01);
  24.395 +	return 1;
  24.396 +}
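
The ServerWorks router is a classic index/data register pair: write the link value to the index port, then read or write the routed IRQ through the data port. A stand-alone sketch that simulates the two ports with an array so the access pattern can be run without real port I/O (the link value 0x1a is just the PCIIRQ10 example from the comment above):

#include <stdint.h>
#include <stdio.h>

/* Simulated redirect registers: in hardware these sit behind the
 * 0x0c00 (index) / 0x0c01 (data) I/O ports; here a plain array
 * stands in for them. */
static uint8_t redirect[256];
static uint8_t index_reg;

static void sim_outb(uint8_t val, uint16_t port)
{
	if (port == 0x0c00)
		index_reg = val;
	else if (port == 0x0c01)
		redirect[index_reg] = val;
}

static uint8_t sim_inb(uint16_t port)
{
	return (port == 0x0c01) ? redirect[index_reg] : index_reg;
}

int main(void)
{
	/* Route a hypothetical link value 0x1a (PCIIRQ10) to PIC IRQ 11. */
	sim_outb(0x1a, 0x0c00);
	sim_outb(11, 0x0c01);

	sim_outb(0x1a, 0x0c00);
	printf("link 0x1a routed to IRQ %d\n", sim_inb(0x0c01) & 0xf);
	return 0;
}
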
  24.397 +
  24.398 +/* Support for AMD756 PCI IRQ Routing
  24.399 + * Jhon H. Caicedo <jhcaiced@osso.org.co>
  24.400 + * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
  24.401 + * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
  24.402 + * The AMD756 pirq rules are nibble-based
  24.403 + * offset 0x56 0-3 PIRQA  4-7  PIRQB
  24.404 + * offset 0x57 0-3 PIRQC  4-7  PIRQD
  24.405 + */
  24.406 +static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  24.407 +{
  24.408 +	u8 irq;
  24.409 +	irq = 0;
  24.410 +	if (pirq <= 4)
  24.411 +	{
  24.412 +		irq = read_config_nybble(router, 0x56, pirq - 1);
  24.413 +	}
  24.414 +	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
  24.415 +		dev->vendor, dev->device, pirq, irq);
  24.416 +	return irq;
  24.417 +}
  24.418 +
  24.419 +static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.420 +{
  24.421 +	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", 
  24.422 +		dev->vendor, dev->device, pirq, irq);
  24.423 +	if (pirq <= 4)
  24.424 +	{
  24.425 +		write_config_nybble(router, 0x56, pirq - 1, irq);
  24.426 +	}
  24.427 +	return 1;
  24.428 +}
  24.429 +
  24.430 +#ifdef CONFIG_PCI_BIOS
  24.431 +
  24.432 +static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  24.433 +{
  24.434 +	struct pci_dev *bridge;
  24.435 +	int pin = pci_get_interrupt_pin(dev, &bridge);
  24.436 +	return pcibios_set_irq_routing(bridge, pin, irq);
  24.437 +}
  24.438 +
  24.439 +static struct irq_router pirq_bios_router =
  24.440 +	{ "BIOS", 0, 0, NULL, pirq_bios_set };
  24.441 +
  24.442 +#endif
  24.443 +
  24.444 +static struct irq_router pirq_routers[] = {
  24.445 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, pirq_piix_get, pirq_piix_set },
  24.446 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, pirq_piix_get, pirq_piix_set },
  24.447 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set },
  24.448 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX,   pirq_piix_get, pirq_piix_set },
  24.449 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_0, pirq_piix_get, pirq_piix_set },
  24.450 +	{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, pirq_piix_get, pirq_piix_set },
  24.451 +
  24.452 +	{ "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set },
  24.453 +
  24.454 +	{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set },
  24.455 +	{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set },
  24.456 +	{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, pirq_via_get, pirq_via_set },
  24.457 +
  24.458 +	{ "OPTI", PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C700, pirq_opti_get, pirq_opti_set },
  24.459 +
  24.460 +	{ "NatSemi", PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, pirq_cyrix_get, pirq_cyrix_set },
  24.461 +	{ "SIS", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, pirq_sis_get, pirq_sis_set },
  24.462 +	{ "VLSI 82C534", PCI_VENDOR_ID_VLSI, PCI_DEVICE_ID_VLSI_82C534, pirq_vlsi_get, pirq_vlsi_set },
  24.463 +	{ "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4,
  24.464 +	  pirq_serverworks_get, pirq_serverworks_set },
  24.465 +	{ "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5,
  24.466 +	  pirq_serverworks_get, pirq_serverworks_set },
  24.467 +	{ "AMD756 VIPER", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B,
  24.468 +		pirq_amd756_get, pirq_amd756_set },
  24.469 +
  24.470 +	{ "default", 0, 0, NULL, NULL }
  24.471 +};
  24.472 +
  24.473 +static struct irq_router *pirq_router;
  24.474 +static struct pci_dev *pirq_router_dev;
  24.475 +
  24.476 +static void __init pirq_find_router(void)
  24.477 +{
  24.478 +	struct irq_routing_table *rt = pirq_table;
  24.479 +	struct irq_router *r;
  24.480 +
  24.481 +#ifdef CONFIG_PCI_BIOS
  24.482 +	if (!rt->signature) {
  24.483 +		printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
  24.484 +		pirq_router = &pirq_bios_router;
  24.485 +		return;
  24.486 +	}
  24.487 +#endif
  24.488 +
  24.489 +	DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
  24.490 +	    rt->rtr_vendor, rt->rtr_device);
  24.491 +
  24.492 +	/* fall back to default router if nothing else found */
  24.493 +	pirq_router = &pirq_routers[ARRAY_SIZE(pirq_routers) - 1];
  24.494 +
  24.495 +	pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
  24.496 +	if (!pirq_router_dev) {
  24.497 +		DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
  24.498 +		return;
  24.499 +	}
  24.500 +
  24.501 +	for(r=pirq_routers; r->vendor; r++) {
  24.502 +		/* Exact match against router table entry? Use it! */
  24.503 +		if (r->vendor == rt->rtr_vendor && r->device == rt->rtr_device) {
  24.504 +			pirq_router = r;
  24.505 +			break;
  24.506 +		}
  24.507 +		/* Match against router device entry? Use it as a fallback */
  24.508 +		if (r->vendor == pirq_router_dev->vendor && r->device == pirq_router_dev->device) {
  24.509 +			pirq_router = r;
  24.510 +		}
  24.511 +	}
  24.512 +	printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
  24.513 +		pirq_router->name,
  24.514 +		pirq_router_dev->vendor,
  24.515 +		pirq_router_dev->device,
  24.516 +		pirq_router_dev->slot_name);
  24.517 +}
  24.518 +
  24.519 +static struct irq_info *pirq_get_info(struct pci_dev *dev)
  24.520 +{
  24.521 +	struct irq_routing_table *rt = pirq_table;
  24.522 +	int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
  24.523 +	struct irq_info *info;
  24.524 +
  24.525 +	for (info = rt->slots; entries--; info++)
  24.526 +		if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
  24.527 +			return info;
  24.528 +	return NULL;
  24.529 +}
  24.530 +
  24.531 +static void pcibios_test_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
  24.532 +{
  24.533 +}
  24.534 +
  24.535 +static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
  24.536 +{
  24.537 +	u8 pin;
  24.538 +	struct irq_info *info;
  24.539 +	int i, pirq, newirq;
  24.540 +	int irq = 0;
  24.541 +	u32 mask;
  24.542 +	struct irq_router *r = pirq_router;
  24.543 +	struct pci_dev *dev2;
  24.544 +	char *msg = NULL;
  24.545 +
  24.546 +	if (!pirq_table)
  24.547 +		return 0;
  24.548 +
  24.549 +	/* Find IRQ routing entry */
  24.550 +	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
  24.551 +	if (!pin) {
  24.552 +		DBG(" -> no interrupt pin\n");
  24.553 +		return 0;
  24.554 +	}
  24.555 +	pin = pin - 1;
  24.556 +	
  24.557 +	DBG("IRQ for %s:%d", dev->slot_name, pin);
  24.558 +	info = pirq_get_info(dev);
  24.559 +	if (!info) {
  24.560 +		DBG(" -> not found in routing table\n");
  24.561 +		return 0;
  24.562 +	}
  24.563 +	pirq = info->irq[pin].link;
  24.564 +	mask = info->irq[pin].bitmap;
  24.565 +	if (!pirq) {
  24.566 +		DBG(" -> not routed\n");
  24.567 +		return 0;
  24.568 +	}
  24.569 +	DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
  24.570 +	mask &= pcibios_irq_mask;
  24.571 +
  24.572 +	/*
  24.573 +	 * Find the best IRQ to assign: use the one
  24.574 +	 * reported by the device if possible.
  24.575 +	 */
  24.576 +	newirq = dev->irq;
  24.577 +	if (!newirq && assign) {
  24.578 +		for (i = 0; i < 16; i++) {
  24.579 +			if (!(mask & (1 << i)))
  24.580 +				continue;
  24.581 +			if (pirq_penalty[i] < pirq_penalty[newirq] &&
  24.582 +			    !request_irq(i, pcibios_test_irq_handler, SA_SHIRQ, "pci-test", dev)) {
  24.583 +				free_irq(i, dev);
  24.584 +				newirq = i;
  24.585 +			}
  24.586 +		}
  24.587 +	}
  24.588 +	DBG(" -> newirq=%d", newirq);
  24.589 +
  24.590 +	/* Check if it is hardcoded */
  24.591 +	if ((pirq & 0xf0) == 0xf0) {
  24.592 +		irq = pirq & 0xf;
  24.593 +		DBG(" -> hardcoded IRQ %d\n", irq);
  24.594 +		msg = "Hardcoded";
  24.595 +	} else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq))) {
  24.596 +		DBG(" -> got IRQ %d\n", irq);
  24.597 +		msg = "Found";
  24.598 +	} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
  24.599 +		DBG(" -> assigning IRQ %d", newirq);
  24.600 +		if (r->set(pirq_router_dev, dev, pirq, newirq)) {
  24.601 +			eisa_set_level_irq(newirq);
  24.602 +			DBG(" ... OK\n");
  24.603 +			msg = "Assigned";
  24.604 +			irq = newirq;
  24.605 +		}
  24.606 +	}
  24.607 +
  24.608 +	if (!irq) {
  24.609 +		DBG(" ... failed\n");
  24.610 +		if (newirq && mask == (1 << newirq)) {
  24.611 +			msg = "Guessed";
  24.612 +			irq = newirq;
  24.613 +		} else
  24.614 +			return 0;
  24.615 +	}
  24.616 +	printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, dev->slot_name);
  24.617 +
  24.618 +	/* Update IRQ for all devices with the same pirq value */
  24.619 +	pci_for_each_dev(dev2) {
  24.620 +		pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
  24.621 +		if (!pin)
  24.622 +			continue;
  24.623 +		pin--;
  24.624 +		info = pirq_get_info(dev2);
  24.625 +		if (!info)
  24.626 +			continue;
  24.627 +		if (info->irq[pin].link == pirq) {
  24.628 +			/* We refuse to override the dev->irq information. Give a warning! */
  24.629 +		    	if (dev2->irq && dev2->irq != irq) {
  24.630 +		    		printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
  24.631 +				       dev2->slot_name, dev2->irq, irq);
  24.632 +		    		continue;
  24.633 +		    	}
  24.634 +			dev2->irq = irq;
  24.635 +			pirq_penalty[irq]++;
  24.636 +			if (dev != dev2)
  24.637 +				printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, dev2->slot_name);
  24.638 +		}
  24.639 +	}
  24.640 +	return 1;
  24.641 +}
  24.642 +
  24.643 +void __init pcibios_irq_init(void)
  24.644 +{
  24.645 +	DBG("PCI: IRQ init\n");
  24.646 +	pirq_table = pirq_find_routing_table();
  24.647 +#ifdef CONFIG_PCI_BIOS
  24.648 +	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
  24.649 +		pirq_table = pcibios_get_irq_routing_table();
  24.650 +#endif
  24.651 +	if (pirq_table) {
  24.652 +		pirq_peer_trick();
  24.653 +		pirq_find_router();
  24.654 +		if (pirq_table->exclusive_irqs) {
  24.655 +			int i;
  24.656 +			for (i=0; i<16; i++)
  24.657 +				if (!(pirq_table->exclusive_irqs & (1 << i)))
  24.658 +					pirq_penalty[i] += 100;
  24.659 +		}
  24.660 +		/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
  24.661 +		if (io_apic_assign_pci_irqs)
  24.662 +			pirq_table = NULL;
  24.663 +	}
  24.664 +}
  24.665 +
  24.666 +void __init pcibios_fixup_irqs(void)
  24.667 +{
  24.668 +	struct pci_dev *dev;
  24.669 +	u8 pin;
  24.670 +
  24.671 +	DBG("PCI: IRQ fixup\n");
  24.672 +	pci_for_each_dev(dev) {
  24.673 +		/*
   24.674 +		 * If the BIOS has set an out-of-range IRQ number, just ignore it.
   24.675 +		 * Also keep track of which IRQs are already in use.
  24.676 +		 */
  24.677 +		if (dev->irq >= 16) {
  24.678 +			DBG("%s: ignoring bogus IRQ %d\n", dev->slot_name, dev->irq);
  24.679 +			dev->irq = 0;
  24.680 +		}
  24.681 +		/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
  24.682 +		if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
  24.683 +			pirq_penalty[dev->irq] = 0;
  24.684 +		pirq_penalty[dev->irq]++;
  24.685 +	}
  24.686 +
  24.687 +	pci_for_each_dev(dev) {
  24.688 +		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
  24.689 +#ifdef CONFIG_X86_IO_APIC
  24.690 +		/*
  24.691 +		 * Recalculate IRQ numbers if we use the I/O APIC.
  24.692 +		 */
  24.693 +		if (io_apic_assign_pci_irqs)
  24.694 +		{
  24.695 +			int irq;
  24.696 +
  24.697 +			if (pin) {
  24.698 +				pin--;		/* interrupt pins are numbered starting from 1 */
  24.699 +				irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
  24.700 +	/*
  24.701 +	 * Busses behind bridges are typically not listed in the MP-table.
  24.702 +	 * In this case we have to look up the IRQ based on the parent bus,
  24.703 +	 * parent slot, and pin number. The SMP code detects such bridged
  24.704 +	 * busses itself so we should get into this branch reliably.
  24.705 +	 */
  24.706 +				if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
  24.707 +					struct pci_dev * bridge = dev->bus->self;
  24.708 +
  24.709 +					pin = (pin + PCI_SLOT(dev->devfn)) % 4;
  24.710 +					irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
  24.711 +							PCI_SLOT(bridge->devfn), pin);
  24.712 +					if (irq >= 0)
  24.713 +						printk(KERN_WARNING "PCI: using PPB(B%d,I%d,P%d) to get irq %d\n", 
  24.714 +							bridge->bus->number, PCI_SLOT(bridge->devfn), pin, irq);
  24.715 +				}
  24.716 +				if (irq >= 0) {
  24.717 +					printk(KERN_INFO "PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n",
  24.718 +						dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
  24.719 +					dev->irq = irq;
  24.720 +				}
  24.721 +			}
  24.722 +		}
  24.723 +#endif
  24.724 +		/*
  24.725 +		 * Still no IRQ? Try to lookup one...
  24.726 +		 */
  24.727 +		if (pin && !dev->irq)
  24.728 +			pcibios_lookup_irq(dev, 0);
  24.729 +	}
  24.730 +}
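
For devices behind a PCI-to-PCI bridge, the code above rotates the interrupt pin by the device's slot number before looking it up on the parent bus. A stand-alone sketch of that swizzle, with pins numbered 0-3 (INTA-INTD):

#include <stdio.h>

/* The pin swizzle used above: the device's pin (0-based, INTA=0) is
 * rotated by its slot number to get the pin seen on the parent bus. */
static int swizzle_pin(int pin, int slot)
{
	return (pin + slot) % 4;
}

int main(void)
{
	int slot, pin;

	for (slot = 0; slot < 4; slot++)
		for (pin = 0; pin < 4; pin++)
			printf("slot %d INT%c -> parent INT%c\n",
			       slot, 'A' + pin, 'A' + swizzle_pin(pin, slot));
	return 0;
}
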
  24.731 +
  24.732 +void pcibios_penalize_isa_irq(int irq)
  24.733 +{
  24.734 +	/*
  24.735 +	 *  If any ISAPnP device reports an IRQ in its list of possible
   24.736 +	 *  IRQs, we try to avoid assigning it to PCI devices.
  24.737 +	 */
  24.738 +	pirq_penalty[irq] += 100;
  24.739 +}
  24.740 +
  24.741 +void pcibios_enable_irq(struct pci_dev *dev)
  24.742 +{
  24.743 +	u8 pin;
  24.744 +	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
  24.745 +	if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
  24.746 +		char *msg;
  24.747 +		if (io_apic_assign_pci_irqs)
  24.748 +			msg = " Probably buggy MP table.";
  24.749 +		else if (pci_probe & PCI_BIOS_IRQ_SCAN)
  24.750 +			msg = "";
  24.751 +		else
  24.752 +			msg = " Please try using pci=biosirq.";
  24.753 +		printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
  24.754 +		       'A' + pin - 1, dev->slot_name, msg);
  24.755 +	}
  24.756 +}
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen-2.4.16/arch/i386/pci-pc.c	Wed Nov 20 12:02:17 2002 +0000
    25.3 @@ -0,0 +1,1276 @@
    25.4 +/*
    25.5 + *	Low-Level PCI Support for PC
    25.6 + *
    25.7 + *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
    25.8 + */
    25.9 +
   25.10 +#include <linux/config.h>
   25.11 +#include <linux/types.h>
   25.12 +//#include <linux/kernel.h>
   25.13 +#include <linux/sched.h>
   25.14 +#include <linux/pci.h>
   25.15 +#include <linux/init.h>
   25.16 +#include <linux/ioport.h>
   25.17 +
   25.18 +//#include <asm/segment.h>
   25.19 +#include <asm/io.h>
   25.20 +
   25.21 +#include "pci-i386.h"
   25.22 +
   25.23 +#define __KERNEL_CS __HYPERVISOR_CS
   25.24 +#define __KERNEL_DS __HYPERVISOR_DS
   25.25 +
   25.26 +unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2;
   25.27 +
   25.28 +int pcibios_last_bus = -1;
   25.29 +struct pci_bus *pci_root_bus = NULL;
   25.30 +struct pci_ops *pci_root_ops = NULL;
   25.31 +
   25.32 +int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL;
   25.33 +int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL;
   25.34 +
   25.35 +/*
   25.36 + * This interrupt-safe spinlock protects all accesses to PCI
   25.37 + * configuration space.
   25.38 + */
   25.39 +spinlock_t pci_config_lock = SPIN_LOCK_UNLOCKED;
   25.40 +
   25.41 +
   25.42 +/*
   25.43 + * Functions for accessing PCI configuration space with type 1 accesses
   25.44 + */
   25.45 +
   25.46 +#ifdef CONFIG_PCI_DIRECT
   25.47 +
   25.48 +#define PCI_CONF1_ADDRESS(bus, dev, fn, reg) \
   25.49 +	(0x80000000 | (bus << 16) | (dev << 11) | (fn << 8) | (reg & ~3))
   25.50 +
   25.51 +static int pci_conf1_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
   25.52 +{
   25.53 +	unsigned long flags;
   25.54 +
   25.55 +	if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
   25.56 +		return -EINVAL;
   25.57 +
   25.58 +	spin_lock_irqsave(&pci_config_lock, flags);
   25.59 +
   25.60 +	outl(PCI_CONF1_ADDRESS(bus, dev, fn, reg), 0xCF8);
   25.61 +
   25.62 +	switch (len) {
   25.63 +	case 1:
   25.64 +		*value = inb(0xCFC + (reg & 3));
   25.65 +		break;
   25.66 +	case 2:
   25.67 +		*value = inw(0xCFC + (reg & 2));
   25.68 +		break;
   25.69 +	case 4:
   25.70 +		*value = inl(0xCFC);
   25.71 +		break;
   25.72 +	}
   25.73 +
   25.74 +	spin_unlock_irqrestore(&pci_config_lock, flags);
   25.75 +
   25.76 +	return 0;
   25.77 +}
   25.78 +
   25.79 +static int pci_conf1_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
   25.80 +{
   25.81 +	unsigned long flags;
   25.82 +
   25.83 +	if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) 
   25.84 +		return -EINVAL;
   25.85 +
   25.86 +	spin_lock_irqsave(&pci_config_lock, flags);
   25.87 +
   25.88 +	outl(PCI_CONF1_ADDRESS(bus, dev, fn, reg), 0xCF8);
   25.89 +
   25.90 +	switch (len) {
   25.91 +	case 1:
   25.92 +		outb((u8)value, 0xCFC + (reg & 3));
   25.93 +		break;
   25.94 +	case 2:
   25.95 +		outw((u16)value, 0xCFC + (reg & 2));
   25.96 +		break;
   25.97 +	case 4:
   25.98 +		outl((u32)value, 0xCFC);
   25.99 +		break;
  25.100 +	}
  25.101 +
  25.102 +	spin_unlock_irqrestore(&pci_config_lock, flags);
  25.103 +
  25.104 +	return 0;
  25.105 +}
  25.106 +
  25.107 +#undef PCI_CONF1_ADDRESS
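
The PCI_CONF1_ADDRESS encoding used above can be checked in isolation: enable bit 31, bus in bits 23:16, device in 15:11, function in 10:8 and the dword-aligned register in 7:2, with the byte lane selected on the 0xCFC data port. A stand-alone sketch that only computes the values (no port I/O; the bus/device/function chosen below are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same encoding as PCI_CONF1_ADDRESS above; this only builds the value
 * that would be written to CONFIG_ADDRESS (port 0xCF8). */
static uint32_t conf1_address(unsigned bus, unsigned dev,
			      unsigned fn, unsigned reg)
{
	return 0x80000000u | (bus << 16) | (dev << 11) | (fn << 8) | (reg & ~3u);
}

int main(void)
{
	/* Hypothetical target: bus 0, device 31, function 3, register 0x40. */
	uint32_t addr = conf1_address(0, 31, 3, 0x40);

	printf("CONFIG_ADDRESS = 0x%08x\n", (unsigned)addr);
	printf("data port      = 0x%x\n", 0xCFC + (0x40 & 3));
	return 0;
}
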
  25.108 +
  25.109 +static int pci_conf1_read_config_byte(struct pci_dev *dev, int where, u8 *value)
  25.110 +{
  25.111 +	int result; 
  25.112 +	u32 data;
  25.113 +
  25.114 +	if (!value) 
  25.115 +		return -EINVAL;
  25.116 +
  25.117 +	result = pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.118 +		PCI_FUNC(dev->devfn), where, 1, &data);
  25.119 +
  25.120 +	*value = (u8)data;
  25.121 +
  25.122 +	return result;
  25.123 +}
  25.124 +
  25.125 +static int pci_conf1_read_config_word(struct pci_dev *dev, int where, u16 *value)
  25.126 +{
  25.127 +	int result; 
  25.128 +	u32 data;
  25.129 +
  25.130 +	if (!value) 
  25.131 +		return -EINVAL;
  25.132 +
  25.133 +	result = pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.134 +		PCI_FUNC(dev->devfn), where, 2, &data);
  25.135 +
  25.136 +	*value = (u16)data;
  25.137 +
  25.138 +	return result;
  25.139 +}
  25.140 +
  25.141 +static int pci_conf1_read_config_dword(struct pci_dev *dev, int where, u32 *value)
  25.142 +{
  25.143 +	if (!value) 
  25.144 +		return -EINVAL;
  25.145 +
  25.146 +	return pci_conf1_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.147 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.148 +}
  25.149 +
  25.150 +static int pci_conf1_write_config_byte(struct pci_dev *dev, int where, u8 value)
  25.151 +{
  25.152 +	return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.153 +		PCI_FUNC(dev->devfn), where, 1, value);
  25.154 +}
  25.155 +
  25.156 +static int pci_conf1_write_config_word(struct pci_dev *dev, int where, u16 value)
  25.157 +{
  25.158 +	return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.159 +		PCI_FUNC(dev->devfn), where, 2, value);
  25.160 +}
  25.161 +
  25.162 +static int pci_conf1_write_config_dword(struct pci_dev *dev, int where, u32 value)
  25.163 +{
  25.164 +	return pci_conf1_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.165 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.166 +}
  25.167 +
  25.168 +static struct pci_ops pci_direct_conf1 = {
  25.169 +	pci_conf1_read_config_byte,
  25.170 +	pci_conf1_read_config_word,
  25.171 +	pci_conf1_read_config_dword,
  25.172 +	pci_conf1_write_config_byte,
  25.173 +	pci_conf1_write_config_word,
  25.174 +	pci_conf1_write_config_dword
  25.175 +};
  25.176 +
  25.177 +
  25.178 +/*
  25.179 + * Functions for accessing PCI configuration space with type 2 accesses
  25.180 + */
  25.181 +
  25.182 +#define PCI_CONF2_ADDRESS(dev, reg)	(u16)(0xC000 | (dev << 8) | reg)
  25.183 +
  25.184 +static int pci_conf2_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
  25.185 +{
  25.186 +	unsigned long flags;
  25.187 +
  25.188 +	if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
  25.189 +		return -EINVAL;
  25.190 +
  25.191 +	if (dev & 0x10) 
  25.192 +		return PCIBIOS_DEVICE_NOT_FOUND;
  25.193 +
  25.194 +	spin_lock_irqsave(&pci_config_lock, flags);
  25.195 +
  25.196 +	outb((u8)(0xF0 | (fn << 1)), 0xCF8);
  25.197 +	outb((u8)bus, 0xCFA);
  25.198 +
  25.199 +	switch (len) {
  25.200 +	case 1:
  25.201 +		*value = inb(PCI_CONF2_ADDRESS(dev, reg));
  25.202 +		break;
  25.203 +	case 2:
  25.204 +		*value = inw(PCI_CONF2_ADDRESS(dev, reg));
  25.205 +		break;
  25.206 +	case 4:
  25.207 +		*value = inl(PCI_CONF2_ADDRESS(dev, reg));
  25.208 +		break;
  25.209 +	}
  25.210 +
  25.211 +	outb (0, 0xCF8);
  25.212 +
  25.213 +	spin_unlock_irqrestore(&pci_config_lock, flags);
  25.214 +
  25.215 +	return 0;
  25.216 +}
  25.217 +
  25.218 +static int pci_conf2_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
  25.219 +{
  25.220 +	unsigned long flags;
  25.221 +
  25.222 +	if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) 
  25.223 +		return -EINVAL;
  25.224 +
  25.225 +	if (dev & 0x10) 
  25.226 +		return PCIBIOS_DEVICE_NOT_FOUND;
  25.227 +
  25.228 +	spin_lock_irqsave(&pci_config_lock, flags);
  25.229 +
  25.230 +	outb((u8)(0xF0 | (fn << 1)), 0xCF8);
  25.231 +	outb((u8)bus, 0xCFA);
  25.232 +
  25.233 +	switch (len) {
  25.234 +	case 1:
  25.235 +		outb ((u8)value, PCI_CONF2_ADDRESS(dev, reg));
  25.236 +		break;
  25.237 +	case 2:
  25.238 +		outw ((u16)value, PCI_CONF2_ADDRESS(dev, reg));
  25.239 +		break;
  25.240 +	case 4:
  25.241 +		outl ((u32)value, PCI_CONF2_ADDRESS(dev, reg));
  25.242 +		break;
  25.243 +	}
  25.244 +
  25.245 +	outb (0, 0xCF8);    
  25.246 +
  25.247 +	spin_unlock_irqrestore(&pci_config_lock, flags);
  25.248 +
  25.249 +	return 0;
  25.250 +}
  25.251 +
  25.252 +#undef PCI_CONF2_ADDRESS
  25.253 +
  25.254 +static int pci_conf2_read_config_byte(struct pci_dev *dev, int where, u8 *value)
  25.255 +{
  25.256 +	int result; 
  25.257 +	u32 data;
  25.258 +	result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.259 +		PCI_FUNC(dev->devfn), where, 1, &data);
  25.260 +	*value = (u8)data;
  25.261 +	return result;
  25.262 +}
  25.263 +
  25.264 +static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value)
  25.265 +{
  25.266 +	int result; 
  25.267 +	u32 data;
  25.268 +	result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.269 +		PCI_FUNC(dev->devfn), where, 2, &data);
  25.270 +	*value = (u16)data;
  25.271 +	return result;
  25.272 +}
  25.273 +
  25.274 +static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value)
  25.275 +{
  25.276 +	return pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.277 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.278 +}
  25.279 +
  25.280 +static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value)
  25.281 +{
  25.282 +	return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.283 +		PCI_FUNC(dev->devfn), where, 1, value);
  25.284 +}
  25.285 +
  25.286 +static int pci_conf2_write_config_word(struct pci_dev *dev, int where, u16 value)
  25.287 +{
  25.288 +	return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.289 +		PCI_FUNC(dev->devfn), where, 2, value);
  25.290 +}
  25.291 +
  25.292 +static int pci_conf2_write_config_dword(struct pci_dev *dev, int where, u32 value)
  25.293 +{
  25.294 +	return pci_conf2_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.295 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.296 +}
  25.297 +
  25.298 +static struct pci_ops pci_direct_conf2 = {
  25.299 +	pci_conf2_read_config_byte,
  25.300 +	pci_conf2_read_config_word,
  25.301 +	pci_conf2_read_config_dword,
  25.302 +	pci_conf2_write_config_byte,
  25.303 +	pci_conf2_write_config_word,
  25.304 +	pci_conf2_write_config_dword
  25.305 +};
  25.306 +
  25.307 +
  25.308 +/*
  25.309 + * Before we decide to use direct hardware access mechanisms, we try to do some
  25.310 + * trivial checks to ensure it at least _seems_ to be working -- we just test
  25.311 + * whether bus 00 contains a host bridge (this is similar to checking
  25.312 + * techniques used in XFree86, but ours should be more reliable since we
  25.313 + * attempt to make use of direct access hints provided by the PCI BIOS).
  25.314 + *
  25.315 + * This should be close to trivial, but it isn't, because there are buggy
  25.316 + * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
  25.317 + */
  25.318 +static int __devinit pci_sanity_check(struct pci_ops *o)
  25.319 +{
  25.320 +	u16 x;
  25.321 +	struct pci_bus bus;		/* Fake bus and device */
  25.322 +	struct pci_dev dev;
  25.323 +
  25.324 +	if (pci_probe & PCI_NO_CHECKS)
  25.325 +		return 1;
  25.326 +	bus.number = 0;
  25.327 +	dev.bus = &bus;
  25.328 +	for(dev.devfn=0; dev.devfn < 0x100; dev.devfn++)
  25.329 +		if ((!o->read_word(&dev, PCI_CLASS_DEVICE, &x) &&
  25.330 +		     (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)) ||
  25.331 +		    (!o->read_word(&dev, PCI_VENDOR_ID, &x) &&
  25.332 +		     (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ)))
  25.333 +			return 1;
  25.334 +	DBG("PCI: Sanity check failed\n");
  25.335 +	return 0;
  25.336 +}
  25.337 +
  25.338 +static struct pci_ops * __devinit pci_check_direct(void)
  25.339 +{
  25.340 +	unsigned int tmp;
  25.341 +	unsigned long flags;
  25.342 +
  25.343 +	__save_flags(flags); __cli();
  25.344 +
  25.345 +	/*
  25.346 +	 * Check if configuration type 1 works.
  25.347 +	 */
  25.348 +	if (pci_probe & PCI_PROBE_CONF1) {
  25.349 +		outb (0x01, 0xCFB);
  25.350 +		tmp = inl (0xCF8);
  25.351 +		outl (0x80000000, 0xCF8);
  25.352 +		if (inl (0xCF8) == 0x80000000 &&
  25.353 +		    pci_sanity_check(&pci_direct_conf1)) {
  25.354 +			outl (tmp, 0xCF8);
  25.355 +			__restore_flags(flags);
  25.356 +			printk("PCI: Using configuration type 1\n");
  25.357 +			request_region(0xCF8, 8, "PCI conf1");
  25.358 +			return &pci_direct_conf1;
  25.359 +		}
  25.360 +		outl (tmp, 0xCF8);
  25.361 +	}
  25.362 +
  25.363 +	/*
  25.364 +	 * Check if configuration type 2 works.
  25.365 +	 */
  25.366 +	if (pci_probe & PCI_PROBE_CONF2) {
  25.367 +		outb (0x00, 0xCFB);
  25.368 +		outb (0x00, 0xCF8);
  25.369 +		outb (0x00, 0xCFA);
  25.370 +		if (inb (0xCF8) == 0x00 && inb (0xCFA) == 0x00 &&
  25.371 +		    pci_sanity_check(&pci_direct_conf2)) {
  25.372 +			__restore_flags(flags);
  25.373 +			printk("PCI: Using configuration type 2\n");
  25.374 +			request_region(0xCF8, 4, "PCI conf2");
  25.375 +			return &pci_direct_conf2;
  25.376 +		}
  25.377 +	}
  25.378 +
  25.379 +	__restore_flags(flags);
  25.380 +	return NULL;
  25.381 +}
  25.382 +
  25.383 +#endif
  25.384 +
  25.385 +/*
  25.386 + * BIOS32 and PCI BIOS handling.
  25.387 + */
  25.388 +
  25.389 +#ifdef CONFIG_PCI_BIOS
  25.390 +
  25.391 +#define PCIBIOS_PCI_FUNCTION_ID 	0xb1XX
  25.392 +#define PCIBIOS_PCI_BIOS_PRESENT 	0xb101
  25.393 +#define PCIBIOS_FIND_PCI_DEVICE		0xb102
  25.394 +#define PCIBIOS_FIND_PCI_CLASS_CODE	0xb103
  25.395 +#define PCIBIOS_GENERATE_SPECIAL_CYCLE	0xb106
  25.396 +#define PCIBIOS_READ_CONFIG_BYTE	0xb108
  25.397 +#define PCIBIOS_READ_CONFIG_WORD	0xb109
  25.398 +#define PCIBIOS_READ_CONFIG_DWORD	0xb10a
  25.399 +#define PCIBIOS_WRITE_CONFIG_BYTE	0xb10b
  25.400 +#define PCIBIOS_WRITE_CONFIG_WORD	0xb10c
  25.401 +#define PCIBIOS_WRITE_CONFIG_DWORD	0xb10d
  25.402 +#define PCIBIOS_GET_ROUTING_OPTIONS	0xb10e
  25.403 +#define PCIBIOS_SET_PCI_HW_INT		0xb10f
  25.404 +
  25.405 +/* BIOS32 signature: "_32_" */
  25.406 +#define BIOS32_SIGNATURE	(('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
  25.407 +
  25.408 +/* PCI signature: "PCI " */
  25.409 +#define PCI_SIGNATURE		(('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
  25.410 +
  25.411 +/* PCI service signature: "$PCI" */
  25.412 +#define PCI_SERVICE		(('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
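
Each of these signatures is four ASCII characters packed into a 32-bit word, low byte first, so that on a little-endian machine (as this i386 code assumes) a signature test is a single word compare against memory. A stand-alone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
#define PCI_SIGNATURE    (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
#define PCI_SERVICE      (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))

/* Load four ASCII bytes from memory and compare against a packed
 * little-endian signature constant. */
static int matches(const char *text, uint32_t sig)
{
	uint32_t word;

	memcpy(&word, text, 4);
	return word == sig;
}

int main(void)
{
	printf("\"_32_\" matches BIOS32_SIGNATURE: %d\n", matches("_32_", BIOS32_SIGNATURE));
	printf("\"PCI \" matches PCI_SIGNATURE:    %d\n", matches("PCI ", PCI_SIGNATURE));
	printf("\"$PCI\" matches PCI_SERVICE:      %d\n", matches("$PCI", PCI_SERVICE));
	return 0;
}
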
  25.413 +
  25.414 +/* PCI BIOS hardware mechanism flags */
  25.415 +#define PCIBIOS_HW_TYPE1		0x01
  25.416 +#define PCIBIOS_HW_TYPE2		0x02
  25.417 +#define PCIBIOS_HW_TYPE1_SPEC		0x10
  25.418 +#define PCIBIOS_HW_TYPE2_SPEC		0x20
  25.419 +
  25.420 +/*
  25.421 + * This is the standard structure used to identify the entry point
  25.422 + * to the BIOS32 Service Directory, as documented in
  25.423 + * 	Standard BIOS 32-bit Service Directory Proposal
  25.424 + * 	Revision 0.4 May 24, 1993
  25.425 + * 	Phoenix Technologies Ltd.
  25.426 + *	Norwood, MA
  25.427 + * and the PCI BIOS specification.
  25.428 + */
  25.429 +
  25.430 +union bios32 {
  25.431 +	struct {
  25.432 +		unsigned long signature;	/* _32_ */
  25.433 +		unsigned long entry;		/* 32 bit physical address */
  25.434 +		unsigned char revision;		/* Revision level, 0 */
   25.435 +		unsigned char length;		/* Length in paragraphs, should be 01 */
  25.436 +		unsigned char checksum;		/* All bytes must add up to zero */
  25.437 +		unsigned char reserved[5]; 	/* Must be zero */
  25.438 +	} fields;
  25.439 +	char chars[16];
  25.440 +};
  25.441 +
  25.442 +/*
  25.443 + * Physical address of the service directory.  I don't know if we're
  25.444 + * allowed to have more than one of these or not, so just in case
  25.445 + * we'll make pcibios_present() take a memory start parameter and store
  25.446 + * the array there.
  25.447 + */
  25.448 +
  25.449 +static struct {
  25.450 +	unsigned long address;
  25.451 +	unsigned short segment;
  25.452 +} bios32_indirect = { 0, __KERNEL_CS };
  25.453 +
  25.454 +/*
  25.455 + * Returns the entry point for the given service, NULL on error
  25.456 + */
  25.457 +
  25.458 +static unsigned long bios32_service(unsigned long service)
  25.459 +{
  25.460 +	unsigned char return_code;	/* %al */
  25.461 +	unsigned long address;		/* %ebx */
  25.462 +	unsigned long length;		/* %ecx */
  25.463 +	unsigned long entry;		/* %edx */
  25.464 +	unsigned long flags;
  25.465 +
  25.466 +	__save_flags(flags); __cli();
  25.467 +	__asm__("lcall *(%%edi); cld"
  25.468 +		: "=a" (return_code),
  25.469 +		  "=b" (address),
  25.470 +		  "=c" (length),
  25.471 +		  "=d" (entry)
  25.472 +		: "0" (service),
  25.473 +		  "1" (0),
  25.474 +		  "D" (&bios32_indirect));
  25.475 +	__restore_flags(flags);
  25.476 +
  25.477 +	switch (return_code) {
  25.478 +		case 0:
  25.479 +			return address + entry;
  25.480 +		case 0x80:	/* Not present */
  25.481 +			printk("bios32_service(0x%lx): not present\n", service);
  25.482 +			return 0;
  25.483 +		default: /* Shouldn't happen */
  25.484 +			printk("bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
  25.485 +				service, return_code);
  25.486 +			return 0;
  25.487 +	}
  25.488 +}
  25.489 +
  25.490 +static struct {
  25.491 +	unsigned long address;
  25.492 +	unsigned short segment;
  25.493 +} pci_indirect = { 0, __KERNEL_CS };
  25.494 +
  25.495 +static int pci_bios_present;
  25.496 +
  25.497 +static int __devinit check_pcibios(void)
  25.498 +{
  25.499 +	u32 signature, eax, ebx, ecx;
  25.500 +	u8 status, major_ver, minor_ver, hw_mech;
  25.501 +	unsigned long flags, pcibios_entry;
  25.502 +
  25.503 +	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
  25.504 +		pci_indirect.address = pcibios_entry + PAGE_OFFSET;
  25.505 +
  25.506 +		__save_flags(flags); __cli();
  25.507 +		__asm__(
  25.508 +			"lcall *(%%edi); cld\n\t"
  25.509 +			"jc 1f\n\t"
  25.510 +			"xor %%ah, %%ah\n"
  25.511 +			"1:"
  25.512 +			: "=d" (signature),
  25.513 +			  "=a" (eax),
  25.514 +			  "=b" (ebx),
  25.515 +			  "=c" (ecx)
  25.516 +			: "1" (PCIBIOS_PCI_BIOS_PRESENT),
  25.517 +			  "D" (&pci_indirect)
  25.518 +			: "memory");
  25.519 +		__restore_flags(flags);
  25.520 +
  25.521 +		status = (eax >> 8) & 0xff;
  25.522 +		hw_mech = eax & 0xff;
  25.523 +		major_ver = (ebx >> 8) & 0xff;
  25.524 +		minor_ver = ebx & 0xff;
  25.525 +		if (pcibios_last_bus < 0)
  25.526 +			pcibios_last_bus = ecx & 0xff;
  25.527 +		DBG("PCI: BIOS probe returned s=%02x hw=%02x ver=%02x.%02x l=%02x\n",
  25.528 +			status, hw_mech, major_ver, minor_ver, pcibios_last_bus);
  25.529 +		if (status || signature != PCI_SIGNATURE) {
  25.530 +			printk (KERN_ERR "PCI: BIOS BUG #%x[%08x] found\n",
  25.531 +				status, signature);
  25.532 +			return 0;
  25.533 +		}
  25.534 +		printk("PCI: PCI BIOS revision %x.%02x entry at 0x%lx, last bus=%d\n",
  25.535 +			major_ver, minor_ver, pcibios_entry, pcibios_last_bus);
  25.536 +#ifdef CONFIG_PCI_DIRECT
  25.537 +		if (!(hw_mech & PCIBIOS_HW_TYPE1))
  25.538 +			pci_probe &= ~PCI_PROBE_CONF1;
  25.539 +		if (!(hw_mech & PCIBIOS_HW_TYPE2))
  25.540 +			pci_probe &= ~PCI_PROBE_CONF2;
  25.541 +#endif
  25.542 +		return 1;
  25.543 +	}
  25.544 +	return 0;
  25.545 +}
  25.546 +
  25.547 +static int __devinit pci_bios_find_device (unsigned short vendor, unsigned short device_id,
  25.548 +					unsigned short index, unsigned char *bus, unsigned char *device_fn)
  25.549 +{
  25.550 +	unsigned short bx;
  25.551 +	unsigned short ret;
  25.552 +
  25.553 +	__asm__("lcall *(%%edi); cld\n\t"
  25.554 +		"jc 1f\n\t"
  25.555 +		"xor %%ah, %%ah\n"
  25.556 +		"1:"
  25.557 +		: "=b" (bx),
  25.558 +		  "=a" (ret)
  25.559 +		: "1" (PCIBIOS_FIND_PCI_DEVICE),
  25.560 +		  "c" (device_id),
  25.561 +		  "d" (vendor),
  25.562 +		  "S" ((int) index),
  25.563 +		  "D" (&pci_indirect));
  25.564 +	*bus = (bx >> 8) & 0xff;
  25.565 +	*device_fn = bx & 0xff;
  25.566 +	return (int) (ret & 0xff00) >> 8;
  25.567 +}
  25.568 +
  25.569 +static int pci_bios_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
  25.570 +{
  25.571 +	unsigned long result = 0;
  25.572 +	unsigned long flags;
  25.573 +	unsigned long bx = ((bus << 8) | (dev << 3) | fn);
  25.574 +
  25.575 +	if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
  25.576 +		return -EINVAL;
  25.577 +
  25.578 +	spin_lock_irqsave(&pci_config_lock, flags);
  25.579 +
  25.580 +	switch (len) {
  25.581 +	case 1:
  25.582 +		__asm__("lcall *(%%esi); cld\n\t"
  25.583 +			"jc 1f\n\t"
  25.584 +			"xor %%ah, %%ah\n"
  25.585 +			"1:"
  25.586 +			: "=c" (*value),
  25.587 +			  "=a" (result)
  25.588 +			: "1" (PCIBIOS_READ_CONFIG_BYTE),
  25.589 +			  "b" (bx),
  25.590 +			  "D" ((long)reg),
  25.591 +			  "S" (&pci_indirect));
  25.592 +		break;
  25.593 +	case 2:
  25.594 +		__asm__("lcall *(%%esi); cld\n\t"
  25.595 +			"jc 1f\n\t"
  25.596 +			"xor %%ah, %%ah\n"
  25.597 +			"1:"
  25.598 +			: "=c" (*value),
  25.599 +			  "=a" (result)
  25.600 +			: "1" (PCIBIOS_READ_CONFIG_WORD),
  25.601 +			  "b" (bx),
  25.602 +			  "D" ((long)reg),
  25.603 +			  "S" (&pci_indirect));
  25.604 +		break;
  25.605 +	case 4:
  25.606 +		__asm__("lcall *(%%esi); cld\n\t"
  25.607 +			"jc 1f\n\t"
  25.608 +			"xor %%ah, %%ah\n"
  25.609 +			"1:"
  25.610 +			: "=c" (*value),
  25.611 +			  "=a" (result)
  25.612 +			: "1" (PCIBIOS_READ_CONFIG_DWORD),
  25.613 +			  "b" (bx),
  25.614 +			  "D" ((long)reg),
  25.615 +			  "S" (&pci_indirect));
  25.616 +		break;
  25.617 +	}
  25.618 +
  25.619 +	spin_unlock_irqrestore(&pci_config_lock, flags);
  25.620 +
  25.621 +	return (int)((result & 0xff00) >> 8);
  25.622 +}
  25.623 +
  25.624 +static int pci_bios_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
  25.625 +{
  25.626 +	unsigned long result = 0;
  25.627 +	unsigned long flags;
  25.628 +	unsigned long bx = ((bus << 8) | (dev << 3) | fn);
  25.629 +
  25.630 +	if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) 
  25.631 +		return -EINVAL;
  25.632 +
  25.633 +	spin_lock_irqsave(&pci_config_lock, flags);
  25.634 +
  25.635 +	switch (len) {
  25.636 +	case 1:
  25.637 +		__asm__("lcall *(%%esi); cld\n\t"
  25.638 +			"jc 1f\n\t"
  25.639 +			"xor %%ah, %%ah\n"
  25.640 +			"1:"
  25.641 +			: "=a" (result)
  25.642 +			: "0" (PCIBIOS_WRITE_CONFIG_BYTE),
  25.643 +			  "c" (value),
  25.644 +			  "b" (bx),
  25.645 +			  "D" ((long)reg),
  25.646 +			  "S" (&pci_indirect));
  25.647 +		break;
  25.648 +	case 2:
  25.649 +		__asm__("lcall *(%%esi); cld\n\t"
  25.650 +			"jc 1f\n\t"
  25.651 +			"xor %%ah, %%ah\n"
  25.652 +			"1:"
  25.653 +			: "=a" (result)
  25.654 +			: "0" (PCIBIOS_WRITE_CONFIG_WORD),
  25.655 +			  "c" (value),
  25.656 +			  "b" (bx),
  25.657 +			  "D" ((long)reg),
  25.658 +			  "S" (&pci_indirect));
  25.659 +		break;
  25.660 +	case 4:
  25.661 +		__asm__("lcall *(%%esi); cld\n\t"
  25.662 +			"jc 1f\n\t"
  25.663 +			"xor %%ah, %%ah\n"
  25.664 +			"1:"
  25.665 +			: "=a" (result)
  25.666 +			: "0" (PCIBIOS_WRITE_CONFIG_DWORD),
  25.667 +			  "c" (value),
  25.668 +			  "b" (bx),
  25.669 +			  "D" ((long)reg),
  25.670 +			  "S" (&pci_indirect));
  25.671 +		break;
  25.672 +	}
  25.673 +
  25.674 +	spin_unlock_irqrestore(&pci_config_lock, flags);
  25.675 +
  25.676 +	return (int)((result & 0xff00) >> 8);
  25.677 +}
  25.678 +
  25.679 +static int pci_bios_read_config_byte(struct pci_dev *dev, int where, u8 *value)
  25.680 +{
  25.681 +	int result; 
  25.682 +	u32 data;
  25.683 +
  25.684 +	if (!value) 
  25.685 +		return -EINVAL;
  25.686 +
  25.687 +	result = pci_bios_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.688 +		PCI_FUNC(dev->devfn), where, 1, &data);
  25.689 +
  25.690 +	*value = (u8)data;
  25.691 +
  25.692 +	return result;
  25.693 +}
  25.694 +
  25.695 +static int pci_bios_read_config_word(struct pci_dev *dev, int where, u16 *value)
  25.696 +{
  25.697 +	int result; 
  25.698 +	u32 data;
  25.699 +
  25.700 +	if (!value) 
  25.701 +		return -EINVAL;
  25.702 +
  25.703 +	result = pci_bios_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.704 +		PCI_FUNC(dev->devfn), where, 2, &data);
  25.705 +
  25.706 +	*value = (u16)data;
  25.707 +
  25.708 +	return result;
  25.709 +}
  25.710 +
  25.711 +static int pci_bios_read_config_dword(struct pci_dev *dev, int where, u32 *value)
  25.712 +{
  25.713 +	if (!value) 
  25.714 +		return -EINVAL;
  25.715 +	
  25.716 +	return pci_bios_read(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.717 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.718 +}
  25.719 +
  25.720 +static int pci_bios_write_config_byte(struct pci_dev *dev, int where, u8 value)
  25.721 +{
  25.722 +	return pci_bios_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.723 +		PCI_FUNC(dev->devfn), where, 1, value);
  25.724 +}
  25.725 +
  25.726 +static int pci_bios_write_config_word(struct pci_dev *dev, int where, u16 value)
  25.727 +{
  25.728 +	return pci_bios_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.729 +		PCI_FUNC(dev->devfn), where, 2, value);
  25.730 +}
  25.731 +
  25.732 +static int pci_bios_write_config_dword(struct pci_dev *dev, int where, u32 value)
  25.733 +{
  25.734 +	return pci_bios_write(0, dev->bus->number, PCI_SLOT(dev->devfn), 
  25.735 +		PCI_FUNC(dev->devfn), where, 4, value);
  25.736 +}
  25.737 +
  25.738 +
  25.739 +/*
  25.740 + * Function table for BIOS32 access
  25.741 + */
  25.742 +
  25.743 +static struct pci_ops pci_bios_access = {
  25.744 +      pci_bios_read_config_byte,
  25.745 +      pci_bios_read_config_word,
  25.746 +      pci_bios_read_config_dword,
  25.747 +      pci_bios_write_config_byte,
  25.748 +      pci_bios_write_config_word,
  25.749 +      pci_bios_write_config_dword
  25.750 +};
  25.751 +
  25.752 +/*
  25.753 + * Try to find PCI BIOS.
  25.754 + */
  25.755 +
  25.756 +static struct pci_ops * __devinit pci_find_bios(void)
  25.757 +{
  25.758 +	union bios32 *check;
  25.759 +	unsigned char sum;
  25.760 +	int i, length;
  25.761 +
  25.762 +	/*
  25.763 +	 * Follow the standard procedure for locating the BIOS32 Service
  25.764 +	 * directory by scanning the permissible address range from
  25.765 +	 * 0xe0000 through 0xfffff for a valid BIOS32 structure.
  25.766 +	 */
  25.767 +
  25.768 +	for (check = (union bios32 *) __va(0xe0000);
  25.769 +	     check <= (union bios32 *) __va(0xffff0);
  25.770 +	     ++check) {
  25.771 +		if (check->fields.signature != BIOS32_SIGNATURE)
  25.772 +			continue;
  25.773 +		length = check->fields.length * 16;
  25.774 +		if (!length)
  25.775 +			continue;
  25.776 +		sum = 0;
  25.777 +		for (i = 0; i < length ; ++i)
  25.778 +			sum += check->chars[i];
  25.779 +		if (sum != 0)
  25.780 +			continue;
  25.781 +		if (check->fields.revision != 0) {
  25.782 +			printk("PCI: unsupported BIOS32 revision %d at 0x%p\n",
  25.783 +				check->fields.revision, check);
  25.784 +			continue;
  25.785 +		}
  25.786 +		DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
  25.787 +		if (check->fields.entry >= 0x100000) {
  25.788 +			printk("PCI: BIOS32 entry (0x%p) in high memory, cannot use.\n", check);
  25.789 +			return NULL;
  25.790 +		} else {
  25.791 +			unsigned long bios32_entry = check->fields.entry;
  25.792 +			DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
  25.793 +			bios32_indirect.address = bios32_entry + PAGE_OFFSET;
  25.794 +			if (check_pcibios())
  25.795 +				return &pci_bios_access;
  25.796 +		}
  25.797 +		break;	/* Hopefully more than one BIOS32 cannot happen... */
  25.798 +	}
  25.799 +
  25.800 +	return NULL;
  25.801 +}
  25.802 +
  25.803 +/*
  25.804 + * Sort the device list according to PCI BIOS. Nasty hack, but since some
  25.805 + * fool forgot to define the `correct' device order in the PCI BIOS specs
  25.806 + * and we want to be (possibly bug-to-bug ;-]) compatible with older kernels
  25.807 + * which used BIOS ordering, we are bound to do this...
  25.808 + */
  25.809 +
  25.810 +static void __devinit pcibios_sort(void)
  25.811 +{
  25.812 +	LIST_HEAD(sorted_devices);
  25.813 +	struct list_head *ln;
  25.814 +	struct pci_dev *dev, *d;
  25.815 +	int idx, found;
  25.816 +	unsigned char bus, devfn;
  25.817 +
  25.818 +	DBG("PCI: Sorting device list...\n");
  25.819 +	while (!list_empty(&pci_devices)) {
  25.820 +		ln = pci_devices.next;
  25.821 +		dev = pci_dev_g(ln);
  25.822 +		idx = found = 0;
  25.823 +		while (pci_bios_find_device(dev->vendor, dev->device, idx, &bus, &devfn) == PCIBIOS_SUCCESSFUL) {
  25.824 +			idx++;
  25.825 +			for (ln=pci_devices.next; ln != &pci_devices; ln=ln->next) {
  25.826 +				d = pci_dev_g(ln);
  25.827 +				if (d->bus->number == bus && d->devfn == devfn) {
  25.828 +					list_del(&d->global_list);
  25.829 +					list_add_tail(&d->global_list, &sorted_devices);
  25.830 +					if (d == dev)
  25.831 +						found = 1;
  25.832 +					break;
  25.833 +				}
  25.834 +			}
  25.835 +			if (ln == &pci_devices) {
  25.836 +				printk("PCI: BIOS reporting unknown device %02x:%02x\n", bus, devfn);
  25.837 +				/*
  25.838 +				 * We must not continue scanning as several buggy BIOSes
  25.839 +				 * return garbage after the last device. Grr.
  25.840 +				 */
  25.841 +				break;
  25.842 +			}
  25.843 +		}
  25.844 +		if (!found) {
  25.845 +			printk("PCI: Device %02x:%02x not found by BIOS\n",
  25.846 +				dev->bus->number, dev->devfn);
  25.847 +			list_del(&dev->global_list);
  25.848 +			list_add_tail(&dev->global_list, &sorted_devices);
  25.849 +		}
  25.850 +	}
  25.851 +	list_splice(&sorted_devices, &pci_devices);
  25.852 +}
  25.853 +
  25.854 +/*
  25.855 + *  BIOS Functions for IRQ Routing
  25.856 + */
  25.857 +
  25.858 +struct irq_routing_options {
  25.859 +	u16 size;
  25.860 +	struct irq_info *table;
  25.861 +	u16 segment;
  25.862 +} __attribute__((packed));
  25.863 +
  25.864 +struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
  25.865 +{
  25.866 +	struct irq_routing_options opt;
  25.867 +	struct irq_routing_table *rt = NULL;
  25.868 +	int ret, map;
  25.869 +	unsigned long page;
  25.870 +
  25.871 +	if (!pci_bios_present)
  25.872 +		return NULL;
  25.873 +	page = __get_free_page(GFP_KERNEL);
  25.874 +	if (!page)
  25.875 +		return NULL;
  25.876 +	opt.table = (struct irq_info *) page;
  25.877 +	opt.size = PAGE_SIZE;
  25.878 +	opt.segment = __KERNEL_DS;
  25.879 +
  25.880 +	DBG("PCI: Fetching IRQ routing table... ");
  25.881 +	__asm__("push %%es\n\t"
  25.882 +		"push %%ds\n\t"
  25.883 +		"pop  %%es\n\t"
  25.884 +		"lcall *(%%esi); cld\n\t"
  25.885 +		"pop %%es\n\t"
  25.886 +		"jc 1f\n\t"
  25.887 +		"xor %%ah, %%ah\n"
  25.888 +		"1:"
  25.889 +		: "=a" (ret),
  25.890 +		  "=b" (map)
  25.891 +		: "0" (PCIBIOS_GET_ROUTING_OPTIONS),
  25.892 +		  "1" (0),
  25.893 +		  "D" ((long) &opt),
  25.894 +		  "S" (&pci_indirect));
  25.895 +	DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
  25.896 +	if (ret & 0xff00)
  25.897 +		printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
  25.898 +	else if (opt.size) {
  25.899 +		rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
  25.900 +		if (rt) {
  25.901 +			memset(rt, 0, sizeof(struct irq_routing_table));
  25.902 +			rt->size = opt.size + sizeof(struct irq_routing_table);
  25.903 +			rt->exclusive_irqs = map;
  25.904 +			memcpy(rt->slots, (void *) page, opt.size);
  25.905 +			printk("PCI: Using BIOS Interrupt Routing Table\n");
  25.906 +		}
  25.907 +	}
  25.908 +	free_page(page);
  25.909 +	return rt;
  25.910 +}
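A minimal caller sketch for the routine above, assuming the conventional Linux 2.4 struct irq_info layout (bus, devfn and slot members); that structure is declared in the PCI headers rather than in this hunk, so treat the field names as assumptions.

/*
 * Hypothetical caller sketch: dump the slot entries reported by the BIOS.
 * Assumes struct irq_info provides bus, devfn and slot, as in stock 2.4.
 */
static void __devinit dump_bios_irq_table(void)
{
	struct irq_routing_table *rt = pcibios_get_irq_routing_table();
	struct irq_info *info;
	int nr, i;

	if (!rt)
		return;
	nr = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
	for (i = 0, info = rt->slots; i < nr; i++, info++)
		printk("PCI: slot %d is device %02x:%02x\n",
		       info->slot, info->bus, info->devfn);
	kfree(rt);
}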
  25.911 +
  25.912 +
  25.913 +int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
  25.914 +{
  25.915 +	int ret;
  25.916 +
  25.917 +	__asm__("lcall *(%%esi); cld\n\t"
  25.918 +		"jc 1f\n\t"
  25.919 +		"xor %%ah, %%ah\n"
  25.920 +		"1:"
  25.921 +		: "=a" (ret)
  25.922 +		: "0" (PCIBIOS_SET_PCI_HW_INT),
  25.923 +		  "b" ((dev->bus->number << 8) | dev->devfn),
  25.924 +		  "c" ((irq << 8) | (pin + 10)),
  25.925 +		  "S" (&pci_indirect));
  25.926 +	return !(ret & 0xff00);
  25.927 +}
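In the BIOS call above, BX carries the bus/devfn pair and CX packs the IRQ number into CH with the interrupt pin in CL, where 0x0A-0x0D encode INTA#-INTD# (hence the `pin + 10`). A hypothetical call routing pin 0 (INTA#) of a device to IRQ 11 would be:

/* Hypothetical usage: ask the BIOS to route INTA# (pin 0) of dev to IRQ 11.
 * A zero return from pcibios_set_irq_routing() means the BIOS refused. */
if (!pcibios_set_irq_routing(dev, 0, 11))
	printk("PCI: BIOS refused to route IRQ 11 for %s\n", dev->slot_name);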
  25.928 +
  25.929 +#endif
  25.930 +
  25.931 +/*
   25.932 + * Several buggy motherboards address only 16 devices and mirror
   25.933 + * them to the next 16 IDs. We try to detect this `feature' on all
   25.934 + * primary buses (those containing host bridges, as they are
   25.935 + * expected to be unique) and remove the ghost devices.
  25.936 + */
  25.937 +
  25.938 +static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
  25.939 +{
  25.940 +	struct list_head *ln, *mn;
  25.941 +	struct pci_dev *d, *e;
  25.942 +	int mirror = PCI_DEVFN(16,0);
  25.943 +	int seen_host_bridge = 0;
  25.944 +	int i;
  25.945 +
  25.946 +	DBG("PCI: Scanning for ghost devices on bus %d\n", b->number);
  25.947 +	for (ln=b->devices.next; ln != &b->devices; ln=ln->next) {
  25.948 +		d = pci_dev_b(ln);
  25.949 +		if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST)
  25.950 +			seen_host_bridge++;
  25.951 +		for (mn=ln->next; mn != &b->devices; mn=mn->next) {
  25.952 +			e = pci_dev_b(mn);
  25.953 +			if (e->devfn != d->devfn + mirror ||
  25.954 +			    e->vendor != d->vendor ||
  25.955 +			    e->device != d->device ||
  25.956 +			    e->class != d->class)
  25.957 +				continue;
  25.958 +			for(i=0; i<PCI_NUM_RESOURCES; i++)
  25.959 +				if (e->resource[i].start != d->resource[i].start ||
  25.960 +				    e->resource[i].end != d->resource[i].end ||
  25.961 +				    e->resource[i].flags != d->resource[i].flags)
  25.962 +					continue;
  25.963 +			break;
  25.964 +		}
  25.965 +		if (mn == &b->devices)
  25.966 +			return;
  25.967 +	}
  25.968 +	if (!seen_host_bridge)
  25.969 +		return;
  25.970 +	printk("PCI: Ignoring ghost devices on bus %02x\n", b->number);
  25.971 +
  25.972 +	ln = &b->devices;
  25.973 +	while (ln->next != &b->devices) {
  25.974 +		d = pci_dev_b(ln->next);
  25.975 +		if (d->devfn >= mirror) {
  25.976 +			list_del(&d->global_list);
  25.977 +			list_del(&d->bus_list);
  25.978 +			kfree(d);
  25.979 +		} else
  25.980 +			ln = ln->next;
  25.981 +	}
  25.982 +}
  25.983 +
  25.984 +/*
  25.985 + * Discover remaining PCI buses in case there are peer host bridges.
   25.986 + * We use the number of the last PCI bus provided by the PCI BIOS.
  25.987 + */
  25.988 +static void __devinit pcibios_fixup_peer_bridges(void)
  25.989 +{
  25.990 +	int n;
  25.991 +	struct pci_bus bus;
  25.992 +	struct pci_dev dev;
  25.993 +	u16 l;
  25.994 +
  25.995 +	if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
  25.996 +		return;
  25.997 +	DBG("PCI: Peer bridge fixup\n");
  25.998 +	for (n=0; n <= pcibios_last_bus; n++) {
  25.999 +		if (pci_bus_exists(&pci_root_buses, n))
 25.1000 +			continue;
 25.1001 +		bus.number = n;
 25.1002 +		bus.ops = pci_root_ops;
 25.1003 +		dev.bus = &bus;
 25.1004 +		for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
 25.1005 +			if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
 25.1006 +			    l != 0x0000 && l != 0xffff) {
 25.1007 +				DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
 25.1008 +				printk("PCI: Discovered peer bus %02x\n", n);
 25.1009 +				pci_scan_bus(n, pci_root_ops, NULL);
 25.1010 +				break;
 25.1011 +			}
 25.1012 +	}
 25.1013 +}
 25.1014 +
 25.1015 +/*
 25.1016 + * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 25.1017 + */
 25.1018 +
 25.1019 +static void __devinit pci_fixup_i450nx(struct pci_dev *d)
 25.1020 +{
 25.1021 +	/*
 25.1022 +	 * i450NX -- Find and scan all secondary buses on all PXB's.
 25.1023 +	 */
 25.1024 +	int pxb, reg;
 25.1025 +	u8 busno, suba, subb;
 25.1026 +	printk("PCI: Searching for i450NX host bridges on %s\n", d->slot_name);
 25.1027 +	reg = 0xd0;
 25.1028 +	for(pxb=0; pxb<2; pxb++) {
 25.1029 +		pci_read_config_byte(d, reg++, &busno);
 25.1030 +		pci_read_config_byte(d, reg++, &suba);
 25.1031 +		pci_read_config_byte(d, reg++, &subb);
 25.1032 +		DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb);
 25.1033 +		if (busno)
 25.1034 +			pci_scan_bus(busno, pci_root_ops, NULL);	/* Bus A */
 25.1035 +		if (suba < subb)
 25.1036 +			pci_scan_bus(suba+1, pci_root_ops, NULL);	/* Bus B */
 25.1037 +	}
 25.1038 +	pcibios_last_bus = -1;
 25.1039 +}
 25.1040 +
 25.1041 +static void __devinit pci_fixup_i450gx(struct pci_dev *d)
 25.1042 +{
 25.1043 +	/*
 25.1044 +	 * i450GX and i450KX -- Find and scan all secondary buses.
 25.1045 +	 * (called separately for each PCI bridge found)
 25.1046 +	 */
 25.1047 +	u8 busno;
 25.1048 +	pci_read_config_byte(d, 0x4a, &busno);
  25.1049 +	printk("PCI: i450KX/GX host bridge %s: secondary bus %02x\n", d->slot_name, busno);
 25.1050 +	pci_scan_bus(busno, pci_root_ops, NULL);
 25.1051 +	pcibios_last_bus = -1;
 25.1052 +}
 25.1053 +
 25.1054 +static void __devinit  pci_fixup_umc_ide(struct pci_dev *d)
 25.1055 +{
 25.1056 +	/*
  25.1057 +	 * The UM8886BF IDE controller sets its region type bits incorrectly,
  25.1058 +	 * so the regions look like memory even though they are really I/O.
 25.1059 +	 */
 25.1060 +	int i;
 25.1061 +
 25.1062 +	printk("PCI: Fixing base address flags for device %s\n", d->slot_name);
 25.1063 +	for(i=0; i<4; i++)
 25.1064 +		d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
 25.1065 +}
 25.1066 +
 25.1067 +static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
 25.1068 +{
 25.1069 +	int i;
 25.1070 +
 25.1071 +	/*
 25.1072 +	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
 25.1073 +	 */
 25.1074 +	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
 25.1075 +		return;
 25.1076 +	DBG("PCI: IDE base address fixup for %s\n", d->slot_name);
 25.1077 +	for(i=0; i<4; i++) {
 25.1078 +		struct resource *r = &d->resource[i];
 25.1079 +		if ((r->start & ~0x80) == 0x374) {
 25.1080 +			r->start |= 2;
 25.1081 +			r->end = r->start;
 25.1082 +		}
 25.1083 +	}
 25.1084 +}
 25.1085 +
 25.1086 +static void __devinit  pci_fixup_ide_trash(struct pci_dev *d)
 25.1087 +{
 25.1088 +	int i;
 25.1089 +
 25.1090 +	/*
 25.1091 +	 * There exist PCI IDE controllers which have utter garbage
  25.1092 +	 * in the first four base registers. Ignore that.
 25.1093 +	 */
 25.1094 +	DBG("PCI: IDE base address trash cleared for %s\n", d->slot_name);
 25.1095 +	for(i=0; i<4; i++)
 25.1096 +		d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
 25.1097 +}
 25.1098 +
 25.1099 +static void __devinit  pci_fixup_latency(struct pci_dev *d)
 25.1100 +{
 25.1101 +	/*
  25.1102 +	 *  SiS 5597 and 5598 chipsets require the latency timer to be set to
 25.1103 +	 *  at most 32 to avoid lockups.
 25.1104 +	 */
 25.1105 +	DBG("PCI: Setting max latency to 32\n");
 25.1106 +	pcibios_max_latency = 32;
 25.1107 +}
 25.1108 +
 25.1109 +static void __devinit pci_fixup_piix4_acpi(struct pci_dev *d)
 25.1110 +{
 25.1111 +	/*
 25.1112 +	 * PIIX4 ACPI device: hardwired IRQ9
 25.1113 +	 */
 25.1114 +	d->irq = 9;
 25.1115 +}
 25.1116 +
 25.1117 +/*
 25.1118 + * Nobody seems to know what this does. Damn.
 25.1119 + *
 25.1120 + * But it does seem to fix some unspecified problem
 25.1121 + * with 'movntq' copies on Athlons.
 25.1122 + *
 25.1123 + * VIA 8363 chipset:
 25.1124 + *  - bit 7 at offset 0x55: Debug (RW)
 25.1125 + */
 25.1126 +static void __init pci_fixup_via_athlon_bug(struct pci_dev *d)
 25.1127 +{
 25.1128 +	u8 v;
 25.1129 +	pci_read_config_byte(d, 0x55, &v);
 25.1130 +	if (v & 0x80) {
 25.1131 +		printk("Trying to stomp on Athlon bug...\n");
 25.1132 +		v &= 0x7f; /* clear bit 55.7 */
 25.1133 +		pci_write_config_byte(d, 0x55, v);
 25.1134 +	}
 25.1135 +}
 25.1136 +
 25.1137 +struct pci_fixup pcibios_fixups[] = {
 25.1138 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82451NX,	pci_fixup_i450nx },
 25.1139 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454GX,	pci_fixup_i450gx },
 25.1140 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_UMC,	PCI_DEVICE_ID_UMC_UM8886BF,	pci_fixup_umc_ide },
 25.1141 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5513,		pci_fixup_ide_trash },
 25.1142 +	{ PCI_FIXUP_HEADER,	PCI_ANY_ID,		PCI_ANY_ID,			pci_fixup_ide_bases },
 25.1143 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5597,		pci_fixup_latency },
 25.1144 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5598,		pci_fixup_latency },
 25.1145 + 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	pci_fixup_piix4_acpi },
 25.1146 +	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	pci_fixup_via_athlon_bug },
 25.1147 +	{ 0 }
 25.1148 +};
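This table is consumed by the generic PCI layer when each device is probed; a simplified sketch of the matching logic is given below. The function name and the struct pci_fixup field names (pass, vendor, device, hook) are assumptions taken from the stock 2.4 <linux/pci.h>; the real walker lives in drivers/pci/pci.c.

/*
 * Illustrative sketch only: roughly how the generic PCI layer applies the
 * pcibios_fixups[] table to each probed device.
 */
static void apply_header_fixups(struct pci_dev *dev)
{
	struct pci_fixup *f;

	for (f = pcibios_fixups; f->pass; f++) {
		if (f->pass != PCI_FIXUP_HEADER)
			continue;
		if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID))
			f->hook(dev);
	}
}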
 25.1149 +
 25.1150 +/*
 25.1151 + *  Called after each bus is probed, but before its children
 25.1152 + *  are examined.
 25.1153 + */
 25.1154 +
 25.1155 +void __devinit  pcibios_fixup_bus(struct pci_bus *b)
 25.1156 +{
 25.1157 +	pcibios_fixup_ghosts(b);
 25.1158 +	pci_read_bridge_bases(b);
 25.1159 +}
 25.1160 +
 25.1161 +
 25.1162 +void __devinit pcibios_config_init(void)
 25.1163 +{
 25.1164 +	/*
 25.1165 +	 * Try all known PCI access methods. Note that we support using 
 25.1166 +	 * both PCI BIOS and direct access, with a preference for direct.
 25.1167 +	 */
 25.1168 +
 25.1169 +#ifdef CONFIG_PCI_BIOS
 25.1170 +	if ((pci_probe & PCI_PROBE_BIOS) 
 25.1171 +		&& ((pci_root_ops = pci_find_bios()))) {
 25.1172 +		pci_probe |= PCI_BIOS_SORT;
 25.1173 +		pci_bios_present = 1;
 25.1174 +		pci_config_read = pci_bios_read;
 25.1175 +		pci_config_write = pci_bios_write;
 25.1176 +	}
 25.1177 +#endif
 25.1178 +
 25.1179 +#ifdef CONFIG_PCI_DIRECT
 25.1180 +	if ((pci_probe & (PCI_PROBE_CONF1 | PCI_PROBE_CONF2)) 
 25.1181 +		&& (pci_root_ops = pci_check_direct())) {
 25.1182 +		if (pci_root_ops == &pci_direct_conf1) {
 25.1183 +			pci_config_read = pci_conf1_read;
 25.1184 +			pci_config_write = pci_conf1_write;
 25.1185 +		}
 25.1186 +		else {
 25.1187 +			pci_config_read = pci_conf2_read;
 25.1188 +			pci_config_write = pci_conf2_write;
 25.1189 +		}
 25.1190 +	}
 25.1191 +#endif
 25.1192 +
 25.1193 +	return;
 25.1194 +}
 25.1195 +
 25.1196 +void __init pcibios_init(void)
 25.1197 +{
 25.1198 +	if (!pci_root_ops)
 25.1199 +		pcibios_config_init();
 25.1200 +	if (!pci_root_ops) {
 25.1201 +		printk("PCI: System does not support PCI\n");
 25.1202 +		return;
 25.1203 +	}
 25.1204 +
 25.1205 +	printk("PCI: Probing PCI hardware\n");
 25.1206 +	pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL);
 25.1207 +
 25.1208 +	pcibios_irq_init();
 25.1209 +	pcibios_fixup_peer_bridges();
 25.1210 +	pcibios_fixup_irqs();
 25.1211 +	pcibios_resource_survey();
 25.1212 +
 25.1213 +#ifdef CONFIG_PCI_BIOS
 25.1214 +	if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
 25.1215 +		pcibios_sort();
 25.1216 +#endif
 25.1217 +}
 25.1218 +
 25.1219 +char * __devinit  pcibios_setup(char *str)
 25.1220 +{
 25.1221 +	if (!strcmp(str, "off")) {
 25.1222 +		pci_probe = 0;
 25.1223 +		return NULL;
 25.1224 +	}
 25.1225 +#ifdef CONFIG_PCI_BIOS
 25.1226 +	else if (!strcmp(str, "bios")) {
 25.1227 +		pci_probe = PCI_PROBE_BIOS;
 25.1228 +		return NULL;
 25.1229 +	} else if (!strcmp(str, "nobios")) {
 25.1230 +		pci_probe &= ~PCI_PROBE_BIOS;
 25.1231 +		return NULL;
 25.1232 +	} else if (!strcmp(str, "nosort")) {
 25.1233 +		pci_probe |= PCI_NO_SORT;
 25.1234 +		return NULL;
 25.1235 +	} else if (!strcmp(str, "biosirq")) {
 25.1236 +		pci_probe |= PCI_BIOS_IRQ_SCAN;
 25.1237 +		return NULL;
 25.1238 +	}
 25.1239 +#endif
 25.1240 +#ifdef CONFIG_PCI_DIRECT
 25.1241 +	else if (!strcmp(str, "conf1")) {
 25.1242 +		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
 25.1243 +		return NULL;
 25.1244 +	}
 25.1245 +	else if (!strcmp(str, "conf2")) {
 25.1246 +		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
 25.1247 +		return NULL;
 25.1248 +	}
 25.1249 +#endif
 25.1250 +	else if (!strcmp(str, "rom")) {
 25.1251 +		pci_probe |= PCI_ASSIGN_ROMS;
 25.1252 +		return NULL;
 25.1253 +	} else if (!strcmp(str, "assign-busses")) {
 25.1254 +		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
 25.1255 +		return NULL;
 25.1256 +	} else if (!strncmp(str, "irqmask=", 8)) {
 25.1257 +		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
 25.1258 +		return NULL;
 25.1259 +	} else if (!strncmp(str, "lastbus=", 8)) {
 25.1260 +		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
 25.1261 +		return NULL;
 25.1262 +	}
 25.1263 +	return str;
 25.1264 +}
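pcibios_setup() receives each "pci=" boot option individually from the generic PCI setup code; returning NULL marks the option as consumed, while returning the string unchanged lets the caller report it as unrecognised. A hypothetical illustration:

/* Hypothetical illustration of the contract: NULL means "option consumed". */
char *r1 = pcibios_setup("lastbus=4");   /* parsed above, returns NULL       */
char *r2 = pcibios_setup("frobnicate");  /* unrecognised, returned unchanged */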
 25.1265 +
 25.1266 +unsigned int pcibios_assign_all_busses(void)
 25.1267 +{
 25.1268 +	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
 25.1269 +}
 25.1270 +
 25.1271 +int pcibios_enable_device(struct pci_dev *dev)
 25.1272 +{
 25.1273 +	int err;
 25.1274 +
 25.1275 +	if ((err = pcibios_enable_resources(dev)) < 0)
 25.1276 +		return err;
 25.1277 +	pcibios_enable_irq(dev);
 25.1278 +	return 0;
 25.1279 +}
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xen-2.4.16/arch/i386/process.c	Wed Nov 20 12:02:17 2002 +0000
    26.3 @@ -0,0 +1,402 @@
    26.4 +/*
    26.5 + *  linux/arch/i386/kernel/process.c
    26.6 + *
    26.7 + *  Copyright (C) 1995  Linus Torvalds
    26.8 + *
    26.9 + *  Pentium III FXSR, SSE support
   26.10 + *	Gareth Hughes <gareth@valinux.com>, May 2000
   26.11 + */
   26.12 +
   26.13 +/*
   26.14 + * This file handles the architecture-dependent parts of process handling..
   26.15 + */
   26.16 +
   26.17 +#define __KERNEL_SYSCALLS__
   26.18 +#include <stdarg.h>
   26.19 +
   26.20 +#include <xeno/config.h>
   26.21 +#include <xeno/lib.h>
   26.22 +#include <xeno/errno.h>
   26.23 +#include <xeno/sched.h>
   26.24 +#include <xeno/smp.h>
   26.25 +#include <asm/ptrace.h>
   26.26 +#include <xeno/delay.h>
   26.27 +#include <asm/mc146818rtc.h>
   26.28 +
   26.29 +#include <asm/system.h>
   26.30 +#include <asm/io.h>
   26.31 +#include <asm/processor.h>
   26.32 +#include <asm/desc.h>
   26.33 +#include <asm/i387.h>
   26.34 +
   26.35 +#include <xeno/irq.h>
   26.36 +#include <xeno/event.h>
   26.37 +
   26.38 +asmlinkage void ret_from_newdomain(void) __asm__("ret_from_newdomain");
   26.39 +
   26.40 +int hlt_counter;
   26.41 +
   26.42 +void disable_hlt(void)
   26.43 +{
   26.44 +    hlt_counter++;
   26.45 +}
   26.46 +
   26.47 +void enable_hlt(void)
   26.48 +{
   26.49 +    hlt_counter--;
   26.50 +}
   26.51 +
   26.52 +/*
   26.53 + * We use this if we don't have any better
   26.54 + * idle routine..
   26.55 + */
   26.56 +static void default_idle(void)
   26.57 +{
   26.58 +    if (!hlt_counter) {
   26.59 +        __cli();
   26.60 +        if (!current->hyp_events)
   26.61 +            safe_halt();
   26.62 +        else
   26.63 +            __sti();
   26.64 +    }
   26.65 +}
   26.66 +
   26.67 +/*
   26.68 + * The idle thread. There's no useful work to be
   26.69 + * done, so just try to conserve power and have a
   26.70 + * low exit latency (ie sit in a loop waiting for
   26.71 + * somebody to say that they'd like to reschedule)
   26.72 + */
   26.73 +void cpu_idle (void)
   26.74 +{
   26.75 +    ASSERT(current->domain == IDLE_DOMAIN_ID);
   26.76 +    current->has_cpu = 1;
   26.77 +
   26.78 +    /*
    26.79 +     * Declare this CPU's setup done to the boot processor; the memory
    26.80 +     * barrier ensures that state is visible before we proceed.
   26.81 +     */
   26.82 +    smp_mb();
   26.83 +    init_idle();
   26.84 +
   26.85 +    for ( ; ; )
   26.86 +    {
   26.87 +        while (!current->hyp_events)
   26.88 +            default_idle();
   26.89 +        do_hyp_events();
   26.90 +    }
   26.91 +}
   26.92 +
   26.93 +static long no_idt[2];
   26.94 +static int reboot_mode;
   26.95 +int reboot_thru_bios = 0;
   26.96 +
   26.97 +#ifdef CONFIG_SMP
   26.98 +int reboot_smp = 0;
   26.99 +static int reboot_cpu = -1;
  26.100 +/* shamelessly grabbed from lib/vsprintf.c for readability */
  26.101 +#define is_digit(c)	((c) >= '0' && (c) <= '9')
  26.102 +#endif
  26.103 +
  26.104 +
  26.105 +static inline void kb_wait(void)
  26.106 +{
  26.107 +    int i;
  26.108 +
  26.109 +    for (i=0; i<0x10000; i++)
  26.110 +        if ((inb_p(0x64) & 0x02) == 0)
  26.111 +            break;
  26.112 +}
  26.113 +
  26.114 +
  26.115 +void machine_restart(char * __unused)
  26.116 +{
  26.117 +#if CONFIG_SMP
  26.118 +    int cpuid;
  26.119 +	
  26.120 +    cpuid = GET_APIC_ID(apic_read(APIC_ID));
  26.121 +
  26.122 +    if (reboot_smp) {
  26.123 +
   26.124 +        /* check to see if reboot_cpu is valid;
   26.125 +           if it's not, default to the BSP */
  26.126 +        if ((reboot_cpu == -1) ||  
  26.127 +            (reboot_cpu > (NR_CPUS -1))  || 
  26.128 +            !(phys_cpu_present_map & (1<<cpuid))) 
  26.129 +            reboot_cpu = boot_cpu_physical_apicid;
  26.130 +
   26.131 +        reboot_smp = 0;  /* use this as a flag to only go through this once */
   26.132 +        /* Re-run this function on the other CPUs. They will fall
   26.133 +           through this section since we have cleared reboot_smp, and
   26.134 +           will do the reboot if they are the correct CPU; otherwise
   26.135 +           they halt. */
  26.136 +        if (reboot_cpu != cpuid)
  26.137 +            smp_call_function((void *)machine_restart , NULL, 1, 0);
  26.138 +    }
  26.139 +
   26.140 +    /* if reboot_cpu is still -1, then we want a traditional reboot,
   26.141 +       and if we are not running on the reboot_cpu, halt */
  26.142 +    if ((reboot_cpu != -1) && (cpuid != reboot_cpu)) {
  26.143 +        for (;;)
  26.144 +            __asm__ __volatile__ ("hlt");
  26.145 +    }
  26.146 +    /*
  26.147 +     * Stop all CPUs and turn off local APICs and the IO-APIC, so
  26.148 +     * other OSs see a clean IRQ state.
  26.149 +     */
  26.150 +    smp_send_stop();
  26.151 +    disable_IO_APIC();
  26.152 +#endif
  26.153 +
  26.154 +    if(!reboot_thru_bios) {
  26.155 +        /* rebooting needs to touch the page at absolute addr 0 */
  26.156 +        *((unsigned short *)__va(0x472)) = reboot_mode;
  26.157 +        for (;;) {
  26.158 +            int i;
  26.159 +            for (i=0; i<100; i++) {
  26.160 +                kb_wait();
  26.161 +                udelay(50);
  26.162 +                outb(0xfe,0x64);         /* pulse reset low */
  26.163 +                udelay(50);
  26.164 +            }
  26.165 +            /* That didn't work - force a triple fault.. */
  26.166 +            __asm__ __volatile__("lidt %0": :"m" (no_idt));
  26.167 +            __asm__ __volatile__("int3");
  26.168 +        }
  26.169 +    }
  26.170 +
  26.171 +    panic("Need to reinclude BIOS reboot code\n");
  26.172 +}
  26.173 +
  26.174 +void machine_halt(void)
  26.175 +{
  26.176 +    machine_restart(0);
  26.177 +}
  26.178 +
  26.179 +void machine_power_off(void)
  26.180 +{
  26.181 +    machine_restart(0);
  26.182 +}
  26.183 +
  26.184 +extern void show_trace(unsigned long* esp);
  26.185 +
  26.186 +void show_regs(struct pt_regs * regs)
  26.187 +{
  26.188 +    unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
  26.189 +
  26.190 +    printk("\n");
  26.191 +    printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
  26.192 +    if (regs->xcs & 3)
  26.193 +        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
  26.194 +    printk(" EFLAGS: %08lx\n",regs->eflags);
  26.195 +    printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
  26.196 +           regs->eax,regs->ebx,regs->ecx,regs->edx);
  26.197 +    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
  26.198 +           regs->esi, regs->edi, regs->ebp);
  26.199 +    printk(" DS: %04x ES: %04x\n",
  26.200 +           0xffff & regs->xds,0xffff & regs->xes);
  26.201 +
  26.202 +    __asm__("movl %%cr0, %0": "=r" (cr0));
  26.203 +    __asm__("movl %%cr2, %0": "=r" (cr2));
  26.204 +    __asm__("movl %%cr3, %0": "=r" (cr3));
  26.205 +    /* This could fault if %cr4 does not exist */
  26.206 +    __asm__("1: movl %%cr4, %0		\n"
  26.207 +            "2:				\n"
  26.208 +            ".section __ex_table,\"a\"	\n"
  26.209 +            ".long 1b,2b			\n"
  26.210 +            ".previous			\n"
  26.211 +            : "=r" (cr4): "0" (0));
  26.212 +    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
  26.213 +    show_trace(&regs->esp);
  26.214 +}
  26.215 +
  26.216 +/*
  26.217 + * No need to lock the MM as we are the last user
  26.218 + */
  26.219 +void release_segments(struct mm_struct *mm)
  26.220 +{
  26.221 +#if 0
   26.222 +    void * ldt = mm->context.segments;
  26.223 +
  26.224 +    /*
  26.225 +     * free the LDT
  26.226 +     */
  26.227 +    if (ldt) {
   26.228 +        mm->context.segments = NULL;
  26.229 +        clear_LDT();
  26.230 +        vfree(ldt);
  26.231 +    }
  26.232 +#endif
  26.233 +}
  26.234 +
  26.235 +
  26.236 +/*
  26.237 + * Free current thread data structures etc..
  26.238 + */
  26.239 +void exit_thread(void)
  26.240 +{
  26.241 +    /* nothing to do ... */
  26.242 +}
  26.243 +
  26.244 +void flush_thread(void)
  26.245 +{
  26.246 +    struct task_struct *tsk = current;
  26.247 +
  26.248 +    memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
  26.249 +    /*
  26.250 +     * Forget coprocessor state..
  26.251 +     */
  26.252 +    clear_fpu(tsk);
  26.253 +    tsk->flags &= ~PF_DONEFPUINIT;
  26.254 +}
  26.255 +
  26.256 +void release_thread(struct task_struct *dead_task)
  26.257 +{
  26.258 +#if 0
  26.259 +    if (dead_task->mm) {
   26.260 +        void * ldt = dead_task->mm->context.segments;
  26.261 +
  26.262 +        // temporary debugging check
  26.263 +        if (ldt) {
  26.264 +            printk("WARNING: dead process %8s still has LDT? <%p>\n",
  26.265 +                   dead_task->comm, ldt);
  26.266 +            BUG();
  26.267 +        }
  26.268 +    }
  26.269 +#endif
  26.270 +}
  26.271 +
  26.272 +/*
  26.273 + * we do not have to muck with descriptors here, that is
  26.274 + * done in switch_mm() as needed.
  26.275 + */
  26.276 +void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
  26.277 +{
  26.278 +#if 0
  26.279 +    struct mm_struct * old_mm;
  26.280 +    void *old_ldt, *ldt;
  26.281 +
  26.282 +    ldt = NULL;
  26.283 +    old_mm = current->mm;
   26.284 +    if (old_mm && (old_ldt = old_mm->context.segments) != NULL) {
  26.285 +        /*
  26.286 +         * Completely new LDT, we initialize it from the parent:
  26.287 +         */
  26.288 +        ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
  26.289 +        if (!ldt)
  26.290 +            printk(KERN_WARNING "ldt allocation failed\n");
  26.291 +        else
  26.292 +            memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
  26.293 +    }
   26.294 +    new_mm->context.segments = ldt;
   26.295 +    new_mm->context.cpuvalid = ~0UL;	/* valid on all CPUs - they can't have stale data */
  26.296 +#endif
  26.297 +}
  26.298 +
  26.299 +
  26.300 +void new_thread(struct task_struct *p,
  26.301 +                unsigned long start_pc,
  26.302 +                unsigned long start_stack,
  26.303 +                unsigned long start_info)
  26.304 +{
  26.305 +    struct pt_regs * regs;
  26.306 +
  26.307 +    regs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
  26.308 +    memset(regs, 0, sizeof(*regs));
  26.309 +
  26.310 +    /*
  26.311 +     * Initial register values:
  26.312 +     *  DS,ES,FS,GS = __GUEST_DS
  26.313 +     *       CS:EIP = __GUEST_CS:start_pc
  26.314 +     *       SS:ESP = __GUEST_DS:start_stack
  26.315 +     *          ESI = start_info
  26.316 +     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
  26.317 +     */
  26.318 +    p->thread.fs = p->thread.gs = __GUEST_DS;
  26.319 +    regs->xds = regs->xes = regs->xss = __GUEST_DS;
  26.320 +    regs->xcs = __GUEST_CS;
  26.321 +    regs->eip = start_pc;
  26.322 +    regs->esp = start_stack;
  26.323 +    regs->esi = start_info;
  26.324 +
  26.325 +    p->thread.esp = (unsigned long) regs;
  26.326 +    p->thread.esp0 = (unsigned long) (regs+1);
  26.327 +
  26.328 +    p->thread.eip = (unsigned long) ret_from_newdomain;
  26.329 +
  26.330 +    __save_flags(regs->eflags);
  26.331 +    regs->eflags |= X86_EFLAGS_IF;
  26.332 +}
  26.333 +
  26.334 +
  26.335 +/*
  26.336 + * This special macro can be used to load a debugging register
  26.337 + */
  26.338 +#define loaddebug(thread,register) \
  26.339 +		__asm__("movl %0,%%db" #register  \
  26.340 +			: /* no output */ \
  26.341 +			:"r" (thread->debugreg[register]))
  26.342 +
  26.343 +/*
   26.344 + *	switch_to(x,y) should switch tasks from x to y.
  26.345 + *
  26.346 + * We fsave/fwait so that an exception goes off at the right time
  26.347 + * (as a call from the fsave or fwait in effect) rather than to
  26.348 + * the wrong process. Lazy FP saving no longer makes any sense
   26.349 + * with modern CPUs, and this simplifies a lot of things (SMP
  26.350 + * and UP become the same).
  26.351 + *
  26.352 + * NOTE! We used to use the x86 hardware context switching. The
  26.353 + * reason for not using it any more becomes apparent when you
  26.354 + * try to recover gracefully from saved state that is no longer
  26.355 + * valid (stale segment register values in particular). With the
  26.356 + * hardware task-switch, there is no way to fix up bad state in
  26.357 + * a reasonable manner.
  26.358 + *
  26.359 + * The fact that Intel documents the hardware task-switching to
  26.360 + * be slow is a fairly red herring - this code is not noticeably
  26.361 + * faster. However, there _is_ some room for improvement here,
  26.362 + * so the performance issues may eventually be a valid point.
  26.363 + * More important, however, is the fact that this allows us much
  26.364 + * more flexibility.
  26.365 + */
  26.366 +/* NB. prev_p passed in %eax, next_p passed in %edx */
  26.367 +void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
  26.368 +{
  26.369 +    struct thread_struct *prev = &prev_p->thread,
  26.370 +        *next = &next_p->thread;
  26.371 +    struct tss_struct *tss = init_tss + smp_processor_id();
  26.372 +
  26.373 +    unlazy_fpu(prev_p);
  26.374 +
  26.375 +    tss->esp0 = next->esp0;
  26.376 +    tss->esp1 = next->esp1;
  26.377 +    tss->ss1  = next->ss1;
  26.378 +
  26.379 +    /*
  26.380 +     * Save away %fs and %gs. No need to save %es and %ds, as
  26.381 +     * those are always kernel segments while inside the kernel.
  26.382 +     */
  26.383 +    asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
  26.384 +    asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
  26.385 +
  26.386 +    /*
  26.387 +     * Restore %fs and %gs.
  26.388 +     */
  26.389 +    loadsegment(fs, next->fs);
  26.390 +    loadsegment(gs, next->gs);
  26.391 +
  26.392 +    /*
  26.393 +     * Now maybe reload the debug registers
  26.394 +     */
  26.395 +    if (next->debugreg[7]){
  26.396 +        loaddebug(next, 0);
  26.397 +        loaddebug(next, 1);
  26.398 +        loaddebug(next, 2);
  26.399 +        loaddebug(next, 3);
  26.400 +        /* no 4 and 5 */
  26.401 +        loaddebug(next, 6);
  26.402 +        loaddebug(next, 7);
  26.403 +    }
  26.404 +
  26.405 +}
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xen-2.4.16/arch/i386/rwlock.c	Wed Nov 20 12:02:17 2002 +0000
    27.3 @@ -0,0 +1,33 @@
    27.4 +#include <asm/atomic.h>
    27.5 +#include <asm/rwlock.h>
    27.6 +
    27.7 +#if defined(CONFIG_SMP)
    27.8 +asm(
    27.9 +"
   27.10 +.align  4
   27.11 +.globl  __write_lock_failed
   27.12 +__write_lock_failed:
   27.13 +        " LOCK "addl    $" RW_LOCK_BIAS_STR ",(%eax)
   27.14 +1:      rep; nop
   27.15 +        cmpl    $" RW_LOCK_BIAS_STR ",(%eax)
   27.16 +        jne     1b
   27.17 +
   27.18 +        " LOCK "subl    $" RW_LOCK_BIAS_STR ",(%eax)
   27.19 +        jnz     __write_lock_failed
   27.20 +        ret
   27.21 +
   27.22 +
   27.23 +.align  4
   27.24 +.globl  __read_lock_failed
   27.25 +__read_lock_failed:
   27.26 +        lock ; incl     (%eax)
   27.27 +1:      rep; nop
   27.28 +        cmpl    $1,(%eax)
   27.29 +        js      1b
   27.30 +
   27.31 +        lock ; decl     (%eax)
   27.32 +        js      __read_lock_failed
   27.33 +        ret
   27.34 +"
   27.35 +);
   27.36 +#endif
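For readers unfamiliar with the biased-counter scheme these slow paths recover from, a brief sketch follows; the constants and fast-path behaviour are assumed from the stock asm/rwlock.h, which is not part of this hunk.

/*
 * Sketch of the RW_LOCK_BIAS counter convention assumed above (from the
 * stock asm/rwlock.h, not shown in this changeset):
 *
 *   counter == RW_LOCK_BIAS        lock is free
 *   counter == RW_LOCK_BIAS - n    n readers hold the lock
 *   counter == 0                   one writer holds the lock
 *
 * The inline fast paths subtract 1 (read) or RW_LOCK_BIAS (write) and jump
 * here on failure.  __write_lock_failed restores the bias, spins until the
 * counter returns to RW_LOCK_BIAS, then retries the subtraction;
 * __read_lock_failed undoes its decrement and spins until the counter is
 * positive again before retrying.
 */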
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen-2.4.16/arch/i386/setup.c	Wed Nov 20 12:02:17 2002 +0000
    28.3 @@ -0,0 +1,333 @@
    28.4 +
    28.5 +#include <xeno/config.h>
    28.6 +#include <xeno/init.h>
    28.7 +#include <xeno/interrupt.h>
    28.8 +#include <xeno/lib.h>
    28.9 +#include <xeno/sched.h>
   28.10 +#include <xeno/bootmem.h>
   28.11 +#include <xeno/pci.h>
   28.12 +#include <asm/bitops.h>
   28.13 +#include <asm/smp.h>
   28.14 +#include <asm/processor.h>
   28.15 +#include <asm/mpspec.h>
   28.16 +#include <asm/apic.h>
   28.17 +#include <asm/desc.h>
   28.18 +
   28.19 +struct cpuinfo_x86 boot_cpu_data = { 0 };
   28.20 +/* Lots of nice things, since we only target PPro+. */
   28.21 +unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
   28.22 +unsigned long wait_init_idle;
   28.23 +
   28.24 +/* Standard macro to see if a specific flag is changeable */
   28.25 +static inline int flag_is_changeable_p(u32 flag)
   28.26 +{
   28.27 +    u32 f1, f2;
   28.28 +
   28.29 +    asm("pushfl\n\t"
   28.30 +        "pushfl\n\t"
   28.31 +        "popl %0\n\t"
   28.32 +        "movl %0,%1\n\t"
   28.33 +        "xorl %2,%0\n\t"
   28.34 +        "pushl %0\n\t"
   28.35 +        "popfl\n\t"
   28.36 +        "pushfl\n\t"
   28.37 +        "popl %0\n\t"
   28.38 +        "popfl\n\t"
   28.39 +        : "=&r" (f1), "=&r" (f2)
   28.40 +        : "ir" (flag));
   28.41 +
   28.42 +    return ((f1^f2) & flag) != 0;
   28.43 +}
   28.44 +
   28.45 +/* Probe for the CPUID instruction */
   28.46 +static int __init have_cpuid_p(void)
   28.47 +{
   28.48 +    return flag_is_changeable_p(X86_EFLAGS_ID);
   28.49 +}
   28.50 +
   28.51 +void __init get_cpu_vendor(struct cpuinfo_x86 *c)
   28.52 +{
   28.53 +        char *v = c->x86_vendor_id;
   28.54 +
   28.55 +        if (!strcmp(v, "GenuineIntel"))
   28.56 +                c->x86_vendor = X86_VENDOR_INTEL;
   28.57 +        else if (!strcmp(v, "AuthenticAMD"))
   28.58 +                c->x86_vendor = X86_VENDOR_AMD;
   28.59 +        else if (!strcmp(v, "CyrixInstead"))
   28.60 +                c->x86_vendor = X86_VENDOR_CYRIX;
   28.61 +        else if (!strcmp(v, "UMC UMC UMC "))
   28.62 +                c->x86_vendor = X86_VENDOR_UMC;
   28.63 +        else if (!strcmp(v, "CentaurHauls"))
   28.64 +                c->x86_vendor = X86_VENDOR_CENTAUR;
   28.65 +        else if (!strcmp(v, "NexGenDriven"))
   28.66 +                c->x86_vendor = X86_VENDOR_NEXGEN;
   28.67 +        else if (!strcmp(v, "RiseRiseRise"))
   28.68 +                c->x86_vendor = X86_VENDOR_RISE;
   28.69 +        else if (!strcmp(v, "GenuineTMx86") ||
   28.70 +                 !strcmp(v, "TransmetaCPU"))
   28.71 +                c->x86_vendor = X86_VENDOR_TRANSMETA;
   28.72 +        else
   28.73 +                c->x86_vendor = X86_VENDOR_UNKNOWN;
   28.74 +}
   28.75 +
   28.76 +static void __init init_intel(struct cpuinfo_x86 *c)
   28.77 +{
   28.78 +    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
   28.79 +    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
   28.80 +        clear_bit(X86_FEATURE_SEP, &c->x86_capability);
   28.81 +}
   28.82 +
   28.83 +static void __init init_amd(struct cpuinfo_x86 *c)
   28.84 +{
    28.85 +    /* Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
   28.86 +       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
   28.87 +    clear_bit(0*32+31, &c->x86_capability);
   28.88 +	
   28.89 +    switch(c->x86)
   28.90 +    {
   28.91 +    case 5:
   28.92 +        panic("AMD K6 is not supported.\n");
   28.93 +    case 6:	/* An Athlon/Duron. We can trust the BIOS probably */
   28.94 +        break;		
   28.95 +    }
   28.96 +}
   28.97 +
   28.98 +/*
   28.99 + * This does the hard work of actually picking apart the CPU stuff...
  28.100 + */
  28.101 +void __init identify_cpu(struct cpuinfo_x86 *c)
  28.102 +{
  28.103 +    int junk, i;
  28.104 +    u32 xlvl, tfms;
  28.105 +
  28.106 +    c->x86_vendor = X86_VENDOR_UNKNOWN;
  28.107 +    c->cpuid_level = -1;	/* CPUID not detected */
  28.108 +    c->x86_model = c->x86_mask = 0;	/* So far unknown... */
  28.109 +    c->x86_vendor_id[0] = '\0'; /* Unset */
  28.110 +    memset(&c->x86_capability, 0, sizeof c->x86_capability);
  28.111 +
  28.112 +    if ( !have_cpuid_p() )
  28.113 +        panic("Ancient processors not supported\n");
  28.114 +
  28.115 +    /* Get vendor name */
  28.116 +    cpuid(0x00000000, &c->cpuid_level,
  28.117 +          (int *)&c->x86_vendor_id[0],
  28.118 +          (int *)&c->x86_vendor_id[8],
  28.119 +          (int *)&c->x86_vendor_id[4]);
  28.120 +
  28.121 +    get_cpu_vendor(c);
  28.122 +		
  28.123 +    if ( c->cpuid_level == 0 )
  28.124 +        panic("Decrepit CPUID not supported\n");
  28.125 +
  28.126 +    cpuid(0x00000001, &tfms, &junk, &junk,
  28.127 +          &c->x86_capability[0]);
  28.128 +    c->x86 = (tfms >> 8) & 15;
  28.129 +    c->x86_model = (tfms >> 4) & 15;
  28.130 +    c->x86_mask = tfms & 15;
  28.131 +
  28.132 +    /* AMD-defined flags: level 0x80000001 */
  28.133 +    xlvl = cpuid_eax(0x80000000);
  28.134 +    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
  28.135 +        if ( xlvl >= 0x80000001 )
  28.136 +            c->x86_capability[1] = cpuid_edx(0x80000001);
  28.137 +    }
  28.138 +
  28.139 +    /* Transmeta-defined flags: level 0x80860001 */
  28.140 +    xlvl = cpuid_eax(0x80860000);
  28.141 +    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
  28.142 +        if (  xlvl >= 0x80860001 )
  28.143 +            c->x86_capability[2] = cpuid_edx(0x80860001);
  28.144 +    }
  28.145 +
  28.146 +    printk("CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
  28.147 +           c->x86_capability[0],
  28.148 +           c->x86_capability[1],
  28.149 +           c->x86_capability[2],
  28.150 +           c->x86_vendor);
  28.151 +
  28.152 +    switch ( c->x86_vendor ) {
  28.153 +    case X86_VENDOR_INTEL:
  28.154 +        init_intel(c);
  28.155 +        break;
  28.156 +    case X86_VENDOR_AMD:
  28.157 +        init_amd(c);
  28.158 +        break;
  28.159 +    default:
   28.160 +        panic("Only Intel (P6+) and AMD processors are supported\n");
  28.161 +    }
  28.162 +	
  28.163 +    printk("CPU caps: %08x %08x %08x %08x\n",
  28.164 +           c->x86_capability[0],
  28.165 +           c->x86_capability[1],
  28.166 +           c->x86_capability[2],
  28.167 +           c->x86_capability[3]);
  28.168 +
  28.169 +    /*
  28.170 +     * On SMP, boot_cpu_data holds the common feature set between
  28.171 +     * all CPUs; so make sure that we indicate which features are
  28.172 +     * common between the CPUs.  The first time this routine gets
  28.173 +     * executed, c == &boot_cpu_data.
  28.174 +     */
  28.175 +    if ( c != &boot_cpu_data ) {
  28.176 +        /* AND the already accumulated flags with these */
  28.177 +        for ( i = 0 ; i < NCAPINTS ; i++ )
  28.178 +            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
  28.179 +    }
  28.180 +}
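As a worked example of the shift arithmetic in identify_cpu() above, a CPUID leaf-1 signature of 0x00000686 (a representative P6-family value, used here purely for illustration) decodes as follows:

/* Worked example of the family/model/stepping decode used above. */
unsigned int tfms     = 0x00000686;        /* illustrative CPUID leaf-1 EAX */
unsigned int family   = (tfms >> 8) & 15;  /* = 6                           */
unsigned int model    = (tfms >> 4) & 15;  /* = 8                           */
unsigned int stepping =  tfms       & 15;  /* = 6                           */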
  28.181 +
  28.182 +
  28.183 +unsigned long cpu_initialized;
  28.184 +void __init cpu_init(void)
  28.185 +{
  28.186 +    int nr = smp_processor_id();
  28.187 +    struct tss_struct * t = &init_tss[nr];
  28.188 +    
  28.189 +    if ( test_and_set_bit(nr, &cpu_initialized) )
  28.190 +        panic("CPU#%d already initialized!!!\n", nr);
  28.191 +    printk("Initializing CPU#%d\n", nr);
  28.192 +
  28.193 +    __asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
  28.194 +    __asm__ __volatile__("lidt %0": "=m" (idt_descr));
  28.195 +
  28.196 +    /* No nested task. */
  28.197 +    __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
  28.198 +
  28.199 +    /* Set up and load the per-CPU TSS and LDT. */
  28.200 +    t->ss0  = __HYPERVISOR_DS;
  28.201 +    t->esp0 = current->thread.esp0;
  28.202 +    set_tss_desc(nr,t);
  28.203 +    load_TR(nr);
  28.204 +    __asm__ __volatile__("lldt %%ax"::"a" (0));
  28.205 +
  28.206 +    /* Clear all 6 debug registers. */
  28.207 +#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
  28.208 +    CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
  28.209 +#undef CD
  28.210 +
  28.211 +    /* Stick the idle task on the run queue. */
  28.212 +    (void)wake_up(current);
  28.213 +}
  28.214 +
  28.215 +static void __init do_initcalls(void)
  28.216 +{
  28.217 +        initcall_t *call;
  28.218 +
  28.219 +        call = &__initcall_start;
  28.220 +        do {
  28.221 +                (*call)();
  28.222 +                call++;
  28.223 +        } while (call < &__initcall_end);
  28.224 +}
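do_initcalls() simply walks the function-pointer array that the linker gathers between __initcall_start and __initcall_end. A minimal sketch of how an entry ends up there, assuming a Linux-style __initcall() macro is available in this tree (its definition is not part of this changeset):

/*
 * Minimal sketch, assuming a Linux-style __initcall() macro: it places a
 * pointer to example_init in the linker section bounded by __initcall_start
 * and __initcall_end, so do_initcalls() invokes it at boot.
 */
static int __init example_init(void)
{
    return 0;
}
__initcall(example_init);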
  28.225 +
  28.226 +/*
  28.227 + * IBM-compatible BIOSes place drive info tables at initial interrupt
   28.228 + * vectors 0x41 and 0x46. These are in the form of 16-bit-mode far pointers.
  28.229 + */
  28.230 +struct drive_info_struct { unsigned char dummy[32]; } drive_info;
  28.231 +void get_bios_driveinfo(void)
  28.232 +{
  28.233 +    unsigned long seg, off, tab1, tab2;
  28.234 +
  28.235 +    off  = (unsigned long)*(unsigned short *)(4*0x41+0);
  28.236 +    seg  = (unsigned long)*(unsigned short *)(4*0x41+2);
  28.237 +    tab1 = (seg<<4) + off;
  28.238 +    
  28.239 +    off  = (unsigned long)*(unsigned short *)(4*0x46+0);
  28.240 +    seg  = (unsigned long)*(unsigned short *)(4*0x46+2);
  28.241 +    tab2 = (seg<<4) + off;
  28.242 +
  28.243 +    printk("Reading BIOS drive-info tables at 0x%05lx and 0x%05lx\n", 
  28.244 +           tab1, tab2);
  28.245 +
  28.246 +    memcpy(drive_info.dummy+ 0, (char *)tab1, 16);
  28.247 +    memcpy(drive_info.dummy+16, (char *)tab2, 16);
  28.248 +}
  28.249 +
  28.250 +
  28.251 +unsigned long pci_mem_start = 0x10000000;
  28.252 +
  28.253 +void __init start_of_day(void)
  28.254 +{
  28.255 +    extern void trap_init(void);
  28.256 +    extern void init_IRQ(void);
  28.257 +    extern void time_init(void);
  28.258 +    extern void softirq_init(void);
  28.259 +    extern void timer_bh(void);
  28.260 +    extern void tqueue_bh(void);
  28.261 +    extern void immediate_bh(void);
  28.262 +    extern void init_timervecs(void);
  28.263 +    extern int  setup_network_devices(void);
  28.264 +    extern void net_init(void);
  28.265 +
  28.266 +    unsigned long low_mem_size;
  28.267 +    
  28.268 +    /*
  28.269 +     * We do this early, but tables are in the lowest 1MB (usually
  28.270 +     * 0xfe000-0xfffff). Therefore they're unlikely to ever get clobbered.
  28.271 +     */
  28.272 +    get_bios_driveinfo();
  28.273 +
  28.274 +    /* Tell the PCI layer not to allocate too close to the RAM area.. */
  28.275 +    low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
  28.276 +    if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
  28.277 +    
  28.278 +    identify_cpu(&boot_cpu_data); /* get CPU type info */
  28.279 +    if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
  28.280 +    if ( cpu_has_xmm )  set_in_cr4(X86_CR4_OSXMMEXCPT);
  28.281 +    find_smp_config();            /* find ACPI tables */
  28.282 +    smp_alloc_memory();           /* trampoline which other CPUs jump at */
  28.283 +    paging_init();                /* not much here now, but sets up fixmap */
  28.284 +    if ( smp_found_config ) get_smp_config();
  28.285 +    domain_init();
  28.286 +    trap_init(); /*
  28.287 +                  * installs trap (s/w exception) wrappers.